git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'intx' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/misc-2.6
author Linus Torvalds <torvalds@woody.osdl.org>
Thu, 7 Dec 2006 23:04:20 +0000 (15:04 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
Thu, 7 Dec 2006 23:04:20 +0000 (15:04 -0800)
* 'intx' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/misc-2.6:
  PCI MSI: always toggle legacy-INTx-enable bit upon MSI entry/exit
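For context, the single change being pulled here adjusts how the PCI MSI code handles a device's legacy INTx line on MSI entry and exit. The sketch below is an illustration only, written against the standard PCI command register layout (PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); it is not a copy of the merged patch in drivers/pci/msi.c, and the helper name used is hypothetical (the kernel already provides a pci_intx() helper for the same operation).

	/*
	 * Illustrative sketch only, not the merged patch: what "toggling the
	 * legacy-INTx-enable bit" means at the config-space level.  On MSI
	 * entry the INTx disable bit in the PCI command register is set; on
	 * MSI exit it is cleared again.
	 */
	#include <linux/pci.h>

	static void msi_toggle_intx(struct pci_dev *pdev, int enable)
	{
		u16 cmd, new;

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);

		if (enable)
			new = cmd & ~PCI_COMMAND_INTX_DISABLE;	/* MSI exit: re-enable INTx */
		else
			new = cmd | PCI_COMMAND_INTX_DISABLE;	/* MSI entry: mask INTx */

		if (new != cmd)
			pci_write_config_word(pdev, PCI_COMMAND, new);
	}

In effect, entering MSI mode masks the shared legacy INTx pin for the device and leaving MSI mode restores it, which is what the one-line summary above describes.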

2165 files changed:
Documentation/DMA-API.txt
Documentation/DocBook/Makefile
Documentation/DocBook/kernel-api.tmpl
Documentation/IPMI.txt
Documentation/block/as-iosched.txt
Documentation/devices.txt
Documentation/filesystems/Locking
Documentation/filesystems/fuse.txt
Documentation/filesystems/sysv-fs.txt
Documentation/i386/boot.txt
Documentation/kernel-parameters.txt
Documentation/networking/00-INDEX
Documentation/networking/generic_netlink.txt [new file with mode: 0644]
Documentation/power/s2ram.txt [new file with mode: 0644]
Documentation/power/swsusp-and-swap-files.txt [new file with mode: 0644]
Documentation/power/swsusp.txt
Documentation/power/userland-swsusp.txt
Documentation/powerpc/booting-without-of.txt
Documentation/powerpc/mpc52xx-device-tree-bindings.txt [new file with mode: 0644]
Documentation/scsi/scsi_mid_low_api.txt
Documentation/stable_api_nonsense.txt
Documentation/sysctl/kernel.txt
Documentation/x86_64/boot-options.txt
MAINTAINERS
README
REPORTING-BUGS
arch/alpha/kernel/pci.c
arch/alpha/kernel/sys_miata.c
arch/alpha/kernel/sys_nautilus.c
arch/alpha/mm/fault.c
arch/arm/common/sharpsl_pm.c
arch/arm/kernel/signal.c
arch/arm/mach-omap1/board-h3.c
arch/arm/mach-omap1/board-nokia770.c
arch/arm/mach-omap1/devices.c
arch/arm/mach-omap1/leds-osk.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-pxa/akita-ioexp.c
arch/arm/mach-s3c2410/dma.c
arch/arm/mm/fault.c
arch/arm26/kernel/ecard.c
arch/arm26/mm/fault.c
arch/arm26/mm/memc.c
arch/avr32/kernel/kprobes.c
arch/avr32/kernel/signal.c
arch/avr32/mm/dma-coherent.c
arch/cris/mm/fault.c
arch/frv/kernel/futex.c
arch/frv/kernel/setup.c
arch/frv/kernel/signal.c
arch/frv/mm/fault.c
arch/frv/mm/pgalloc.c
arch/h8300/kernel/setup.c
arch/h8300/kernel/signal.c
arch/h8300/kernel/vmlinux.lds.S
arch/i386/Kconfig
arch/i386/Kconfig.cpu
arch/i386/Kconfig.debug
arch/i386/Makefile
arch/i386/Makefile.cpu
arch/i386/boot/compressed/Makefile
arch/i386/boot/compressed/head.S
arch/i386/boot/compressed/misc.c
arch/i386/boot/compressed/relocs.c [new file with mode: 0644]
arch/i386/boot/compressed/vmlinux.lds [new file with mode: 0644]
arch/i386/boot/compressed/vmlinux.scr
arch/i386/boot/setup.S
arch/i386/defconfig
arch/i386/kernel/Makefile
arch/i386/kernel/acpi/cstate.c
arch/i386/kernel/acpi/earlyquirk.c
arch/i386/kernel/alternative.c
arch/i386/kernel/apic.c
arch/i386/kernel/apm.c
arch/i386/kernel/asm-offsets.c
arch/i386/kernel/cpu/amd.c
arch/i386/kernel/cpu/common.c
arch/i386/kernel/cpu/intel.c
arch/i386/kernel/cpu/intel_cacheinfo.c
arch/i386/kernel/cpu/mcheck/non-fatal.c
arch/i386/kernel/cpu/mcheck/therm_throt.c
arch/i386/kernel/cpu/mtrr/Makefile
arch/i386/kernel/cpu/mtrr/amd.c
arch/i386/kernel/cpu/mtrr/centaur.c
arch/i386/kernel/cpu/mtrr/cyrix.c
arch/i386/kernel/cpu/mtrr/generic.c
arch/i386/kernel/cpu/mtrr/if.c
arch/i386/kernel/cpu/mtrr/main.c
arch/i386/kernel/cpu/mtrr/mtrr.h
arch/i386/kernel/cpu/proc.c
arch/i386/kernel/cpuid.c
arch/i386/kernel/crash.c
arch/i386/kernel/e820.c [new file with mode: 0644]
arch/i386/kernel/efi.c
arch/i386/kernel/entry.S
arch/i386/kernel/head.S
arch/i386/kernel/hpet.c
arch/i386/kernel/i8259.c
arch/i386/kernel/io_apic.c
arch/i386/kernel/kprobes.c
arch/i386/kernel/ldt.c
arch/i386/kernel/mca.c
arch/i386/kernel/microcode.c
arch/i386/kernel/module.c
arch/i386/kernel/mpparse.c
arch/i386/kernel/msr.c
arch/i386/kernel/nmi.c
arch/i386/kernel/paravirt.c [new file with mode: 0644]
arch/i386/kernel/pci-dma.c
arch/i386/kernel/process.c
arch/i386/kernel/ptrace.c
arch/i386/kernel/quirks.c
arch/i386/kernel/reboot.c
arch/i386/kernel/setup.c
arch/i386/kernel/signal.c
arch/i386/kernel/smp.c
arch/i386/kernel/smpboot.c
arch/i386/kernel/sysenter.c
arch/i386/kernel/time.c
arch/i386/kernel/time_hpet.c
arch/i386/kernel/topology.c
arch/i386/kernel/traps.c
arch/i386/kernel/tsc.c
arch/i386/kernel/vm86.c
arch/i386/kernel/vmlinux.lds.S
arch/i386/mach-generic/probe.c
arch/i386/mach-voyager/voyager_cat.c
arch/i386/mach-voyager/voyager_smp.c
arch/i386/math-emu/fpu_emu.h
arch/i386/math-emu/fpu_entry.c
arch/i386/math-emu/fpu_system.h
arch/i386/math-emu/load_store.c
arch/i386/math-emu/reg_ld_str.c
arch/i386/mm/boot_ioremap.c
arch/i386/mm/discontig.c
arch/i386/mm/fault.c
arch/i386/mm/highmem.c
arch/i386/mm/hugetlbpage.c
arch/i386/mm/init.c
arch/i386/mm/pageattr.c
arch/i386/mm/pgtable.c
arch/i386/pci/early.c
arch/i386/pci/irq.c
arch/i386/pci/pcbios.c
arch/i386/power/Makefile
arch/i386/power/cpu.c
arch/i386/power/suspend.c [new file with mode: 0644]
arch/i386/power/swsusp.S
arch/ia64/hp/sim/simserial.c
arch/ia64/ia32/binfmt_elf32.c
arch/ia64/ia32/ia32_support.c
arch/ia64/ia32/ia32priv.h
arch/ia64/ia32/sys_ia32.c
arch/ia64/kernel/kprobes.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/palinfo.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/smpboot.c
arch/ia64/kernel/topology.c
arch/ia64/mm/hugetlbpage.c
arch/ia64/mm/init.c
arch/ia64/pci/pci.c
arch/m32r/kernel/setup.c
arch/m32r/kernel/signal.c
arch/m32r/mm/discontig.c
arch/m68k/amiga/chipram.c
arch/m68k/atari/hades-pci.c
arch/m68k/mm/fault.c
arch/m68knommu/kernel/vmlinux.lds.S
arch/m68knommu/platform/5307/timers.c
arch/m68knommu/platform/68360/config.c
arch/mips/Kconfig
arch/mips/dec/ecc-berr.c
arch/mips/dec/ioasic-irq.c
arch/mips/dec/kn01-berr.c
arch/mips/dec/kn02-irq.c
arch/mips/emma2rh/common/irq_emma2rh.c
arch/mips/emma2rh/markeins/irq_markeins.c
arch/mips/jazz/irq.c
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/i8259.c
arch/mips/kernel/irixelf.c
arch/mips/kernel/irq-mv6434x.c
arch/mips/kernel/irq-rm7000.c
arch/mips/kernel/irq-rm9000.c
arch/mips/kernel/irq.c
arch/mips/kernel/irq_cpu.c
arch/mips/kernel/kspd.c
arch/mips/kernel/linux32.c
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/smp.c
arch/mips/lasat/interrupt.c
arch/mips/lib-32/Makefile
arch/mips/lib-32/csum_partial.S [deleted file]
arch/mips/lib-64/Makefile
arch/mips/lib-64/csum_partial.S [deleted file]
arch/mips/lib/Makefile
arch/mips/lib/csum_partial.S [new file with mode: 0644]
arch/mips/mm/dma-coherent.c
arch/mips/mm/dma-ip27.c
arch/mips/mm/dma-ip32.c
arch/mips/mm/dma-noncoherent.c
arch/mips/mm/highmem.c
arch/mips/momentum/ocelot_c/cpci-irq.c
arch/mips/momentum/ocelot_c/uart-irq.c
arch/mips/philips/pnx8550/common/int.c
arch/mips/sgi-ip22/ip22-int.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip27/ip27-timer.c
arch/mips/sibyte/swarm/setup.c
arch/mips/tx4927/common/tx4927_irq.c
arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
arch/mips/tx4938/common/irq.c
arch/mips/tx4938/toshiba_rbtx4938/irq.c
arch/mips/vr41xx/Kconfig
arch/mips/vr41xx/common/icu.c
arch/parisc/kernel/binfmt_elf32.c
arch/parisc/mm/fault.c
arch/powerpc/.gitignore [new file with mode: 0644]
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/boot/.gitignore
arch/powerpc/boot/Makefile
arch/powerpc/boot/dts/kuroboxHG.dts [new file with mode: 0644]
arch/powerpc/boot/dts/lite5200.dts [new file with mode: 0644]
arch/powerpc/boot/dts/lite5200b.dts [new file with mode: 0644]
arch/powerpc/boot/dts/mpc7448hpc2.dts
arch/powerpc/boot/flatdevtree.c [new file with mode: 0644]
arch/powerpc/boot/flatdevtree.h
arch/powerpc/boot/flatdevtree_env.h [new file with mode: 0644]
arch/powerpc/boot/flatdevtree_misc.c [new file with mode: 0644]
arch/powerpc/boot/io.h [new file with mode: 0644]
arch/powerpc/boot/main.c
arch/powerpc/boot/mktree.c [new file with mode: 0644]
arch/powerpc/boot/ns16550.c [new file with mode: 0644]
arch/powerpc/boot/of.c
arch/powerpc/boot/ops.h
arch/powerpc/boot/serial.c [new file with mode: 0644]
arch/powerpc/boot/simple_alloc.c [new file with mode: 0644]
arch/powerpc/boot/stdio.c
arch/powerpc/boot/util.S [new file with mode: 0644]
arch/powerpc/boot/wrapper
arch/powerpc/boot/zImage.coff.lds.S
arch/powerpc/configs/cell_defconfig
arch/powerpc/configs/linkstation_defconfig [new file with mode: 0644]
arch/powerpc/configs/lite5200_defconfig [new file with mode: 0644]
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/ps3_defconfig [new file with mode: 0644]
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_ppc970.S
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/dma_64.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/ibmebus.c
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/idle_power4.S
arch/powerpc/kernel/io.c
arch/powerpc/kernel/iomap.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/of_device.c
arch/powerpc/kernel/of_platform.c [new file with mode: 0644]
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/pci_direct_iommu.c [deleted file]
arch/powerpc/kernel/pci_iommu.c [deleted file]
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_parse.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/rtas_pci.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/smp-tbsync.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/sys_ppc32.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vio.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/mm/Makefile
arch/powerpc/mm/fault.c
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slb.c
arch/powerpc/oprofile/Makefile
arch/powerpc/oprofile/common.c
arch/powerpc/oprofile/op_model_cell.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/Makefile [new file with mode: 0644]
arch/powerpc/platforms/52xx/efika-pci.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/efika-setup.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/efika.h [new file with mode: 0644]
arch/powerpc/platforms/52xx/lite5200.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/mpc52xx_common.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/mpc52xx_pic.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/mpc52xx_pic.h [new file with mode: 0644]
arch/powerpc/platforms/82xx/mpc82xx_ads.c
arch/powerpc/platforms/83xx/mpc832x_mds.c
arch/powerpc/platforms/83xx/mpc834x_itx.c
arch/powerpc/platforms/83xx/mpc834x_sys.c
arch/powerpc/platforms/83xx/mpc8360e_pb.c
arch/powerpc/platforms/83xx/mpc83xx.h
arch/powerpc/platforms/83xx/pci.c
arch/powerpc/platforms/85xx/misc.c
arch/powerpc/platforms/85xx/mpc85xx_ads.c
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
arch/powerpc/platforms/Makefile
arch/powerpc/platforms/cell/Kconfig
arch/powerpc/platforms/cell/Makefile
arch/powerpc/platforms/cell/cbe_cpufreq.c [new file with mode: 0644]
arch/powerpc/platforms/cell/cbe_regs.c
arch/powerpc/platforms/cell/cbe_regs.h
arch/powerpc/platforms/cell/cbe_thermal.c [new file with mode: 0644]
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/interrupt.h
arch/powerpc/platforms/cell/io-workarounds.c [new file with mode: 0644]
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/cell/iommu.h [deleted file]
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/cell/pmu.c [new file with mode: 0644]
arch/powerpc/platforms/cell/setup.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spu_coredump.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spu_priv1_mmio.c
arch/powerpc/platforms/cell/spu_priv1_mmio.h [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/Makefile
arch/powerpc/platforms/cell/spufs/backing_ops.c
arch/powerpc/platforms/cell/spufs/context.c
arch/powerpc/platforms/cell/spufs/coredump.c [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/cell/spufs/hw_ops.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/cell/spufs/run.c
arch/powerpc/platforms/cell/spufs/spufs.h
arch/powerpc/platforms/cell/spufs/switch.c
arch/powerpc/platforms/chrp/chrp.h
arch/powerpc/platforms/chrp/pci.c
arch/powerpc/platforms/chrp/setup.c
arch/powerpc/platforms/embedded6xx/Kconfig
arch/powerpc/platforms/embedded6xx/Makefile
arch/powerpc/platforms/embedded6xx/linkstation.c [new file with mode: 0644]
arch/powerpc/platforms/embedded6xx/ls_uart.c [new file with mode: 0644]
arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
arch/powerpc/platforms/iseries/Makefile
arch/powerpc/platforms/iseries/dt.c
arch/powerpc/platforms/iseries/iommu.c
arch/powerpc/platforms/iseries/ksyms.c
arch/powerpc/platforms/iseries/misc.S
arch/powerpc/platforms/iseries/pci.c
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/iseries/viopath.c
arch/powerpc/platforms/maple/maple.h
arch/powerpc/platforms/maple/pci.c
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/pasemi/pasemi.h
arch/powerpc/platforms/pasemi/pci.c
arch/powerpc/platforms/pasemi/setup.c
arch/powerpc/platforms/powermac/backlight.c
arch/powerpc/platforms/powermac/feature.c
arch/powerpc/platforms/powermac/pci.c
arch/powerpc/platforms/powermac/pmac.h
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/ps3/Kconfig [new file with mode: 0644]
arch/powerpc/platforms/ps3/Makefile [new file with mode: 0644]
arch/powerpc/platforms/ps3/exports.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/htab.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/hvcall.S [new file with mode: 0644]
arch/powerpc/platforms/ps3/interrupt.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/mm.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/os-area.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/platform.h [new file with mode: 0644]
arch/powerpc/platforms/ps3/repository.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/setup.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/smp.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/spu.c [new file with mode: 0644]
arch/powerpc/platforms/ps3/time.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/eeh_event.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/pci.c
arch/powerpc/platforms/pseries/pci_dlpar.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/dart_iommu.c
arch/powerpc/sysdev/dcr-low.S [new file with mode: 0644]
arch/powerpc/sysdev/dcr.c [new file with mode: 0644]
arch/powerpc/sysdev/fsl_soc.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/qe_lib/qe.c
arch/powerpc/sysdev/qe_lib/ucc_fast.c
arch/powerpc/sysdev/qe_lib/ucc_slow.c
arch/powerpc/sysdev/rom.c [new file with mode: 0644]
arch/powerpc/sysdev/todc.c [deleted file]
arch/powerpc/sysdev/tsi108_pci.c
arch/powerpc/xmon/Makefile
arch/powerpc/xmon/dis-asm.h [new file with mode: 0644]
arch/powerpc/xmon/ppc-dis.c
arch/powerpc/xmon/ppc-opc.c
arch/powerpc/xmon/ppc.h
arch/powerpc/xmon/spu-dis.c [new file with mode: 0644]
arch/powerpc/xmon/spu-insns.h [new file with mode: 0644]
arch/powerpc/xmon/spu-opc.c [new file with mode: 0644]
arch/powerpc/xmon/spu.h [new file with mode: 0644]
arch/powerpc/xmon/xmon.c
arch/ppc/.gitignore [new file with mode: 0644]
arch/ppc/8260_io/fcc_enet.c
arch/ppc/8xx_io/fec.c
arch/ppc/Kconfig
arch/ppc/boot/images/.gitignore [new file with mode: 0644]
arch/ppc/boot/lib/.gitignore [new file with mode: 0644]
arch/ppc/boot/utils/.gitignore [new file with mode: 0644]
arch/ppc/kernel/setup.c
arch/ppc/kernel/traps.c
arch/ppc/kernel/vmlinux.lds.S
arch/ppc/platforms/4xx/bubinga.c
arch/ppc/platforms/4xx/cpci405.c
arch/ppc/platforms/4xx/ep405.c
arch/ppc/platforms/83xx/mpc834x_sys.c
arch/ppc/platforms/85xx/mpc8540_ads.c
arch/ppc/platforms/85xx/mpc8560_ads.c
arch/ppc/platforms/85xx/mpc85xx_cds_common.c
arch/ppc/platforms/85xx/sbc8560.c
arch/ppc/platforms/85xx/stx_gp3.c
arch/ppc/platforms/85xx/tqm85xx.c
arch/ppc/platforms/mpc8272ads_setup.c
arch/ppc/platforms/mpc866ads_setup.c
arch/ppc/syslib/mpc8xx_devices.c
arch/s390/appldata/appldata_base.c
arch/s390/kernel/binfmt_elf32.c
arch/s390/kernel/kprobes.c
arch/s390/lib/uaccess_std.c
arch/sh/Kconfig
arch/sh/Kconfig.debug
arch/sh/Makefile
arch/sh/boards/renesas/r7780rp/Makefile
arch/sh/boards/renesas/r7780rp/irq.c
arch/sh/boards/renesas/r7780rp/psw.c [new file with mode: 0644]
arch/sh/boards/renesas/r7780rp/setup.c
arch/sh/boards/se/7206/Makefile [new file with mode: 0644]
arch/sh/boards/se/7206/io.c [new file with mode: 0644]
arch/sh/boards/se/7206/irq.c [new file with mode: 0644]
arch/sh/boards/se/7206/led.c [new file with mode: 0644]
arch/sh/boards/se/7206/setup.c [new file with mode: 0644]
arch/sh/boards/se/7619/Makefile [new file with mode: 0644]
arch/sh/boards/se/7619/io.c [new file with mode: 0644]
arch/sh/boards/se/7619/setup.c [new file with mode: 0644]
arch/sh/boards/titan/setup.c
arch/sh/boot/compressed/misc.c
arch/sh/configs/r7780rp_defconfig
arch/sh/configs/se7206_defconfig [new file with mode: 0644]
arch/sh/drivers/Kconfig [new file with mode: 0644]
arch/sh/drivers/Makefile
arch/sh/drivers/dma/Makefile
arch/sh/drivers/dma/dma-api.c
arch/sh/drivers/dma/dma-sh.c
arch/sh/drivers/dma/dma-sysfs.c
arch/sh/drivers/pci/ops-titan.c
arch/sh/drivers/pci/pci-sh7780.c
arch/sh/drivers/push-switch.c [new file with mode: 0644]
arch/sh/kernel/Makefile
arch/sh/kernel/cpu/Makefile
arch/sh/kernel/cpu/clock.c
arch/sh/kernel/cpu/init.c
arch/sh/kernel/cpu/irq/Makefile
arch/sh/kernel/cpu/irq/imask.c
arch/sh/kernel/cpu/irq/intc2.c
arch/sh/kernel/cpu/irq/ipr.c
arch/sh/kernel/cpu/sh2/Makefile
arch/sh/kernel/cpu/sh2/clock-sh7619.c [new file with mode: 0644]
arch/sh/kernel/cpu/sh2/entry.S [new file with mode: 0644]
arch/sh/kernel/cpu/sh2/ex.S [new file with mode: 0644]
arch/sh/kernel/cpu/sh2/probe.c
arch/sh/kernel/cpu/sh2/setup-sh7619.c [new file with mode: 0644]
arch/sh/kernel/cpu/sh2a/Makefile [new file with mode: 0644]
arch/sh/kernel/cpu/sh2a/clock-sh7206.c [new file with mode: 0644]
arch/sh/kernel/cpu/sh2a/probe.c [new file with mode: 0644]
arch/sh/kernel/cpu/sh2a/setup-sh7206.c [new file with mode: 0644]
arch/sh/kernel/cpu/sh3/Makefile
arch/sh/kernel/cpu/sh3/clock-sh7709.c
arch/sh/kernel/cpu/sh3/entry.S [moved from arch/sh/kernel/entry.S with 58% similarity]
arch/sh/kernel/cpu/sh4/Makefile
arch/sh/kernel/cpu/sh4/clock-sh4-202.c
arch/sh/kernel/cpu/sh4/clock-sh7780.c
arch/sh/kernel/cpu/sh4/fpu.c
arch/sh/kernel/cpu/sh4/probe.c
arch/sh/kernel/cpu/sh4/setup-sh7750.c
arch/sh/kernel/cpu/sh4/setup-sh7780.c
arch/sh/kernel/cpu/sh4/sq.c
arch/sh/kernel/early_printk.c
arch/sh/kernel/entry-common.S [new file with mode: 0644]
arch/sh/kernel/head.S
arch/sh/kernel/irq.c
arch/sh/kernel/process.c
arch/sh/kernel/relocate_kernel.S
arch/sh/kernel/setup.c
arch/sh/kernel/sh_ksyms.c
arch/sh/kernel/signal.c
arch/sh/kernel/stacktrace.c [new file with mode: 0644]
arch/sh/kernel/sys_sh.c
arch/sh/kernel/time.c
arch/sh/kernel/timers/Makefile
arch/sh/kernel/timers/timer-cmt.c [new file with mode: 0644]
arch/sh/kernel/timers/timer-mtu2.c [new file with mode: 0644]
arch/sh/kernel/timers/timer-tmu.c
arch/sh/kernel/timers/timer.c
arch/sh/kernel/traps.c
arch/sh/kernel/vsyscall/vsyscall.c
arch/sh/mm/Kconfig
arch/sh/mm/cache-sh2.c
arch/sh/mm/cache-sh4.c
arch/sh/mm/clear_page.S
arch/sh/mm/copy_page.S
arch/sh/mm/fault.c
arch/sh/mm/hugetlbpage.c
arch/sh/mm/init.c
arch/sh/mm/ioremap.c
arch/sh/mm/pg-dma.c
arch/sh/mm/pg-sh4.c
arch/sh/mm/pmb.c
arch/sh/tools/mach-types
arch/sh64/kernel/setup.c
arch/sh64/kernel/signal.c
arch/sh64/mm/fault.c
arch/sh64/mm/hugetlbpage.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/mm/highmem.c
arch/sparc64/kernel/binfmt_elf32.c
arch/sparc64/kernel/vmlinux.lds.S
arch/sparc64/mm/hugetlbpage.c
arch/sparc64/mm/init.c
arch/sparc64/mm/tsb.c
arch/um/drivers/chan_kern.c
arch/um/drivers/daemon_kern.c
arch/um/drivers/line.c
arch/um/drivers/mcast_kern.c
arch/um/drivers/mconsole_kern.c
arch/um/drivers/net_kern.c
arch/um/drivers/pcap_kern.c
arch/um/drivers/port_kern.c
arch/um/drivers/slip_kern.c
arch/um/drivers/slirp_kern.c
arch/um/include/chan_kern.h
arch/um/include/line.h
arch/um/include/sysdep-i386/ptrace.h
arch/um/include/sysdep-i386/stub.h
arch/um/include/sysdep-x86_64/ptrace.h
arch/um/os-Linux/drivers/ethertap_kern.c
arch/um/os-Linux/drivers/tuntap_kern.c
arch/um/sys-i386/ldt.c
arch/um/sys-i386/ptrace_user.c
arch/um/sys-i386/user-offsets.c
arch/v850/kernel/vmlinux.lds.S
arch/x86_64/Kconfig
arch/x86_64/Makefile
arch/x86_64/defconfig
arch/x86_64/ia32/ia32_binfmt.c
arch/x86_64/ia32/ia32_signal.c
arch/x86_64/ia32/syscall32.c
arch/x86_64/kernel/apic.c
arch/x86_64/kernel/crash.c
arch/x86_64/kernel/early-quirks.c
arch/x86_64/kernel/entry.S
arch/x86_64/kernel/genapic.c
arch/x86_64/kernel/head64.c
arch/x86_64/kernel/i387.c
arch/x86_64/kernel/i8259.c
arch/x86_64/kernel/io_apic.c
arch/x86_64/kernel/irq.c
arch/x86_64/kernel/kprobes.c
arch/x86_64/kernel/mce.c
arch/x86_64/kernel/mce_amd.c
arch/x86_64/kernel/mpparse.c
arch/x86_64/kernel/nmi.c
arch/x86_64/kernel/pci-calgary.c
arch/x86_64/kernel/pci-dma.c
arch/x86_64/kernel/pci-gart.c
arch/x86_64/kernel/process.c
arch/x86_64/kernel/setup.c
arch/x86_64/kernel/smp.c
arch/x86_64/kernel/smpboot.c
arch/x86_64/kernel/time.c
arch/x86_64/kernel/traps.c
arch/x86_64/kernel/vmlinux.lds.S
arch/x86_64/kernel/vsyscall.c
arch/x86_64/lib/csum-partial.c
arch/x86_64/lib/delay.c
arch/x86_64/mm/fault.c
arch/x86_64/mm/init.c
arch/x86_64/mm/pageattr.c
block/Kconfig
block/as-iosched.c
block/blktrace.c
block/cfq-iosched.c
block/ll_rw_blk.c
block/scsi_ioctl.c
crypto/Kconfig
crypto/Makefile
crypto/api.c
crypto/cryptomgr.c
crypto/digest.c
crypto/gf128mul.c [new file with mode: 0644]
crypto/lrw.c [new file with mode: 0644]
crypto/tcrypt.c
crypto/tcrypt.h
crypto/xcbc.c [new file with mode: 0644]
drivers/Makefile
drivers/acpi/osl.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_pcmcia.c
drivers/ata/sata_promise.c
drivers/atm/Makefile
drivers/atm/he.c
drivers/atm/idt77252.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/dmapool.c
drivers/base/memory.c
drivers/base/topology.c
drivers/block/DAC960.c
drivers/block/Kconfig
drivers/block/aoe/aoe.h
drivers/block/aoe/aoeblk.c
drivers/block/aoe/aoecmd.c
drivers/block/aoe/aoedev.c
drivers/block/cciss.c
drivers/block/cciss.h
drivers/block/cciss_cmd.h
drivers/block/floppy.c
drivers/block/nbd.c
drivers/block/paride/aten.c
drivers/block/paride/bpck.c
drivers/block/paride/bpck6.c
drivers/block/paride/comm.c
drivers/block/paride/dstr.c
drivers/block/paride/epat.c
drivers/block/paride/epia.c
drivers/block/paride/fit2.c
drivers/block/paride/fit3.c
drivers/block/paride/friq.c
drivers/block/paride/frpw.c
drivers/block/paride/jumbo [deleted file]
drivers/block/paride/kbic.c
drivers/block/paride/ktti.c
drivers/block/paride/on20.c
drivers/block/paride/on26.c
drivers/block/paride/paride.c
drivers/block/paride/paride.h
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/paride/pg.c
drivers/block/paride/pseudo.h
drivers/block/paride/pt.c
drivers/block/pktcdvd.c
drivers/block/sx8.c
drivers/block/ub.c
drivers/block/viodasd.c
drivers/bluetooth/bcm203x.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_bcsp.c
drivers/cdrom/optcd.c
drivers/cdrom/sbpcd.c
drivers/char/agp/amd64-agp.c
drivers/char/cyclades.c
drivers/char/decserial.c
drivers/char/drm/drm_sman.c
drivers/char/drm/drm_vm.c
drivers/char/drm/via_dmablit.c
drivers/char/epca.c
drivers/char/esp.c
drivers/char/genrtc.c
drivers/char/hvc_console.c
drivers/char/hvcs.c
drivers/char/hvsi.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/ip2/i2cmd.h
drivers/char/ip2/i2lib.c
drivers/char/ip2/ip2main.c
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_kcs_sm.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_poweroff.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_smic_sm.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/char/isicom.c
drivers/char/istallion.c
drivers/char/misc.c
drivers/char/mmtimer.c
drivers/char/moxa.c
drivers/char/mxser.c
drivers/char/pcmcia/cm4000_cs.c
drivers/char/pcmcia/cm4040_cs.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/random.c
drivers/char/rio/rio_linux.c
drivers/char/riscom8.c
drivers/char/serial167.c
drivers/char/sonypi.c
drivers/char/specialix.c
drivers/char/stallion.c
drivers/char/synclink.c
drivers/char/synclink_gt.c
drivers/char/synclinkmp.c
drivers/char/sysrq.c
drivers/char/toshiba.c
drivers/char/tpm/tpm.c
drivers/char/tty_io.c
drivers/char/vt.c
drivers/char/watchdog/pcwd_usb.c
drivers/connector/cn_queue.c
drivers/connector/connector.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/geode-aes.c [new file with mode: 0644]
drivers/crypto/geode-aes.h [new file with mode: 0644]
drivers/dma/ioatdma.c
drivers/edac/edac_mc.c
drivers/i2c/chips/ds1374.c
drivers/i2c/chips/m41t00.c
drivers/ide/Kconfig
drivers/ide/ide.c
drivers/ide/legacy/ide-cs.c
drivers/ide/pci/via82cxxx.c
drivers/ieee1394/eth1394.c
drivers/ieee1394/hosts.c
drivers/ieee1394/hosts.h
drivers/ieee1394/nodemgr.c
drivers/ieee1394/ohci1394.c
drivers/ieee1394/pcilynx.c
drivers/ieee1394/raw1394.c
drivers/ieee1394/sbp2.c
drivers/ieee1394/sbp2.h
drivers/infiniband/core/addr.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/mad_priv.h
drivers/infiniband/core/mad_rmpp.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/uverbs_mem.c
drivers/infiniband/hw/amso1100/c2_vq.c
drivers/infiniband/hw/ehca/ehca_av.c
drivers/infiniband/hw/ehca/ehca_cq.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_mrmw.c
drivers/infiniband/hw/ehca/ehca_pd.c
drivers/infiniband/hw/ehca/ehca_qp.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/mthca/mthca_av.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/gameport/gameport.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/lkkbd.c
drivers/input/keyboard/sunkbd.c
drivers/input/misc/hp_sdc_rtc.c
drivers/input/mouse/psmouse-base.c
drivers/input/serio/libps2.c
drivers/input/serio/serio.c
drivers/input/touchscreen/ads7846.c
drivers/isdn/act2000/capi.c
drivers/isdn/act2000/capi.h
drivers/isdn/act2000/module.c
drivers/isdn/capi/kcapi.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/gigaset/usb-gigaset.c
drivers/isdn/hardware/avm/avm_cs.c
drivers/isdn/hisax/amd7930_fn.c
drivers/isdn/hisax/avma1_cs.c
drivers/isdn/hisax/config.c
drivers/isdn/hisax/elsa_cs.c
drivers/isdn/hisax/hfc4s8s_l1.c
drivers/isdn/hisax/hfc_2bds0.c
drivers/isdn/hisax/hfc_pci.c
drivers/isdn/hisax/hfc_sx.c
drivers/isdn/hisax/icc.c
drivers/isdn/hisax/isac.c
drivers/isdn/hisax/isar.c
drivers/isdn/hisax/isdnhdlc.h
drivers/isdn/hisax/isdnl1.c
drivers/isdn/hisax/sedlbauer_cs.c
drivers/isdn/hisax/teles_cs.c
drivers/isdn/hisax/w6692.c
drivers/isdn/hysdn/boardergo.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/pcbit/drv.c
drivers/isdn/pcbit/layer2.c
drivers/isdn/pcbit/pcbit.h
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/leds-wrap.c [new file with mode: 0644]
drivers/macintosh/Kconfig
drivers/macintosh/Makefile
drivers/macintosh/adb.c
drivers/macintosh/apm_emu.c
drivers/macintosh/rack-meter.c [new file with mode: 0644]
drivers/macintosh/smu.c
drivers/macintosh/therm_adt746x.c
drivers/macintosh/therm_pm72.c
drivers/macintosh/therm_windtunnel.c
drivers/macintosh/via-pmu.c
drivers/macintosh/windfarm_core.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/md/kcopyd.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/dvb/b2c2/flexcop-pci.c
drivers/media/dvb/cinergyT2/cinergyT2.c
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/dvb/dvb-core/dvb_net.c
drivers/media/dvb/dvb-usb/dvb-usb-remote.c
drivers/media/dvb/dvb-usb/dvb-usb.h
drivers/media/dvb/dvb-usb/usb-urb.c
drivers/media/dvb/frontends/l64781.c
drivers/media/dvb/ttusb-dec/ttusb_dec.c
drivers/media/radio/Kconfig
drivers/media/video/cpia_pp.c
drivers/media/video/cx88/cx88-input.c
drivers/media/video/ir-kbd-i2c.c
drivers/media/video/msp3400-driver.c
drivers/media/video/pvrusb2/pvrusb2-context.c
drivers/media/video/saa6588.c
drivers/media/video/saa7134/saa7134-empress.c
drivers/media/video/tvaudio.c
drivers/media/video/video-buf-dvb.c
drivers/media/video/vivi.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptlan.c
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptspi.c
drivers/message/i2o/bus-osm.c
drivers/message/i2o/device.c
drivers/message/i2o/driver.c
drivers/message/i2o/exec-osm.c
drivers/message/i2o/i2o_block.c
drivers/message/i2o/i2o_block.h
drivers/message/i2o/i2o_config.c
drivers/message/i2o/i2o_proc.c
drivers/message/i2o/i2o_scsi.c
drivers/message/i2o/pci.c
drivers/mfd/ucb1x00-ts.c
drivers/misc/tifm_7xx1.c
drivers/misc/tifm_core.c
drivers/mmc/mmc.c
drivers/mmc/mmc.h
drivers/mmc/mmc_sysfs.c
drivers/mmc/tifm_sd.c
drivers/mtd/devices/m25p80.c
drivers/net/3c501.c
drivers/net/3c503.c
drivers/net/3c505.c
drivers/net/3c507.c
drivers/net/3c523.c
drivers/net/3c527.c
drivers/net/8139too.c
drivers/net/ac3200.c
drivers/net/apne.c
drivers/net/appletalk/cops.c
drivers/net/arm/at91_ether.c
drivers/net/arm/at91_ether.h
drivers/net/arm/ether1.c
drivers/net/arm/ether3.c
drivers/net/at1700.c
drivers/net/atarilance.c
drivers/net/bnx2.c
drivers/net/bonding/bond_main.c
drivers/net/cassini.c
drivers/net/chelsio/common.h
drivers/net/chelsio/cphy.h
drivers/net/chelsio/cxgb2.c
drivers/net/chelsio/my3126.c
drivers/net/cs89x0.c
drivers/net/de600.c
drivers/net/declance.c
drivers/net/e100.c
drivers/net/e1000/e1000_main.c
drivers/net/e2100.c
drivers/net/eepro.c
drivers/net/eexpress.c
drivers/net/ehea/ehea_main.c
drivers/net/es3210.c
drivers/net/eth16i.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/dmascc.c
drivers/net/hp-plus.c
drivers/net/hp.c
drivers/net/ibm_emac/ibm_emac_mal.h
drivers/net/ibmveth.c
drivers/net/ibmveth.h
drivers/net/irda/mcs7780.c
drivers/net/irda/pxaficp_ir.c
drivers/net/irda/sir-dev.h
drivers/net/irda/sir_dev.c
drivers/net/irda/stir4200.c
drivers/net/iseries_veth.c
drivers/net/ixgb/ixgb_main.c
drivers/net/lance.c
drivers/net/lasi_82596.c
drivers/net/lne390.c
drivers/net/mv643xx_eth.c
drivers/net/mvme147.c
drivers/net/myri10ge/myri10ge.c
drivers/net/myri10ge/myri10ge_mcp.h
drivers/net/myri10ge/myri10ge_mcp_gen_header.h
drivers/net/ne.c
drivers/net/ne2.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_ethtool.c
drivers/net/netxen/netxen_nic_hdr.h
drivers/net/netxen/netxen_nic_hw.c
drivers/net/netxen/netxen_nic_hw.h
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_ioctl.h
drivers/net/netxen/netxen_nic_isr.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/netxen/netxen_nic_niu.c
drivers/net/netxen/netxen_nic_phan_reg.h
drivers/net/ni52.c
drivers/net/ni65.c
drivers/net/ns83820.c
drivers/net/pcmcia/3c574_cs.c
drivers/net/pcmcia/3c589_cs.c
drivers/net/pcmcia/axnet_cs.c
drivers/net/pcmcia/com20020_cs.c
drivers/net/pcmcia/fmvj18x_cs.c
drivers/net/pcmcia/ibmtr_cs.c
drivers/net/pcmcia/nmclan_cs.c
drivers/net/pcmcia/pcnet_cs.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/pcmcia/xirc2ps_cs.c
drivers/net/phy/fixed.c
drivers/net/phy/phy.c
drivers/net/plip.c
drivers/net/qla3xxx.c
drivers/net/qla3xxx.h
drivers/net/r8169.c
drivers/net/s2io.c
drivers/net/s2io.h
drivers/net/seeq8005.c
drivers/net/sis190.c
drivers/net/sk98lin/skgesirq.c
drivers/net/skge.c
drivers/net/skge.h
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/smc-ultra.c
drivers/net/smc-ultra32.c
drivers/net/smc9194.c
drivers/net/smc91x.c
drivers/net/smc91x.h
drivers/net/spider_net.c
drivers/net/sun3lance.c
drivers/net/sungem.c
drivers/net/tg3.c
drivers/net/tg3.h
drivers/net/tlan.c
drivers/net/tlan.h
drivers/net/tokenring/ibmtr.c
drivers/net/tokenring/smctr.c
drivers/net/tulip/21142.c
drivers/net/tulip/de4x5.c
drivers/net/tulip/timer.c
drivers/net/tulip/tulip.h
drivers/net/tulip/tulip_core.c
drivers/net/wan/pc300_tty.c
drivers/net/wd.c
drivers/net/wireless/airo.c
drivers/net/wireless/airo_cs.c
drivers/net/wireless/atmel_cs.c
drivers/net/wireless/bcm43xx/bcm43xx.h
drivers/net/wireless/bcm43xx/bcm43xx_main.c
drivers/net/wireless/hostap/hostap.h
drivers/net/wireless/hostap/hostap_ap.c
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/hostap/hostap_download.c
drivers/net/wireless/hostap/hostap_hw.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/hostap/hostap_main.c
drivers/net/wireless/hostap/hostap_pci.c
drivers/net/wireless/hostap/hostap_plx.c
drivers/net/wireless/ipw2100.c
drivers/net/wireless/ipw2100.h
drivers/net/wireless/ipw2200.c
drivers/net/wireless/ipw2200.h
drivers/net/wireless/netwave_cs.c
drivers/net/wireless/orinoco.c
drivers/net/wireless/orinoco_cs.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/isl_ioctl.h
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/islpci_eth.c
drivers/net/wireless/prism54/islpci_eth.h
drivers/net/wireless/prism54/islpci_mgt.c
drivers/net/wireless/prism54/oid_mgt.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/spectrum_cs.c
drivers/net/wireless/wavelan_cs.c
drivers/net/wireless/wl3501_cs.c
drivers/net/wireless/zd1211rw/zd_chip.c
drivers/net/wireless/zd1211rw/zd_chip.h
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/wireless/zd1211rw/zd_mac.h
drivers/net/wireless/zd1211rw/zd_netdev.c
drivers/oprofile/cpu_buffer.c
drivers/oprofile/cpu_buffer.h
drivers/parport/parport_cs.c
drivers/parport/parport_pc.c
drivers/pci/hotplug/shpchp.h
drivers/pci/hotplug/shpchp_core.c
drivers/pci/hotplug/shpchp_ctrl.c
drivers/pci/msi.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv.h
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/probe.c
drivers/pcmcia/at91_cf.c
drivers/pcmcia/cs.c
drivers/pcmcia/cs_internal.h
drivers/pcmcia/ds.c
drivers/pcmcia/m32r_cfc.c
drivers/pcmcia/pcmcia_ioctl.c
drivers/pcmcia/pd6729.c
drivers/pcmcia/socket_sysfs.c
drivers/pnp/card.c
drivers/pnp/interface.c
drivers/pnp/pnpbios/core.c
drivers/ps3/Makefile [new file with mode: 0644]
drivers/ps3/system-bus.c [new file with mode: 0644]
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-dev.c
drivers/rtc/rtc-ds1672.c
drivers/rtc/rtc-ds1742.c
drivers/rtc/rtc-omap.c [new file with mode: 0644]
drivers/rtc/rtc-rs5c372.c
drivers/rtc/rtc-test.c
drivers/rtc/rtc-x1205.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/css.c
drivers/s390/crypto/ap_bus.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_main.c
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/53c700.c
drivers/scsi/53c700.h
drivers/scsi/BusLogic.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/NCR53c406a.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/aha152x.c
drivers/scsi/aha1740.c
drivers/scsi/aic7xxx/aic79xx_osm_pci.c
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/aic7xxx/aic79xx_pci.h
drivers/scsi/aic94xx/aic94xx.h
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/aic94xx/aic94xx_scb.c
drivers/scsi/fd_mcs.c
drivers/scsi/hosts.c
drivers/scsi/ibmvscsi/Makefile
drivers/scsi/ibmvscsi/ibmvstgt.c [new file with mode: 0644]
drivers/scsi/ide-scsi.c
drivers/scsi/imm.c
drivers/scsi/initio.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/ips.c
drivers/scsi/ips.h
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_event.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_init.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_phy.c
drivers/scsi/libsas/sas_port.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/libsrp.c [new file with mode: 0644]
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid.c
drivers/scsi/megaraid.h
drivers/scsi/megaraid/megaraid_sas.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/oktagon_esp.c
drivers/scsi/pcmcia/aha152x_stub.c
drivers/scsi/pcmcia/fdomain_stub.c
drivers/scsi/pcmcia/nsp_cs.c
drivers/scsi/pcmcia/qlogic_stub.c
drivers/scsi/pcmcia/sym53c500_cs.c
drivers/scsi/ppa.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla4xxx/ql4_dbg.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_inline.h
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_nvram.c
drivers/scsi/qla4xxx/ql4_nvram.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_tgt_if.c [new file with mode: 0644]
drivers/scsi/scsi_tgt_lib.c [new file with mode: 0644]
drivers/scsi/scsi_tgt_priv.h [new file with mode: 0644]
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/scsi_wait_scan.c [new file with mode: 0644]
drivers/scsi/sd.c
drivers/scsi/st.c
drivers/scsi/stex.c
drivers/scsi/t128.h
drivers/serial/8250_exar_st16c554.c [new file with mode: 0644]
drivers/serial/8250_pnp.c
drivers/serial/Kconfig
drivers/serial/Makefile
drivers/serial/amba-pl010.c
drivers/serial/cpm_uart/cpm_uart_cpm2.c
drivers/serial/dz.c
drivers/serial/dz.h
drivers/serial/mcfserial.c
drivers/serial/mpc52xx_uart.c
drivers/serial/mpsc.c
drivers/serial/serial_cs.c
drivers/serial/sh-sci.c
drivers/serial/sh-sci.h
drivers/serial/uartlite.c [new file with mode: 0644]
drivers/spi/pxa2xx_spi.c
drivers/spi/spi.c
drivers/spi/spi_bitbang.c
drivers/spi/spi_butterfly.c
drivers/telephony/ixj_pcmcia.c
drivers/usb/atm/cxacru.c
drivers/usb/atm/speedtch.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/buffer.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/core/usb.c
drivers/usb/gadget/ether.c
drivers/usb/gadget/file_storage.c
drivers/usb/gadget/gmidi.c
drivers/usb/gadget/goku_udc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/omap_udc.c
drivers/usb/gadget/zero.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/hc_crisv10.c
drivers/usb/host/ohci-dbg.c
drivers/usb/host/ohci-pnx4008.c
drivers/usb/host/sl811_cs.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/uhci-hcd.c
drivers/usb/host/uhci-q.c
drivers/usb/input/acecad.c
drivers/usb/input/aiptek.c
drivers/usb/input/ati_remote.c
drivers/usb/input/hid-core.c
drivers/usb/input/keyspan_remote.c
drivers/usb/input/mtouchusb.c
drivers/usb/input/powermate.c
drivers/usb/input/touchkitusb.c
drivers/usb/input/usbkbd.c
drivers/usb/input/usbmouse.c
drivers/usb/input/usbtouchscreen.c
drivers/usb/input/xpad.c
drivers/usb/input/yealink.c
drivers/usb/misc/appledisplay.c
drivers/usb/misc/ftdi-elan.c
drivers/usb/misc/phidgetkit.c
drivers/usb/misc/phidgetmotorcontrol.c
drivers/usb/misc/usbtest.c
drivers/usb/mon/mon_text.c
drivers/usb/net/catc.c
drivers/usb/net/kaweth.c
drivers/usb/net/net1080.c
drivers/usb/net/pegasus.c
drivers/usb/net/pegasus.h
drivers/usb/net/rndis_host.c
drivers/usb/net/rtl8150.c
drivers/usb/net/usbnet.c
drivers/usb/serial/aircable.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/whiteheat.c
drivers/usb/storage/onetouch.c
drivers/usb/storage/transport.c
drivers/usb/storage/usb.c
drivers/video/console/fbcon.c
drivers/video/geode/gxfb_core.c
drivers/video/platinumfb.c
drivers/video/pxafb.c
drivers/w1/Makefile
drivers/w1/slaves/Makefile
drivers/w1/slaves/w1_ds2433.c
drivers/w1/w1.c
fs/9p/mux.c
fs/9p/vfs_inode.c
fs/adfs/super.c
fs/affs/amigaffs.c
fs/affs/bitmap.c
fs/affs/super.c
fs/afs/kafsasyncd.c
fs/afs/kafstimod.c
fs/afs/server.c
fs/afs/super.c
fs/aio.c
fs/autofs/inode.c
fs/autofs4/inode.c
fs/befs/linuxvfs.c
fs/bfs/inode.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/bio.c
fs/block_dev.c
fs/buffer.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/misc.c
fs/cifs/transport.c
fs/coda/inode.c
fs/compat.c
fs/compat_ioctl.c
fs/configfs/configfs_internal.h
fs/configfs/dir.c
fs/configfs/mount.c
fs/cramfs/inode.c
fs/dcache.c
fs/dcookies.c
fs/dlm/Kconfig
fs/dlm/Makefile
fs/dlm/dlm_internal.h
fs/dlm/lock.c
fs/dlm/lockspace.c
fs/dlm/lowcomms-sctp.c [moved from fs/dlm/lowcomms.c with 87% similarity]
fs/dlm/lowcomms-tcp.c [new file with mode: 0644]
fs/dlm/lowcomms.h
fs/dlm/main.c
fs/dlm/member.c
fs/dlm/memory.c
fs/dlm/rcom.c
fs/dlm/recover.c
fs/dlm/recoverd.c
fs/dlm/requestqueue.c
fs/dlm/requestqueue.h
fs/dnotify.c
fs/dquot.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/ecryptfs/super.c
fs/efs/super.c
fs/eventpoll.c
fs/exec.c
fs/ext2/ioctl.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext3/Makefile
fs/ext3/balloc.c
fs/ext3/dir.c
fs/ext3/ext3_jbd.c [new file with mode: 0644]
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext3/super.c
fs/ext3/xattr.c
fs/ext4/Makefile
fs/ext4/balloc.c
fs/ext4/dir.c
fs/ext4/ext4_jbd2.c [new file with mode: 0644]
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fat/cache.c
fs/fat/inode.c
fs/fcntl.c
fs/file.c
fs/freevxfs/vxfs_inode.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/Kconfig
fs/gfs2/acl.c
fs/gfs2/acl.h
fs/gfs2/bmap.c
fs/gfs2/daemon.c
fs/gfs2/dir.c
fs/gfs2/dir.h
fs/gfs2/eaops.c
fs/gfs2/eattr.c
fs/gfs2/eattr.h
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/lops.c
fs/gfs2/lops.h
fs/gfs2/main.c
fs/gfs2/meta_io.c
fs/gfs2/meta_io.h
fs/gfs2/ondisk.c
fs/gfs2/ops_address.c
fs/gfs2/ops_dentry.c
fs/gfs2/ops_export.c
fs/gfs2/ops_export.h
fs/gfs2/ops_file.c
fs/gfs2/ops_file.h
fs/gfs2/ops_fstype.c
fs/gfs2/ops_inode.c
fs/gfs2/ops_super.c
fs/gfs2/ops_vm.c
fs/gfs2/quota.c
fs/gfs2/recovery.c
fs/gfs2/recovery.h
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/gfs2/super.h
fs/gfs2/sys.c
fs/gfs2/util.c
fs/gfs2/util.h
fs/hfs/super.c
fs/hfsplus/super.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/ea.c
fs/hpfs/hpfs_fn.h
fs/hpfs/inode.c
fs/hpfs/map.c
fs/hpfs/super.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/inotify_user.c
fs/isofs/inode.c
fs/jbd/journal.c
fs/jbd/revoke.c
fs/jbd/transaction.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs/inode-v23.c
fs/jffs/intrep.c
fs/jffs/jffs_fm.c
fs/jffs2/background.c
fs/jffs2/malloc.c
fs/jffs2/super.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/jfs/jfs_txnmgr.c
fs/jfs/super.c
fs/lockd/clntproc.c
fs/lockd/host.c
fs/lockd/svc4proc.c
fs/lockd/svcproc.c
fs/locks.c
fs/mbcache.c
fs/minix/inode.c
fs/namei.c
fs/namespace.c
fs/ncpfs/inode.c
fs/ncpfs/sock.c
fs/nfs/client.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4renewd.c
fs/nfs/pagelist.c
fs/nfs/proc.c
fs/nfs/read.c
fs/nfs/symlink.c
fs/nfs/write.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsxdr.c
fs/nls/nls_cp936.c
fs/ntfs/attrib.c
fs/ntfs/index.c
fs/ntfs/inode.c
fs/ntfs/unistr.c
fs/ocfs2/alloc.c
fs/ocfs2/alloc.h
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/cluster/quorum.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/cluster/tcp_internal.h
fs/ocfs2/dir.c
fs/ocfs2/dir.h
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmfs.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/userdlm.c
fs/ocfs2/dlmglue.c
fs/ocfs2/dlmglue.h
fs/ocfs2/export.c
fs/ocfs2/extent_map.c
fs/ocfs2/file.c
fs/ocfs2/file.h
fs/ocfs2/inode.c
fs/ocfs2/inode.h
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/journal.h
fs/ocfs2/localalloc.c
fs/ocfs2/localalloc.h
fs/ocfs2/mmap.c
fs/ocfs2/namei.c
fs/ocfs2/namei.h
fs/ocfs2/ocfs2.h
fs/ocfs2/suballoc.c
fs/ocfs2/suballoc.h
fs/ocfs2/super.c
fs/ocfs2/symlink.c
fs/ocfs2/uptodate.c
fs/openpromfs/inode.c
fs/partitions/amiga.c
fs/partitions/atari.c
fs/partitions/check.c
fs/partitions/ibm.c
fs/pipe.c
fs/proc/Makefile
fs/proc/base.c
fs/proc/inode.c
fs/proc/kcore.c
fs/proc/proc_misc.c
fs/qnx4/inode.c
fs/reiserfs/file.c
fs/reiserfs/inode.c
fs/reiserfs/journal.c
fs/reiserfs/super.c
fs/romfs/inode.c
fs/seq_file.c
fs/smbfs/inode.c
fs/smbfs/request.c
fs/stat.c
fs/sysfs/mount.c
fs/sysfs/sysfs.h
fs/sysv/CHANGES [deleted file]
fs/sysv/ChangeLog [deleted file]
fs/sysv/INTRO [deleted file]
fs/sysv/inode.c
fs/udf/super.c
fs/ufs/super.c
fs/ufs/util.h
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_super.c
include/acpi/platform/aclinux.h
include/asm-alpha/dma-mapping.h
include/asm-alpha/unistd.h
include/asm-arm/arch-omap/irda.h
include/asm-arm/dma-mapping.h
include/asm-arm/setup.h
include/asm-arm/unistd.h
include/asm-arm26/pgalloc.h
include/asm-arm26/setup.h
include/asm-arm26/unistd.h
include/asm-avr32/dma-mapping.h
include/asm-avr32/setup.h
include/asm-avr32/types.h
include/asm-cris/arch-v10/bitops.h
include/asm-cris/dma-mapping.h
include/asm-cris/semaphore-helper.h
include/asm-frv/dma-mapping.h
include/asm-frv/highmem.h
include/asm-frv/param.h
include/asm-frv/setup.h
include/asm-frv/unistd.h
include/asm-generic/Kbuild
include/asm-generic/Kbuild.asm
include/asm-generic/atomic.h
include/asm-generic/dma-mapping.h
include/asm-generic/futex.h
include/asm-generic/vmlinux.lds.h
include/asm-h8300/delay.h
include/asm-h8300/mmu_context.h
include/asm-h8300/pci.h
include/asm-h8300/tlbflush.h
include/asm-h8300/types.h
include/asm-h8300/unistd.h
include/asm-i386/Kbuild
include/asm-i386/alternative.h
include/asm-i386/apic.h
include/asm-i386/atomic.h
include/asm-i386/boot.h
include/asm-i386/bugs.h
include/asm-i386/cpu.h
include/asm-i386/cpufeature.h
include/asm-i386/current.h
include/asm-i386/delay.h
include/asm-i386/desc.h
include/asm-i386/dma-mapping.h
include/asm-i386/e820.h
include/asm-i386/elf.h
include/asm-i386/futex.h
include/asm-i386/genapic.h
include/asm-i386/i387.h
include/asm-i386/io.h
include/asm-i386/irq.h
include/asm-i386/irq_regs.h
include/asm-i386/irqflags.h
include/asm-i386/mach-default/setup_arch.h
include/asm-i386/math_emu.h
include/asm-i386/mmu_context.h
include/asm-i386/mmzone.h
include/asm-i386/module.h
include/asm-i386/mpspec_def.h
include/asm-i386/msr.h
include/asm-i386/nmi.h
include/asm-i386/page.h
include/asm-i386/param.h
include/asm-i386/paravirt.h [new file with mode: 0644]
include/asm-i386/pda.h [new file with mode: 0644]
include/asm-i386/percpu.h
include/asm-i386/pgtable-2level.h
include/asm-i386/pgtable-3level.h
include/asm-i386/pgtable.h
include/asm-i386/processor.h
include/asm-i386/ptrace.h
include/asm-i386/rwsem.h
include/asm-i386/segment.h
include/asm-i386/setup.h
include/asm-i386/smp.h
include/asm-i386/spinlock.h
include/asm-i386/spinlock_types.h
include/asm-i386/suspend.h
include/asm-i386/system.h
include/asm-i386/thread_info.h
include/asm-i386/time.h [new file with mode: 0644]
include/asm-i386/tlbflush.h
include/asm-i386/types.h
include/asm-i386/unistd.h
include/asm-i386/unwind.h
include/asm-i386/vm86.h
include/asm-ia64/Kbuild
include/asm-ia64/dma-mapping.h
include/asm-ia64/futex.h
include/asm-ia64/pgalloc.h
include/asm-m32r/setup.h
include/asm-m32r/unistd.h
include/asm-m68k/dma-mapping.h
include/asm-m68k/setup.h
include/asm-m68k/unistd.h
include/asm-m68knommu/irq.h
include/asm-m68knommu/rtc.h [new file with mode: 0644]
include/asm-m68knommu/setup.h
include/asm-m68knommu/ucontext.h
include/asm-m68knommu/unistd.h
include/asm-mips/atomic.h
include/asm-mips/barrier.h [new file with mode: 0644]
include/asm-mips/bitops.h
include/asm-mips/compat.h
include/asm-mips/dma-mapping.h
include/asm-mips/futex.h
include/asm-mips/highmem.h
include/asm-mips/i8259.h
include/asm-mips/pgtable-32.h
include/asm-mips/pgtable-64.h
include/asm-mips/setup.h
include/asm-mips/sn/klconfig.h
include/asm-mips/spinlock.h
include/asm-mips/system.h
include/asm-mips/types.h
include/asm-mips/unistd.h
include/asm-parisc/dma-mapping.h
include/asm-parisc/futex.h
include/asm-powerpc/Kbuild
include/asm-powerpc/cell-pmu.h [new file with mode: 0644]
include/asm-powerpc/cputable.h
include/asm-powerpc/dbdma.h
include/asm-powerpc/dcr-mmio.h [new file with mode: 0644]
include/asm-powerpc/dcr-native.h [new file with mode: 0644]
include/asm-powerpc/dcr.h [new file with mode: 0644]
include/asm-powerpc/device.h
include/asm-powerpc/dma-mapping.h
include/asm-powerpc/eeh.h
include/asm-powerpc/elf.h
include/asm-powerpc/firmware.h
include/asm-powerpc/futex.h
include/asm-powerpc/hw_irq.h
include/asm-powerpc/ibmebus.h
include/asm-powerpc/ide.h
include/asm-powerpc/immap_qe.h
include/asm-powerpc/io-defs.h [new file with mode: 0644]
include/asm-powerpc/io.h
include/asm-powerpc/iommu.h
include/asm-powerpc/irq.h
include/asm-powerpc/iseries/iommu.h
include/asm-powerpc/lv1call.h [new file with mode: 0644]
include/asm-powerpc/machdep.h
include/asm-powerpc/mmu.h
include/asm-powerpc/mpc52xx.h [new file with mode: 0644]
include/asm-powerpc/mpc85xx.h
include/asm-powerpc/mpic.h
include/asm-powerpc/of_device.h
include/asm-powerpc/of_platform.h [new file with mode: 0644]
include/asm-powerpc/oprofile_impl.h
include/asm-powerpc/paca.h
include/asm-powerpc/pci-bridge.h
include/asm-powerpc/pci.h
include/asm-powerpc/pgalloc.h
include/asm-powerpc/ppc-pci.h
include/asm-powerpc/processor.h
include/asm-powerpc/prom.h
include/asm-powerpc/ps3.h [new file with mode: 0644]
include/asm-powerpc/rtas.h
include/asm-powerpc/setup.h
include/asm-powerpc/sparsemem.h
include/asm-powerpc/spu.h
include/asm-powerpc/spu_csa.h
include/asm-powerpc/spu_info.h [new file with mode: 0644]
include/asm-powerpc/spu_priv1.h
include/asm-powerpc/todc.h [deleted file]
include/asm-powerpc/topology.h
include/asm-powerpc/tsi108.h
include/asm-powerpc/types.h
include/asm-powerpc/uaccess.h
include/asm-powerpc/unistd.h
include/asm-powerpc/vio.h
include/asm-powerpc/xmon.h
include/asm-ppc/highmem.h
include/asm-ppc/io.h
include/asm-ppc/m48t35.h
include/asm-ppc/mpc52xx.h
include/asm-ppc/mpc83xx.h
include/asm-ppc/mpc85xx.h
include/asm-ppc/pci-bridge.h
include/asm-s390/setup.h
include/asm-s390/types.h
include/asm-s390/unistd.h
include/asm-sh/atomic.h
include/asm-sh/bugs.h
include/asm-sh/clock.h
include/asm-sh/cpu-sh2/cache.h
include/asm-sh/cpu-sh2/freq.h [new file with mode: 0644]
include/asm-sh/cpu-sh2/mmu_context.h [new file with mode: 0644]
include/asm-sh/cpu-sh2/timer.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/addrspace.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/cache.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/cacheflush.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/dma.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/freq.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/mmu_context.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/timer.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/ubc.h [new file with mode: 0644]
include/asm-sh/cpu-sh2a/watchdog.h [new file with mode: 0644]
include/asm-sh/dma-mapping.h
include/asm-sh/dma.h
include/asm-sh/elf.h
include/asm-sh/entry-macros.S [new file with mode: 0644]
include/asm-sh/irq-sh73180.h [deleted file]
include/asm-sh/irq-sh7343.h [deleted file]
include/asm-sh/irq-sh7780.h [deleted file]
include/asm-sh/irq.h
include/asm-sh/irqflags.h [new file with mode: 0644]
include/asm-sh/mmu_context.h
include/asm-sh/page.h
include/asm-sh/pgalloc.h
include/asm-sh/pgtable-2level.h [deleted file]
include/asm-sh/pgtable.h
include/asm-sh/processor.h
include/asm-sh/push-switch.h [new file with mode: 0644]
include/asm-sh/rwsem.h
include/asm-sh/se7206.h [new file with mode: 0644]
include/asm-sh/setup.h
include/asm-sh/system.h
include/asm-sh/thread_info.h
include/asm-sh/timer.h
include/asm-sh/titan.h
include/asm-sh/types.h
include/asm-sh/unistd.h
include/asm-sh64/dma-mapping.h
include/asm-sh64/setup.h
include/asm-sh64/unistd.h
include/asm-sparc/unistd.h
include/asm-sparc64/dma-mapping.h
include/asm-sparc64/futex.h
include/asm-sparc64/pgalloc.h
include/asm-sparc64/unistd.h
include/asm-um/dma-mapping.h
include/asm-v850/irq.h
include/asm-v850/unistd.h
include/asm-x86_64/Kbuild
include/asm-x86_64/alternative.h
include/asm-x86_64/atomic.h
include/asm-x86_64/calgary.h
include/asm-x86_64/cpufeature.h
include/asm-x86_64/delay.h
include/asm-x86_64/desc.h
include/asm-x86_64/desc_defs.h [new file with mode: 0644]
include/asm-x86_64/dma-mapping.h
include/asm-x86_64/futex.h
include/asm-x86_64/genapic.h
include/asm-x86_64/msr.h
include/asm-x86_64/nmi.h
include/asm-x86_64/pci-direct.h
include/asm-x86_64/pgtable.h
include/asm-x86_64/processor.h
include/asm-x86_64/proto.h
include/asm-x86_64/rio.h [new file with mode: 0644]
include/asm-x86_64/smp.h
include/asm-x86_64/spinlock.h
include/asm-x86_64/spinlock_types.h
include/asm-x86_64/stacktrace.h
include/asm-x86_64/types.h
include/asm-x86_64/unistd.h
include/asm-x86_64/unwind.h
include/asm-x86_64/vsyscall.h
include/asm-xtensa/dma-mapping.h
include/asm-xtensa/unistd.h
include/crypto/b128ops.h [new file with mode: 0644]
include/crypto/gf128mul.h [new file with mode: 0644]
include/linux/Kbuild
include/linux/aio.h
include/linux/audit.h
include/linux/bootmem.h
include/linux/bottom_half.h [new file with mode: 0644]
include/linux/carta_random32.h [deleted file]
include/linux/cciss_ioctl.h
include/linux/cdev.h
include/linux/connector.h
include/linux/cpu.h
include/linux/cpuset.h
include/linux/crypto.h
include/linux/debug_locks.h
include/linux/delayacct.h
include/linux/device.h
include/linux/efi.h
include/linux/elf.h
include/linux/ext3_jbd.h
include/linux/ext4_jbd2.h
include/linux/file.h
include/linux/freezer.h [new file with mode: 0644]
include/linux/fs.h
include/linux/fs_struct.h
include/linux/fuse.h
include/linux/genetlink.h
include/linux/gfp.h
include/linux/gfs2_ondisk.h
include/linux/highmem.h
include/linux/hugetlb.h
include/linux/i2o.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/ipmi.h
include/linux/ipmi_msgdefs.h
include/linux/ipmi_smi.h
include/linux/jbd.h
include/linux/jbd2.h
include/linux/kbd_kern.h
include/linux/kexec.h
include/linux/kprobes.h
include/linux/ktime.h
include/linux/libata.h
include/linux/lockd/lockd.h
include/linux/lockdep.h
include/linux/mm.h
include/linux/mmc/host.h
include/linux/mmzone.h
include/linux/moduleparam.h
include/linux/msg.h
include/linux/mutex.h
include/linux/nbd.h
include/linux/ncp_fs_sb.h
include/linux/netfilter/nf_conntrack_pptp.h
include/linux/netpoll.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/nfs_page.h
include/linux/nfs_xdr.h
include/linux/nmi.h
include/linux/pci_ids.h
include/linux/pfkeyv2.h
include/linux/profile.h
include/linux/quotaops.h
include/linux/radix-tree.h
include/linux/raid/raid5.h
include/linux/reiserfs_fs.h
include/linux/reiserfs_fs_sb.h
include/linux/relay.h
include/linux/rmap.h
include/linux/rtmutex.h
include/linux/rwsem-spinlock.h
include/linux/sched.h
include/linux/screen_info.h
include/linux/seq_file.h
include/linux/serial_8250.h
include/linux/serial_core.h
include/linux/signal.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/smp.h
include/linux/spinlock.h
include/linux/start_kernel.h [new file with mode: 0644]
include/linux/sunrpc/auth_gss.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/debug.h
include/linux/sunrpc/gss_krb5.h
include/linux/sunrpc/gss_spkm3.h
include/linux/sunrpc/rpc_pipe_fs.h
include/linux/sunrpc/sched.h
include/linux/sunrpc/xdr.h
include/linux/sunrpc/xprt.h
include/linux/suspend.h
include/linux/swap.h
include/linux/taskstats_kern.h
include/linux/tty.h
include/linux/types.h
include/linux/uaccess.h
include/linux/usb.h
include/linux/workqueue.h
include/net/dst.h
include/net/ieee80211softmac.h
include/net/inet_hashtables.h
include/net/inet_timewait_sock.h
include/net/irda/irlan_filter.h
include/net/neighbour.h
include/net/netfilter/nf_conntrack_expect.h
include/net/request_sock.h
include/net/sctp/structs.h
include/net/sock.h
include/net/timewait_sock.h
include/net/xfrm.h
include/pcmcia/ss.h
include/scsi/libsas.h
include/scsi/libsrp.h [new file with mode: 0644]
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/scsi/scsi_host.h
include/scsi/scsi_tgt.h [new file with mode: 0644]
include/scsi/scsi_tgt_if.h [new file with mode: 0644]
include/scsi/scsi_transport_fc.h
include/scsi/scsi_transport_iscsi.h
include/scsi/scsi_transport_sas.h
include/sound/ac97_codec.h
include/sound/ak4114.h
init/do_mounts_initrd.c
init/initramfs.c
init/main.c
ipc/compat.c
ipc/mqueue.c
ipc/msg.c
ipc/sem.c
ipc/util.c
kernel/Kconfig.hz
kernel/acct.c
kernel/audit.c
kernel/auditfilter.c
kernel/auditsc.c
kernel/configs.c
kernel/cpu.c
kernel/cpuset.c
kernel/delayacct.c
kernel/dma.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/irq/handle.c
kernel/kallsyms.c
kernel/kexec.c
kernel/kmod.c
kernel/kprobes.c
kernel/kthread.c
kernel/lockdep.c
kernel/lockdep_internals.h
kernel/lockdep_proc.c
kernel/module.c
kernel/mutex-debug.c
kernel/pid.c
kernel/posix-timers.c
kernel/power/Kconfig
kernel/power/disk.c
kernel/power/main.c
kernel/power/power.h
kernel/power/poweroff.c
kernel/power/process.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/power/swsusp.c
kernel/power/user.c
kernel/printk.c
kernel/profile.c
kernel/rcupdate.c
kernel/rcutorture.c
kernel/relay.c
kernel/resource.c
kernel/rtmutex-tester.c
kernel/sched.c
kernel/signal.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/taskstats.c
kernel/unwind.c
kernel/user.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/cmdline.c
lib/idr.c
lib/iomap.c
lib/kobject.c
lib/list_debug.c
lib/locking-selftest.c
lib/radix-tree.c
lib/spinlock_debug.c
mm/allocpercpu.c
mm/bootmem.c
mm/filemap.c
mm/fremap.c
mm/hugetlb.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/mmap.c
mm/mmzone.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_io.c
mm/pdflush.c
mm/readahead.c
mm/shmem.c
mm/slab.c
mm/sparse.c
mm/swap.c
mm/swapfile.c
mm/thrash.c
mm/vmscan.c
mm/vmstat.c
net/atm/lec.c
net/atm/lec.h
net/bluetooth/hci_sysfs.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_netfilter.c
net/bridge/br_private.h
net/core/dev.c
net/core/dst.c
net/core/flow.c
net/core/link_watch.c
net/core/neighbour.c
net/core/netpoll.c
net/core/skbuff.c
net/core/sock.c
net/core/wireless.c
net/dccp/ackvec.c
net/dccp/ccid.c
net/dccp/ccid.h
net/dccp/ccids/ccid3.c
net/dccp/ccids/lib/loss_interval.c
net/dccp/ccids/lib/loss_interval.h
net/dccp/ccids/lib/packet_history.h
net/dccp/minisocks.c
net/decnet/dn_table.c
net/ieee80211/softmac/ieee80211softmac_assoc.c
net/ieee80211/softmac/ieee80211softmac_auth.c
net/ieee80211/softmac/ieee80211softmac_event.c
net/ieee80211/softmac/ieee80211softmac_module.c
net/ieee80211/softmac/ieee80211softmac_priv.h
net/ieee80211/softmac/ieee80211softmac_scan.c
net/ieee80211/softmac/ieee80211softmac_wx.c
net/ipv4/fib_hash.c
net/ipv4/fib_trie.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/inetpeer.c
net/ipv4/ipmr.c
net/ipv4/ipvs/ip_vs_conn.c
net/ipv4/ipvs/ip_vs_ctl.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_conntrack_core.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv4/xfrm4_policy.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/raw.c
net/ipv6/xfrm6_tunnel.c
net/irda/ircomm/ircomm_tty.c
net/irda/irttp.c
net/key/af_key.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/xt_hashlimit.c
net/netlink/genetlink.c
net/packet/af_packet.c
net/rxrpc/krxiod.c
net/rxrpc/krxsecd.c
net/rxrpc/krxtimod.c
net/sched/cls_fw.c
net/sctp/associola.c
net/sctp/endpointola.c
net/sctp/inqueue.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/socket.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_krb5_seal.c
net/sunrpc/auth_gss/gss_krb5_unseal.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/auth_gss/gss_spkm3_mech.c
net/sunrpc/auth_gss/gss_spkm3_seal.c
net/sunrpc/auth_gss/gss_spkm3_token.c
net/sunrpc/auth_gss/gss_spkm3_unseal.c
net/sunrpc/cache.c
net/sunrpc/clnt.c
net/sunrpc/pmap_clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/sched.c
net/sunrpc/socklib.c
net/sunrpc/sunrpc_syms.c
net/sunrpc/svcauth.c
net/sunrpc/svcsock.c
net/sunrpc/sysctl.c
net/sunrpc/xdr.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/handler.c
net/wanrouter/wanmain.c
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/kallsyms.c
scripts/kconfig/qconf.cc
scripts/kernel-doc
scripts/mod/modpost.c
scripts/ver_linux
security/keys/key.c
security/keys/keyring.c
security/keys/process_keys.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/avc.h
security/selinux/ss/avtab.c
sound/aoa/aoa-gpio.h
sound/aoa/core/snd-aoa-gpio-feature.c
sound/aoa/core/snd-aoa-gpio-pmf.c
sound/arm/sa11xx-uda1341.c
sound/core/pcm_native.c
sound/i2c/other/ak4114.c
sound/oss/Kconfig
sound/oss/btaudio.c
sound/oss/emu10k1/audio.c
sound/oss/emu10k1/cardwi.c
sound/oss/emu10k1/cardwi.h
sound/oss/emu10k1/passthrough.c
sound/oss/via82cxxx_audio.c
sound/pci/ac97/ac97_codec.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_local.h
sound/pcmcia/pdaudiocf/pdaudiocf.c
sound/pcmcia/vx/vxpocket.c
sound/ppc/tumbler.c
sound/usb/usx2y/usX2Yhwdep.c

index 05431621c861d7b9f0228a228414beb3fb3ee52e..805db4b2cba65b65bd966f054c71ca13427f9287 100644 (file)
@@ -77,7 +77,7 @@ To get this part of the dma_ API, you must #include <linux/dmapool.h>
 Many drivers need lots of small dma-coherent memory regions for DMA
 descriptors or I/O buffers.  Rather than allocating in units of a page
 or more using dma_alloc_coherent(), you can use DMA pools.  These work
-much like a kmem_cache_t, except that they use the dma-coherent allocator
+much like a struct kmem_cache, except that they use the dma-coherent allocator
 not __get_free_pages().  Also, they understand common hardware constraints
 for alignment, like queue heads needing to be aligned on N byte boundaries.
 
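As a rough illustration of the pool API described in the hunk above, a driver creates a pool, allocates descriptors from it and hands the returned bus address to the hardware.  The device pointer, descriptor size and alignment below are placeholders for illustration, not part of this commit:

  #include <linux/dmapool.h>
  #include <linux/errno.h>
  #include <linux/gfp.h>

  #define MY_DESC_SIZE  64        /* placeholder descriptor size */
  #define MY_DESC_ALIGN 16        /* placeholder hardware alignment */

  static int my_setup(struct device *dev)
  {
          struct dma_pool *pool;
          dma_addr_t dma;
          void *desc;

          /* Like kmem_cache_create(), but backed by dma-coherent memory. */
          pool = dma_pool_create("my-descs", dev, MY_DESC_SIZE,
                                 MY_DESC_ALIGN, 0);
          if (!pool)
                  return -ENOMEM;

          /* Each allocation yields a CPU pointer and a bus address. */
          desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
          if (!desc) {
                  dma_pool_destroy(pool);
                  return -ENOMEM;
          }

          /* ... program "dma" into the device, touch "desc" from the CPU ... */

          dma_pool_free(pool, desc, dma);
          dma_pool_destroy(pool);
          return 0;
  }
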
@@ -94,7 +94,7 @@ The pool create() routines initialize a pool of dma-coherent buffers
 for use with a given device.  It must be called in a context which
 can sleep.
 
-The "name" is for diagnostics (like a kmem_cache_t name); dev and size
+The "name" is for diagnostics (like a struct kmem_cache name); dev and size
 are like what you'd pass to dma_alloc_coherent().  The device's hardware
 alignment requirement for this type of data is "align" (which is expressed
 in bytes, and must be a power of two).  If your device has no boundary
@@ -431,10 +431,10 @@ be identical to those passed in (and returned by
 dma_alloc_noncoherent()).
 
 int
-dma_is_consistent(dma_addr_t dma_handle)
+dma_is_consistent(struct device *dev, dma_addr_t dma_handle)
 
-returns true if the memory pointed to by the dma_handle is actually
-consistent.
+returns true if the device dev is performing consistent DMA on the memory
+area pointed to by the dma_handle.
 
 int
 dma_get_cache_alignment(void)
@@ -459,7 +459,7 @@ anything like this.  You must also be extra careful about accessing
 memory you intend to sync partially.
 
 void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
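The last two hunks of this file add a struct device argument to dma_is_consistent() and dma_cache_sync().  A minimal sketch of the updated calling convention for noncoherent memory, with the device pointer and buffer size as placeholders, might look like:

  #include <linux/dma-mapping.h>
  #include <linux/errno.h>
  #include <linux/gfp.h>
  #include <linux/string.h>

  #define BUF_SIZE 4096                   /* placeholder buffer size */

  static int fill_noncoherent(struct device *dev)
  {
          dma_addr_t handle;
          void *buf;

          buf = dma_alloc_noncoherent(dev, BUF_SIZE, &handle, GFP_KERNEL);
          if (!buf)
                  return -ENOMEM;

          memset(buf, 0, BUF_SIZE);

          /* Both calls now take the device, as documented above. */
          if (!dma_is_consistent(dev, handle))
                  dma_cache_sync(dev, buf, BUF_SIZE, DMA_TO_DEVICE);

          /* ... let the device read the buffer ... */

          dma_free_noncoherent(dev, BUF_SIZE, buf, handle);
          return 0;
  }
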
index db9499adbed4df18eb23c8b6e903baf8ced64e6c..36526a1e76d753e83bc752acf584e93d5db2f0b5 100644 (file)
@@ -190,9 +190,13 @@ quiet_cmd_fig2png = FIG2PNG $@
 ###
 # Help targets as used by the top-level makefile
 dochelp:
-       @echo  '  Linux kernel internal documentation in different formats:'
-       @echo  '  xmldocs (XML DocBook), psdocs (Postscript), pdfdocs (PDF)'
-       @echo  '  htmldocs (HTML), mandocs (man pages, use installmandocs to install)'
+       @echo  ' Linux kernel internal documentation in different formats:'
+       @echo  '  htmldocs        - HTML'
+       @echo  '  installmandocs  - install man pages generated by mandocs'
+       @echo  '  mandocs         - man pages'
+       @echo  '  pdfdocs         - PDF'
+       @echo  '  psdocs          - Postscript'
+       @echo  '  xmldocs         - XML DocBook'
 
 ###
 # Temporary files left by various tools
index a166675c4303150c7be6111f917b0f2bcd422998..ca094913c5555fc99caeadf5b964d0d6e0195de0 100644 (file)
@@ -418,9 +418,35 @@ X!Edrivers/pnp/system.c
 !Idrivers/parport/daisy.c
   </chapter>
 
-  <chapter id="viddev">
-     <title>Video4Linux</title>
-!Edrivers/media/video/videodev.c
+  <chapter id="message_devices">
+       <title>Message-based devices</title>
+     <sect1><title>Fusion message devices</title>
+!Edrivers/message/fusion/mptbase.c
+!Idrivers/message/fusion/mptbase.c
+!Edrivers/message/fusion/mptscsih.c
+!Idrivers/message/fusion/mptscsih.c
+!Idrivers/message/fusion/mptctl.c
+!Idrivers/message/fusion/mptspi.c
+!Idrivers/message/fusion/mptfc.c
+!Idrivers/message/fusion/mptlan.c
+     </sect1>
+     <sect1><title>I2O message devices</title>
+!Iinclude/linux/i2o.h
+!Idrivers/message/i2o/core.h
+!Edrivers/message/i2o/iop.c
+!Idrivers/message/i2o/iop.c
+!Idrivers/message/i2o/config-osm.c
+!Edrivers/message/i2o/exec-osm.c
+!Idrivers/message/i2o/exec-osm.c
+!Idrivers/message/i2o/bus-osm.c
+!Edrivers/message/i2o/device.c
+!Idrivers/message/i2o/device.c
+!Idrivers/message/i2o/driver.c
+!Idrivers/message/i2o/pci.c
+!Idrivers/message/i2o/i2o_block.c
+!Idrivers/message/i2o/i2o_scsi.c
+!Idrivers/message/i2o/i2o_proc.c
+     </sect1>
   </chapter>
 
   <chapter id="snddev">
index 0e3924ecd76b4e9b197348d656712fee80a2691d..24dc3fcf15948e8a9ed93043cf46c0e81f929c36 100644 (file)
@@ -365,6 +365,7 @@ You can change this at module load time (for a module) with:
        regshifts=<shift1>,<shift2>,...
        slave_addrs=<addr1>,<addr2>,...
        force_kipmid=<enable1>,<enable2>,...
+       unload_when_empty=[0|1]
 
 Each of these except si_trydefaults is a list, the first item for the
 first interface, second item for the second interface, etc.
@@ -416,6 +417,11 @@ by the driver, but systems with broken interrupts might need an enable,
 or users that don't want the daemon (don't need the performance, don't
 want the CPU hit) can disable it.
 
+If unload_when_empty is set to 1, the driver will be unloaded if it
+doesn't find any interfaces or all the interfaces fail to work.  The
+default is one.  Setting to 0 is useful with the hotmod, but is
+obviously only useful for modules.
+
 When compiled into the kernel, the parameters can be specified on the
 kernel command line as:
 
@@ -441,6 +447,25 @@ have high-res timers enabled in the kernel and you don't have
 interrupts enabled, the driver will run VERY slowly.  Don't blame me,
 these interfaces suck.
 
+The driver supports a hot add and remove of interfaces.  This way,
+interfaces can be added or removed after the kernel is up and running.
+This is done using /sys/modules/ipmi_si/hotmod, which is a write-only
+parameter.  You write a string to this interface.  The string has the
+format:
+   <op1>[:op2[:op3...]]
+The "op"s are:
+   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+You can specify more than one interface on the line.  The "opt"s are:
+   rsp=<regspacing>
+   rsi=<regsize>
+   rsh=<regshift>
+   irq=<irq>
+   ipmb=<ipmb slave addr>
+and these have the same meanings as discussed above.  Note that you
+can also use this on the kernel command line for a more compact format
+for specifying an interface.  Note that when removing an interface,
+only the first three parameters (si type, address type, and address)
+are used for the comparison.  Any options are ignored for removing.
 
 The SMBus Driver
 ----------------
@@ -502,7 +527,10 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x
+      nowayout=x ifnum_to_use=n
+
+ifnum_to_use specifies which interface the watchdog timer should use.
+The default is -1, which means to pick the first one registered.
 
 The timeout is the number of seconds to the action, and the pretimeout
 is the amount of seconds before the reset that the pre-timeout panic will
@@ -624,5 +652,9 @@ command line.  The parameter is also available via the proc filesystem
 in /proc/sys/dev/ipmi/poweroff_powercycle.  Note that if the system
 does not support power cycling, it will always do the power off.
 
+The "ifnum_to_use" parameter specifies which interface the poweroff
+code should use.  The default is -1, which means to pick the first one
+registered.
+
 Note that if you have ACPI enabled, the system will prefer using ACPI to
 power off.
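As an illustration of the hot add/remove interface added above, a small user-space program can write an "add" operation string to the hotmod file; the KCS I/O address 0xca2 is only an example value, and the sysfs path is the one quoted in the text:

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          const char *op = "add,kcs,i/o,0xca2";
          int fd = open("/sys/modules/ipmi_si/hotmod", O_WRONLY);

          if (fd < 0) {
                  perror("open hotmod");
                  return 1;
          }
          if (write(fd, op, strlen(op)) < 0)
                  perror("write hotmod");
          close(fd);
          return 0;
  }
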
index e2a66f8143c5c63b0b780ee13fc1069b87c82011..a598fe10a2974f5757761df5ab8f7f98c5c5f84a 100644 (file)
@@ -24,8 +24,10 @@ very similar behavior to the deadline IO scheduler.
 Selecting IO schedulers
 -----------------------
 To choose IO schedulers at boot time, use the argument 'elevator=deadline'.
-'noop' and 'as' (the default) are also available. IO schedulers are assigned
-globally at boot time only presently.
+'noop', 'as' and 'cfq' (the default) are also available. IO schedulers are
+assigned globally at boot time only presently. It's also possible to change
+the IO scheduler for a determined device on the fly, as described in
+Documentation/block/switching-sched.txt.
 
 
 Anticipatory IO scheduler Policies
index 70690f1a14af53cd128ab0a553421044e28cd319..8de132a02ba952867020a846229b0818ee73d214 100644 (file)
@@ -3,7 +3,7 @@
 
             Maintained by Torben Mathiasen <device@lanana.org>
 
-                     Last revised: 15 May 2006
+                     Last revised: 29 November 2006
 
 This list is the Linux Device List, the official registry of allocated
 device numbers and /dev directory nodes for the Linux operating
@@ -94,6 +94,7 @@ Your cooperation is appreciated.
                  9 = /dev/urandom      Faster, less secure random number gen.
                 10 = /dev/aio          Asynchronous I/O notification interface
                 11 = /dev/kmsg         Writes to this come out as printk's
+
   1 block      RAM disk
                  0 = /dev/ram0         First RAM disk
                  1 = /dev/ram1         Second RAM disk
@@ -122,7 +123,7 @@ Your cooperation is appreciated.
                devices are on major 128 and above and use the PTY
                master multiplex (/dev/ptmx) to acquire a PTY on
                demand.
-  
+
   2 block      Floppy disks
                  0 = /dev/fd0          Controller 0, drive 0, autodetect
                  1 = /dev/fd1          Controller 0, drive 1, autodetect
@@ -257,7 +258,7 @@ Your cooperation is appreciated.
                129 = /dev/vcsa1        tty1 text/attribute contents
                    ...
                191 = /dev/vcsa63       tty63 text/attribute contents
-       
+
                NOTE: These devices permit both read and write access.
 
   7 block      Loopback devices
@@ -411,7 +412,7 @@ Your cooperation is appreciated.
                207 = /dev/video/em8300_sp      EM8300 DVD decoder subpicture
                208 = /dev/compaq/cpqphpc       Compaq PCI Hot Plug Controller
                209 = /dev/compaq/cpqrid        Compaq Remote Insight Driver
-               210 = /dev/impi/bt      IMPI coprocessor block transfer 
+               210 = /dev/impi/bt      IMPI coprocessor block transfer
                211 = /dev/impi/smic    IMPI coprocessor stream interface
                212 = /dev/watchdogs/0  First watchdog device
                213 = /dev/watchdogs/1  Second watchdog device
@@ -506,6 +507,7 @@ Your cooperation is appreciated.
                 33 = /dev/patmgr1      Sequencer patch manager
                 34 = /dev/midi02       Third MIDI port
                 50 = /dev/midi03       Fourth MIDI port
+
  14 block      BIOS harddrive callback support {2.6}
                  0 = /dev/dos_hda      First BIOS harddrive whole disk
                 64 = /dev/dos_hdb      Second BIOS harddrive whole disk
@@ -527,6 +529,7 @@ Your cooperation is appreciated.
 
  16 char       Non-SCSI scanners
                  0 = /dev/gs4500       Genius 4500 handheld scanner
+
  16 block      GoldStar CD-ROM
                  0 = /dev/gscd         GoldStar CD-ROM
 
@@ -548,6 +551,7 @@ Your cooperation is appreciated.
                  0 = /dev/ttyC0        First Cyclades port
                    ...
                 31 = /dev/ttyC31       32nd Cyclades port
+
  19 block      "Double" compressed disk
                  0 = /dev/double0      First compressed disk
                    ...
@@ -563,6 +567,7 @@ Your cooperation is appreciated.
                  0 = /dev/cub0         Callout device for ttyC0
                    ...
                 31 = /dev/cub31        Callout device for ttyC31
+
  20 block      Hitachi CD-ROM (under development)
                  0 = /dev/hitcd        Hitachi CD-ROM
 
@@ -582,7 +587,7 @@ Your cooperation is appreciated.
 
                This device is used on the ARM-based Acorn RiscPC.
                Partitions are handled the same way as for IDE disks
-               (see major number 3). 
+               (see major number 3).
 
  22 char       Digiboard serial card
                  0 = /dev/ttyD0        First Digiboard port
@@ -591,7 +596,7 @@ Your cooperation is appreciated.
  22 block      Second IDE hard disk/CD-ROM interface
                  0 = /dev/hdc          Master: whole disk (or CD-ROM)
                 64 = /dev/hdd          Slave: whole disk (or CD-ROM)
-               
+
                Partitions are handled the same way as for the first
                interface (see major number 3).
 
@@ -639,6 +644,7 @@ Your cooperation is appreciated.
 
  26 char       Quanta WinVision frame grabber {2.6}
                  0 = /dev/wvisfgrab    Quanta WinVision frame grabber
+
  26 block      Second Matsushita (Panasonic/SoundBlaster) CD-ROM
                  0 = /dev/sbpcd4       Panasonic CD-ROM controller 1 unit 0
                  1 = /dev/sbpcd5       Panasonic CD-ROM controller 1 unit 1
@@ -670,6 +676,7 @@ Your cooperation is appreciated.
                 37 = /dev/nrawqft1     Unit 1, no rewind-on-close, no file marks
                 38 = /dev/nrawqft2     Unit 2, no rewind-on-close, no file marks
                 39 = /dev/nrawqft3     Unit 3, no rewind-on-close, no file marks
+
  27 block      Third Matsushita (Panasonic/SoundBlaster) CD-ROM
                  0 = /dev/sbpcd8       Panasonic CD-ROM controller 2 unit 0
                  1 = /dev/sbpcd9       Panasonic CD-ROM controller 2 unit 1
@@ -681,6 +688,7 @@ Your cooperation is appreciated.
                  1 = /dev/staliomem1   Second Stallion card I/O memory
                  2 = /dev/staliomem2   Third Stallion card I/O memory
                  3 = /dev/staliomem3   Fourth Stallion card I/O memory
+
  28 char       Atari SLM ACSI laser printer (68k/Atari)
                  0 = /dev/slm0         First SLM laser printer
                  1 = /dev/slm1         Second SLM laser printer
@@ -690,6 +698,7 @@ Your cooperation is appreciated.
                  1 = /dev/sbpcd13      Panasonic CD-ROM controller 3 unit 1
                  2 = /dev/sbpcd14      Panasonic CD-ROM controller 3 unit 2
                  3 = /dev/sbpcd15      Panasonic CD-ROM controller 3 unit 3
+
  28 block      ACSI disk (68k/Atari)
                  0 = /dev/ada          First ACSI disk whole disk
                 16 = /dev/adb          Second ACSI disk whole disk
@@ -750,6 +759,7 @@ Your cooperation is appreciated.
  31 char       MPU-401 MIDI
                  0 = /dev/mpu401data   MPU-401 data port
                  1 = /dev/mpu401stat   MPU-401 status port
+
  31 block      ROM/flash memory card
                  0 = /dev/rom0         First ROM card (rw)
                      ...
@@ -801,7 +811,7 @@ Your cooperation is appreciated.
  34 block      Fourth IDE hard disk/CD-ROM interface
                  0 = /dev/hdg          Master: whole disk (or CD-ROM)
                 64 = /dev/hdh          Slave: whole disk (or CD-ROM)
-               
+
                Partitions are handled the same way as for the first
                interface (see major number 3).
 
@@ -818,6 +828,7 @@ Your cooperation is appreciated.
                129 = /dev/smpte1       Second MIDI port, SMPTE timed
                130 = /dev/smpte2       Third MIDI port, SMPTE timed
                131 = /dev/smpte3       Fourth MIDI port, SMPTE timed
+
  35 block      Slow memory ramdisk
                  0 = /dev/slram        Slow memory ramdisk
 
@@ -828,6 +839,7 @@ Your cooperation is appreciated.
                 16 = /dev/tap0         First Ethertap device
                    ...
                 31 = /dev/tap15        16th Ethertap device
+
  36 block      MCA ESDI hard disk
                  0 = /dev/eda          First ESDI disk whole disk
                 64 = /dev/edb          Second ESDI disk whole disk
@@ -882,6 +894,7 @@ Your cooperation is appreciated.
 
  40 char       Matrox Meteor frame grabber {2.6}
                  0 = /dev/mmetfgrab    Matrox Meteor frame grabber
+
  40 block      Syquest EZ135 parallel port removable drive
                  0 = /dev/eza          Parallel EZ135 drive, whole disk
 
@@ -893,6 +906,7 @@ Your cooperation is appreciated.
 
  41 char       Yet Another Micro Monitor
                  0 = /dev/yamm         Yet Another Micro Monitor
+
  41 block      MicroSolutions BackPack parallel port CD-ROM
                  0 = /dev/bpcd         BackPack CD-ROM
 
@@ -901,6 +915,7 @@ Your cooperation is appreciated.
                the parallel port ATAPI CD-ROM driver at major number 46.
 
  42 char       Demo/sample use
+
  42 block      Demo/sample use
 
                This number is intended for use in sample code, as
@@ -918,6 +933,7 @@ Your cooperation is appreciated.
                  0 = /dev/ttyI0        First virtual modem
                    ...
                 63 = /dev/ttyI63       64th virtual modem
+
  43 block      Network block devices
                  0 = /dev/nb0          First network block device
                  1 = /dev/nb1          Second network block device
@@ -934,12 +950,13 @@ Your cooperation is appreciated.
                  0 = /dev/cui0         Callout device for ttyI0
                    ...
                 63 = /dev/cui63        Callout device for ttyI63
+
  44 block      Flash Translation Layer (FTL) filesystems
                  0 = /dev/ftla         FTL on first Memory Technology Device
                 16 = /dev/ftlb         FTL on second Memory Technology Device
                 32 = /dev/ftlc         FTL on third Memory Technology Device
                    ...
-               240 = /dev/ftlp         FTL on 16th Memory Technology Device 
+               240 = /dev/ftlp         FTL on 16th Memory Technology Device
 
                Partitions are handled in the same way as for IDE
                disks (see major number 3) except that the partition
@@ -958,6 +975,7 @@ Your cooperation is appreciated.
                191 = /dev/ippp63       64th SyncPPP device
 
                255 = /dev/isdninfo     ISDN monitor interface
+
  45 block      Parallel port IDE disk devices
                  0 = /dev/pda          First parallel port IDE disk
                 16 = /dev/pdb          Second parallel port IDE disk
@@ -1044,6 +1062,7 @@ Your cooperation is appreciated.
                  1 = /dev/dcbri1       Second DataComm card
                  2 = /dev/dcbri2       Third DataComm card
                  3 = /dev/dcbri3       Fourth DataComm card
+
  52 block      Mylex DAC960 PCI RAID controller; fifth controller
                  0 = /dev/rd/c4d0      First disk, whole disk
                  8 = /dev/rd/c4d1      Second disk, whole disk
@@ -1093,6 +1112,7 @@ Your cooperation is appreciated.
 
  55 char       DSP56001 digital signal processor
                  0 = /dev/dsp56k       First DSP56001
+
  55 block      Mylex DAC960 PCI RAID controller; eighth controller
                  0 = /dev/rd/c7d0      First disk, whole disk
                  8 = /dev/rd/c7d1      Second disk, whole disk
@@ -1130,6 +1150,7 @@ Your cooperation is appreciated.
                  0 = /dev/cup0         Callout device for ttyP0
                  1 = /dev/cup1         Callout device for ttyP1
                    ...
+
  58 block      Reserved for logical volume manager
 
  59 char       sf firewall package
@@ -1149,6 +1170,7 @@ Your cooperation is appreciated.
                NAMING CONFLICT -- PROPOSED REVISED NAME /dev/rpda0 etc
 
  60-63 char    LOCAL/EXPERIMENTAL USE
+
  60-63 block   LOCAL/EXPERIMENTAL USE
                Allocated for local/experimental use.  For devices not
                assigned official numbers, these ranges should be
@@ -1434,7 +1456,6 @@ Your cooperation is appreciated.
                DAC960 (see major number 48) except that the limit on
                partitions is 15.
 
-
  78 char       PAM Software's multimodem boards
                  0 = /dev/ttyM0        First PAM modem
                  1 = /dev/ttyM1        Second PAM modem
@@ -1450,7 +1471,6 @@ Your cooperation is appreciated.
                DAC960 (see major number 48) except that the limit on
                partitions is 15.
 
-
  79 char       PAM Software's multimodem boards - alternate devices
                  0 = /dev/cum0         Callout device for ttyM0
                  1 = /dev/cum1         Callout device for ttyM1
@@ -1466,7 +1486,6 @@ Your cooperation is appreciated.
                DAC960 (see major number 48) except that the limit on
                partitions is 15.
 
-
  80 char       Photometrics AT200 CCD camera
                  0 = /dev/at200        Photometrics AT200 CCD camera
 
@@ -1679,7 +1698,7 @@ Your cooperation is appreciated.
                  1 = /dev/dcxx1        Second capture card
                    ...
 
- 94 block IBM S/390 DASD block storage
+ 94 block      IBM S/390 DASD block storage
                  0 = /dev/dasda First DASD device, major
                  1 = /dev/dasda1 First DASD device, block 1
                  2 = /dev/dasda2 First DASD device, block 2
@@ -1695,7 +1714,7 @@ Your cooperation is appreciated.
                  1 = /dev/ipnat        NAT control device/log file
                  2 = /dev/ipstate      State information log file
                  3 = /dev/ipauth       Authentication control device/log file
-                   ...         
+                   ...
 
  96 char       Parallel port ATAPI tape devices
                  0 = /dev/pt0          First parallel port ATAPI tape
@@ -1705,7 +1724,7 @@ Your cooperation is appreciated.
                129 = /dev/npt1         Second p.p. ATAPI tape, no rewind
                    ...
 
- 96 block Inverse NAND Flash Translation Layer
+ 96 block      Inverse NAND Flash Translation Layer
                  0 = /dev/inftla First INFTL layer
                 16 = /dev/inftlb Second INFTL layer
                    ...
@@ -1937,7 +1956,6 @@ Your cooperation is appreciated.
                    ...
 
 113 block      IBM iSeries virtual CD-ROM
-
                  0 = /dev/iseries/vcda First virtual CD-ROM
                  1 = /dev/iseries/vcdb Second virtual CD-ROM
                    ...
@@ -2059,11 +2077,12 @@ Your cooperation is appreciated.
                    ...
 
 119 char       VMware virtual network control
-                 0 = /dev/vnet0        1st virtual network
-                 1 = /dev/vnet1        2nd virtual network
+                 0 = /dev/vmnet0       1st virtual network
+                 1 = /dev/vmnet1       2nd virtual network
                    ...
 
 120-127 char   LOCAL/EXPERIMENTAL USE
+
 120-127 block  LOCAL/EXPERIMENTAL USE
                Allocated for local/experimental use.  For devices not
                assigned official numbers, these ranges should be
@@ -2075,7 +2094,6 @@ Your cooperation is appreciated.
                nodes; instead they should be accessed through the
                /dev/ptmx cloning interface.
 
-
 128 block       SCSI disk devices (128-143)
                   0 = /dev/sddy         129th SCSI disk whole disk
                  16 = /dev/sddz         130th SCSI disk whole disk
@@ -2087,7 +2105,6 @@ Your cooperation is appreciated.
                disks (see major number 3) except that the limit on
                partitions is 15.
 
-
 129 block       SCSI disk devices (144-159)
                   0 = /dev/sdeo         145th SCSI disk whole disk
                  16 = /dev/sdep         146th SCSI disk whole disk
@@ -2123,7 +2140,6 @@ Your cooperation is appreciated.
                disks (see major number 3) except that the limit on
                partitions is 15.
 
-
 132 block       SCSI disk devices (192-207)
                   0 = /dev/sdgk         193rd SCSI disk whole disk
                  16 = /dev/sdgl         194th SCSI disk whole disk
@@ -2135,7 +2151,6 @@ Your cooperation is appreciated.
                disks (see major number 3) except that the limit on
                partitions is 15.
 
-
 133 block       SCSI disk devices (208-223)
                   0 = /dev/sdha         209th SCSI disk whole disk
                  16 = /dev/sdhb         210th SCSI disk whole disk
@@ -2147,7 +2162,6 @@ Your cooperation is appreciated.
                disks (see major number 3) except that the limit on
                partitions is 15.
 
-
 134 block       SCSI disk devices (224-239)
                   0 = /dev/sdhq         225th SCSI disk whole disk
                  16 = /dev/sdhr         226th SCSI disk whole disk
@@ -2159,7 +2173,6 @@ Your cooperation is appreciated.
                disks (see major number 3) except that the limit on
                partitions is 15.
 
-
 135 block       SCSI disk devices (240-255)
                   0 = /dev/sdig         241st SCSI disk whole disk
                  16 = /dev/sdih         242nd SCSI disk whole disk
@@ -2171,7 +2184,6 @@ Your cooperation is appreciated.
                disks (see major number 3) except that the limit on
                partitions is 15.
 
-
 136-143 char   Unix98 PTY slaves
                  0 = /dev/pts/0        First Unix98 pseudo-TTY
                  1 = /dev/pts/1        Second Unix98 pesudo-TTY
@@ -2384,6 +2396,7 @@ Your cooperation is appreciated.
                    ...
 
 159 char       RESERVED
+
 159 block      RESERVED
 
 160 char       General Purpose Instrument Bus (GPIB)
@@ -2427,7 +2440,7 @@ Your cooperation is appreciated.
 
                Partitions are handled in the same way as for IDE
                disks (see major number 3) except that the limit on
-               partitions is 31. 
+               partitions is 31.
 
 162 char       Raw block device interface
                  0 = /dev/rawctl       Raw I/O control device
@@ -2483,7 +2496,6 @@ Your cooperation is appreciated.
 
 171 char       Reserved for IEEE 1394 (Firewire)
 
-
 172 char       Moxa Intellio serial card
                  0 = /dev/ttyMX0       First Moxa port
                  1 = /dev/ttyMX1       Second Moxa port
@@ -2543,9 +2555,6 @@ Your cooperation is appreciated.
                 64 = /dev/usb/rio500   Diamond Rio 500
                 65 = /dev/usb/usblcd   USBLCD Interface (info@usblcd.de)
                 66 = /dev/usb/cpad0    Synaptics cPad (mouse/LCD)
-                67 = /dev/usb/adutux0  1st Ontrak ADU device
-                   ...
-                76 = /dev/usb/adutux10 10th Ontrak ADU device
                 96 = /dev/usb/hiddev0  1st USB HID device
                    ...
                111 = /dev/usb/hiddev15 16th USB HID device
@@ -2558,7 +2567,7 @@ Your cooperation is appreciated.
                132 = /dev/usb/idmouse  ID Mouse (fingerprint scanner) device
                133 = /dev/usb/sisusbvga1       First SiSUSB VGA device
                    ...
-               140 = /dev/usb/sisusbvga8       Eigth SISUSB VGA device
+               140 = /dev/usb/sisusbvga8       Eighth SISUSB VGA device
                144 = /dev/usb/lcd      USB LCD device
                160 = /dev/usb/legousbtower0    1st USB Legotower device
                    ...
@@ -2571,7 +2580,7 @@ Your cooperation is appreciated.
                  0 = /dev/uba          First USB block device
                  8 = /dev/ubb          Second USB block device
                 16 = /dev/ubc          Third USB block device
-                   ...
+                   ...
 
 181 char       Conrad Electronic parallel port radio clocks
                  0 = /dev/pcfclock0    First Conrad radio clock
@@ -2657,7 +2666,7 @@ Your cooperation is appreciated.
                 32 = /dev/mvideo/status2       Third device
                    ...
                    ...
-               240 = /dev/mvideo/status15      16th device 
+               240 = /dev/mvideo/status15      16th device
                    ...
 
 195 char       Nvidia graphics devices
@@ -2795,6 +2804,10 @@ Your cooperation is appreciated.
                    ...
                 185 = /dev/ttyNX15             Hilscher netX serial port 15
                 186 = /dev/ttyJ0               JTAG1 DCC protocol based serial port emulation
+                187 = /dev/ttyUL0              Xilinx uartlite - port 0
+                   ...
+                190 = /dev/ttyUL3              Xilinx uartlite - port 3
+                191 = /dev/xvc0                Xen virtual console - port 0
 
 205 char       Low-density serial ports (alternate device)
                  0 = /dev/culu0                Callout device for ttyLU0
@@ -2832,7 +2845,6 @@ Your cooperation is appreciated.
                 82 = /dev/cuvr0                Callout device for ttyVR0
                 83 = /dev/cuvr1                Callout device for ttyVR1
 
-
 206 char       OnStream SC-x0 tape devices
                  0 = /dev/osst0                First OnStream SCSI tape, mode 0
                  1 = /dev/osst1                Second OnStream SCSI tape, mode 0
@@ -2922,7 +2934,6 @@ Your cooperation is appreciated.
                    ...
 
 212 char       LinuxTV.org DVB driver subsystem
-
                  0 = /dev/dvb/adapter0/video0    first video decoder of first card
                  1 = /dev/dvb/adapter0/audio0    first audio decoder of first card
                  2 = /dev/dvb/adapter0/sec0      (obsolete/unused)
@@ -3008,9 +3019,9 @@ Your cooperation is appreciated.
                  2 = /dev/3270/tub2            Second 3270 terminal
                    ...
 
-229 char       IBM iSeries virtual console
-                 0 = /dev/iseries/vtty0        First console port
-                 1 = /dev/iseries/vtty1        Second console port
+229 char       IBM iSeries/pSeries virtual console
+                 0 = /dev/hvc0                 First console port
+                 1 = /dev/hvc1                 Second console port
                    ...
 
 230 char       IBM iSeries virtual tape
@@ -3083,12 +3094,14 @@ Your cooperation is appreciated.
 234-239                UNASSIGNED
 
 240-254 char   LOCAL/EXPERIMENTAL USE
+
 240-254 block  LOCAL/EXPERIMENTAL USE
                Allocated for local/experimental use.  For devices not
                assigned official numbers, these ranges should be
                used in order to avoid conflicting with future assignments.
 
 255 char       RESERVED
+
 255 block      RESERVED
 
                This major is reserved to assist the expansion to a
@@ -3115,7 +3128,20 @@ Your cooperation is appreciated.
 257 char       Phoenix Technologies Cryptographic Services Driver
                  0 = /dev/ptlsec       Crypto Services Driver
 
-
+257 block      SSFDC Flash Translation Layer filesystem
+                 0 = /dev/ssfdca       First SSFDC layer
+                 8 = /dev/ssfdcb       Second SSFDC layer
+                16 = /dev/ssfdcc       Third SSFDC layer
+                24 = /dev/ssfdcd       4th SSFDC layer
+                32 = /dev/ssfdce       5th SSFDC layer
+                40 = /dev/ssfdcf       6th SSFDC layer
+                48 = /dev/ssfdcg       7th SSFDC layer
+                56 = /dev/ssfdch       8th SSFDC layer
+
+258 block      ROM/Flash read-only translation layer
+                 0 = /dev/blockrom0    First ROM card's translation layer interface
+                 1 = /dev/blockrom1    Second ROM card's translation layer interface
+                 ...
 
  ****  ADDITIONAL /dev DIRECTORY ENTRIES
 
index eb1a6cad21e6a8cadc993111b1f4c7099d794ad2..790ef6fbe495914f94ea351e92834d9fb206f80c 100644 (file)
@@ -124,7 +124,7 @@ sync_fs:            no      no      read
 write_super_lockfs:    ?
 unlockfs:              ?
 statfs:                        no      no      no
-remount_fs:            no      yes     maybe           (see below)
+remount_fs:            yes     yes     maybe           (see below)
 clear_inode:           no
 umount_begin:          yes     no      no
 show_options:          no                              (vfsmount->sem)
index 3d74477389587b4be4d03950845c17a1e396a707..345392c4caebdb1d7bc806d4f6f6b9fa911fc8e6 100644 (file)
@@ -51,6 +51,22 @@ homepage:
 
   http://fuse.sourceforge.net/
 
+Filesystem type
+~~~~~~~~~~~~~~~
+
+The filesystem type given to mount(2) can be one of the following:
+
+'fuse'
+
+  This is the usual way to mount a FUSE filesystem.  The first
+  argument of the mount system call may contain an arbitrary string,
+  which is not interpreted by the kernel.
+
+'fuseblk'
+
+  The filesystem is block device based.  The first argument of the
+  mount system call is interpreted as the name of the device.
+
 Mount options
 ~~~~~~~~~~~~~
 
@@ -94,6 +110,11 @@ Mount options
   The default is infinite.  Note that the size of read requests is
   limited anyway to 32 pages (which is 128kbyte on i386).
 
+'blksize=N'
+
+  Set the block size for the filesystem.  The default is 512.  This
+  option is only valid for 'fuseblk' type mounts.
+
 Control filesystem
 ~~~~~~~~~~~~~~~~~~
 
index d81722418010f551f066d264c8276c861e82ef40..253b50d1328ed8d62e873f9e6ca750972bc603ce 100644 (file)
@@ -1,11 +1,8 @@
-This is the implementation of the SystemV/Coherent filesystem for Linux.
 It implements all of
   - Xenix FS,
   - SystemV/386 FS,
   - Coherent FS.
 
-This is version beta 4.
-
 To install:
 * Answer the 'System V and Coherent filesystem support' question with 'y'
   when configuring the kernel.
@@ -28,11 +25,173 @@ Bugs in the present implementation:
   for this FS on hard disk yet.
 
 
-Please report any bugs and suggestions to
-  Bruno Haible <haible@ma2s2.mathematik.uni-karlsruhe.de>
-  Pascal Haible <haible@izfm.uni-stuttgart.de>
-  Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
+These filesystems are rather similar. Here is a comparison with Minix FS:
+
+* Linux fdisk reports on partitions
+  - Minix FS     0x81 Linux/Minix
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  0x08 AIX bootable
+
+* Size of a block or zone (data allocation unit on disk)
+  - Minix FS     1024
+  - Xenix FS     1024 (also 512 ??)
+  - SystemV FS   1024 (also 512 and 2048)
+  - Coherent FS   512
+
+* General layout: all have one boot block, one super block and
+  separate areas for inodes and for directories/data.
+  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
+  all the block numbers (including the super block) are offset by one track.
+
+* Byte ordering of "short" (16 bit entities) on disk:
+  - Minix FS     little endian  0 1
+  - Xenix FS     little endian  0 1
+  - SystemV FS   little endian  0 1
+  - Coherent FS  little endian  0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Byte ordering of "long" (32 bit entities) on disk:
+  - Minix FS     little endian  0 1 2 3
+  - Xenix FS     little endian  0 1 2 3
+  - SystemV FS   little endian  0 1 2 3
+  - Coherent FS  PDP-11         2 3 0 1
+  Of course, this affects only the file system, not the data of files on it!
+
+* Inode on disk: "short", 0 means non-existent, the root dir ino is:
+  - Minix FS                            1
+  - Xenix FS, SystemV FS, Coherent FS   2
+
+* Maximum number of hard links to a file:
+  - Minix FS     250
+  - Xenix FS     ??
+  - SystemV FS   ??
+  - Coherent FS  >=10000
+
+* Free inode management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      There is a cache of a certain number of free inodes in the super-block.
+      When it is exhausted, new free inodes are found using a linear search.
+
+* Free block management:
+  - Minix FS                             a bitmap
+  - Xenix FS, SystemV FS, Coherent FS
+      Free blocks are organized in a "free list". Maybe a misleading term,
+      since it is not true that every free block contains a pointer to
+      the next free block. Rather, the free blocks are organized in chunks
+      of limited size, and every now and then a free block contains pointers
+      to the free blocks pertaining to the next chunk; the first of these
+      contains pointers and so on. The list terminates with a "block number"
+      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
+
+* Super-block location:
+  - Minix FS     block 1 = bytes 1024..2047
+  - Xenix FS     block 1 = bytes 1024..2047
+  - SystemV FS   bytes 512..1023
+  - Coherent FS  block 1 = bytes 512..1023
+
+* Super-block layout:
+  - Minix FS
+                    unsigned short s_ninodes;
+                    unsigned short s_nzones;
+                    unsigned short s_imap_blocks;
+                    unsigned short s_zmap_blocks;
+                    unsigned short s_firstdatazone;
+                    unsigned short s_log_zone_size;
+                    unsigned long s_max_size;
+                    unsigned short s_magic;
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short s_firstdatazone;
+                    unsigned long  s_nzones;
+                    unsigned short s_fzone_count;
+                    unsigned long  s_fzones[NICFREE];
+                    unsigned short s_finode_count;
+                    unsigned short s_finodes[NICINOD];
+                    char           s_flock;
+                    char           s_ilock;
+                    char           s_modified;
+                    char           s_rdonly;
+                    unsigned long  s_time;
+                    short          s_dinfo[4]; -- SystemV FS only
+                    unsigned long  s_free_zones;
+                    unsigned short s_free_inodes;
+                    short          s_dinfo[4]; -- Xenix FS only
+                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
+                    char           s_fname[6];
+                    char           s_fpack[6];
+    then they differ considerably:
+        Xenix FS
+                    char           s_clean;
+                    char           s_fill[371];
+                    long           s_magic;
+                    long           s_type;
+        SystemV FS
+                    long           s_fill[12 or 14];
+                    long           s_state;
+                    long           s_magic;
+                    long           s_type;
+        Coherent FS
+                    unsigned long  s_unique;
+    Note that Coherent FS has no magic.
+
+* Inode layout:
+  - Minix FS
+                    unsigned short i_mode;
+                    unsigned short i_uid;
+                    unsigned long  i_size;
+                    unsigned long  i_time;
+                    unsigned char  i_gid;
+                    unsigned char  i_nlinks;
+                    unsigned short i_zone[7+1+1];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short i_mode;
+                    unsigned short i_nlink;
+                    unsigned short i_uid;
+                    unsigned short i_gid;
+                    unsigned long  i_size;
+                    unsigned char  i_zone[3*(10+1+1+1)];
+                    unsigned long  i_atime;
+                    unsigned long  i_mtime;
+                    unsigned long  i_ctime;
+
+* Regular file data blocks are organized as
+  - Minix FS
+               7 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+  - Xenix FS, SystemV FS, Coherent FS
+              10 direct blocks
+               1 indirect block (pointers to blocks)
+               1 double-indirect block (pointer to pointers to blocks)
+               1 triple-indirect block (pointer to pointers to pointers to blocks)
+
+* Inode size, inodes per block
+  - Minix FS        32   32
+  - Xenix FS        64   16
+  - SystemV FS      64   16
+  - Coherent FS     64    8
+
+* Directory entry on disk
+  - Minix FS
+                    unsigned short inode;
+                    char name[14/30];
+  - Xenix FS, SystemV FS, Coherent FS
+                    unsigned short inode;
+                    char name[14];
+
+* Dir entry size, dir entries per block
+  - Minix FS     16/32    64/32
+  - Xenix FS     16       64
+  - SystemV FS   16       64
+  - Coherent FS  16       32
+
+* How to implement symbolic links such that the host fsck doesn't scream:
+  - Minix FS     normal
+  - Xenix FS     kludge: as regular files with  chmod 1000
+  - SystemV FS   ??
+  - Coherent FS  kludge: as regular files with  chmod 1000
 
-Bruno Haible
-<haible@ma2s2.mathematik.uni-karlsruhe.de>
 
+Notation: We often speak of a "block" but mean a zone (the allocation unit)
+and not the disk driver's notion of "block".
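As a rough user-space sketch of the common super-block start shared by Xenix FS, SystemV FS and Coherent FS as tabulated above (the NICFREE/NICINOD values, the packed layout and the little-endian host are illustrative assumptions, not taken from the kernel sources):

  #include <stdint.h>
  #include <stdio.h>

  #define NICFREE 50              /* assumed classic SystemV value */
  #define NICINOD 100             /* assumed classic SystemV value */

  struct sysv_sb_start {          /* first fields of the common layout */
          uint16_t s_firstdatazone;
          uint32_t s_nzones;
          uint16_t s_fzone_count;
          uint32_t s_fzones[NICFREE];
          uint16_t s_finode_count;
          uint16_t s_finodes[NICINOD];
          /* ... remaining fields as listed above ... */
  } __attribute__((packed));

  int main(int argc, char **argv)
  {
          struct sysv_sb_start sb;
          FILE *f;

          if (argc != 2 || !(f = fopen(argv[1], "rb")))
                  return 1;
          /* SystemV FS keeps its super-block at bytes 512..1023. */
          if (fseek(f, 512, SEEK_SET) || fread(&sb, sizeof(sb), 1, f) != 1) {
                  fclose(f);
                  return 1;
          }
          fclose(f);
          printf("first data zone %u, %u zones\n",
                 sb.s_firstdatazone, (unsigned)sb.s_nzones);
          return 0;
  }
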
index c51314b1a463a35eb222d550409d0a29137149e2..9575de300a6173ea92dec18b198f48395721ed0e 100644 (file)
@@ -2,7 +2,7 @@
                     ----------------------------
 
                    H. Peter Anvin <hpa@zytor.com>
-                       Last update 2005-09-02
+                       Last update 2006-11-17
 
 On the i386 platform, the Linux kernel uses a rather complicated boot
 convention.  This has evolved partially due to historical aspects, as
@@ -35,6 +35,8 @@ Protocol 2.03:        (Kernel 2.4.18-pre1) Explicitly makes the highest possible
                initrd address available to the bootloader.
 
 Protocol 2.04: (Kernel 2.6.14) Extend the syssize field to four bytes.
+Protocol 2.05: (Kernel 2.6.20) Make protected mode kernel relocatable.
+               Introduce relocatable_kernel and kernel_alignment fields.
 
 
 **** MEMORY LAYOUT
@@ -129,6 +131,8 @@ Offset      Proto   Name            Meaning
 0226/2 N/A     pad1            Unused
 0228/4 2.02+   cmd_line_ptr    32-bit pointer to the kernel command line
 022C/4 2.03+   initrd_addr_max Highest legal initrd address
+0230/4 2.05+   kernel_alignment Physical addr alignment required for kernel
+0234/1 2.05+   relocatable_kernel Whether kernel is relocatable or not
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
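The two protocol 2.05 fields added above sit at fixed offsets in the real-mode header, so they can be dumped straight from a bzImage.  A small sketch follows; the "HdrS" signature at offset 0x202 and the protocol version word at 0x206 belong to the same header layout, and a little-endian host plus the image path given on the command line are assumed:

  #include <stdint.h>
  #include <stdio.h>

  int main(int argc, char **argv)
  {
          uint32_t sig = 0, alignment = 0;
          uint16_t version = 0;
          uint8_t relocatable = 0;
          FILE *f;

          if (argc != 2 || !(f = fopen(argv[1], "rb")))
                  return 1;
          if (fseek(f, 0x202, SEEK_SET) || fread(&sig, 4, 1, f) != 1 ||
              fseek(f, 0x206, SEEK_SET) || fread(&version, 2, 1, f) != 1 ||
              fseek(f, 0x230, SEEK_SET) || fread(&alignment, 4, 1, f) != 1 ||
              fseek(f, 0x234, SEEK_SET) || fread(&relocatable, 1, 1, f) != 1) {
                  fclose(f);
                  return 1;
          }
          fclose(f);

          if (sig != 0x53726448)          /* "HdrS", little endian */
                  return 1;
          printf("protocol %x.%02x relocatable_kernel=%u kernel_alignment=%#x\n",
                 version >> 8, version & 0xff, relocatable, alignment);
          return 0;
  }
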
index 15e4fed127f69bd3200d2a8ae1e59832570def7e..b79bcdf163199fe147311cb94e3711a0cb24512d 100644 (file)
@@ -599,8 +599,6 @@ and is between 256 and 4096 characters. It is defined in the file
 
        hugepages=      [HW,IA-32,IA-64] Maximal number of HugeTLB pages.
 
-       noirqbalance    [IA-32,SMP,KNL] Disable kernel irq balancing
-
        i8042.direct    [HW] Put keyboard port into non-translated mode
        i8042.dumbkbd   [HW] Pretend that controller can only read data from
                             keyboard and cannot control its state
@@ -650,6 +648,10 @@ and is between 256 and 4096 characters. It is defined in the file
        idle=           [HW]
                        Format: idle=poll or idle=halt
 
+       ignore_loglevel [KNL]
+                       Ignore loglevel setting - this will print /all/
+                       kernel messages to the console. Useful for debugging.
+
        ihash_entries=  [KNL]
                        Set number of hash buckets for inode cache.
 
@@ -714,7 +716,12 @@ and is between 256 and 4096 characters. It is defined in the file
                        Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
        isolcpus=       [KNL,SMP] Isolate CPUs from the general scheduler.
-                       Format: <cpu number>,...,<cpu number>
+                       Format:
+                       <cpu number>,...,<cpu number>
+                       or
+                       <cpu number>-<cpu number>  (must be a positive range in ascending order)
+                       or a mixture
+                       <cpu number>,...,<cpu number>-<cpu number>
                        This option can be used to specify one or more CPUs
                        to isolate from the general SMP balancing and scheduling
                        algorithms. The only way to move a process onto or off
@@ -1012,6 +1019,10 @@ and is between 256 and 4096 characters. It is defined in the file
                        emulation library even if a 387 maths coprocessor
                        is present.
 
+       noaliencache    [MM, NUMA] Disables the allocation of alien caches in
+                       the slab allocator.  Saves per-node memory, but will
+                       impact performance on real NUMA hardware.
+
        noalign         [KNL,ARM]
 
        noapic          [SMP,APIC] Tells the kernel to not make use of any
@@ -1052,9 +1063,14 @@ and is between 256 and 4096 characters. It is defined in the file
                        in certain environments such as networked servers or
                        real-time systems.
 
+       noirqbalance    [IA-32,SMP,KNL] Disable kernel irq balancing
+
        noirqdebug      [IA-32] Disables the code which attempts to detect and
                        disable unhandled interrupt sources.
 
+       no_timer_check  [IA-32,X86_64,APIC] Disables the code which tests for
+                       broken timer IRQ sources.
+
        noisapnp        [ISAPNP] Disables ISA PnP code.
 
        noinitrd        [RAM] Tells the kernel not to load any configured
@@ -1285,6 +1301,7 @@ and is between 256 and 4096 characters. It is defined in the file
                        Param: "schedule" - profile schedule points.
                        Param: <number> - step/bucket size as a power of 2 for
                                statistical time based profiling.
+                       Param: "sleep" - profile D-state sleeping (millisecs)
 
        processor.max_cstate=   [HW,ACPI]
                        Limit processor to maximum C-state
@@ -1366,6 +1383,12 @@ and is between 256 and 4096 characters. It is defined in the file
        resume=         [SWSUSP]
                        Specify the partition device for software suspend
 
+       resume_offset=  [SWSUSP]
+                       Specify the offset from the beginning of the partition
+                       given by "resume=" at which the swap header is located,
+                       in <PAGE_SIZE> units (needed only for swap files).
+                       See  Documentation/power/swsusp-and-swap-files.txt
+
        rhash_entries=  [KNL,NET]
                        Set number of hash buckets for route cache
 
@@ -1416,6 +1439,11 @@ and is between 256 and 4096 characters. It is defined in the file
 
        scsi_logging=   [SCSI]
 
+       scsi_mod.scan=  [SCSI] sync (default) scans SCSI busses as they are
+                       discovered.  async scans them in kernel threads,
+                       allowing boot to proceed.  none ignores them, expecting
+                       user space to do the scan.
+
        selinux         [SELINUX] Disable or enable SELinux at boot time.
                        Format: { "0" | "1" }
                        See security/selinux/Kconfig help text.
@@ -1727,6 +1755,9 @@ and is between 256 and 4096 characters. It is defined in the file
        norandmaps      Don't use address space randomization
                        Equivalent to echo 0 > /proc/sys/kernel/randomize_va_space
 
+       unwind_debug=N  N > 0 will enable dwarf2 unwinder debugging
+                       This is useful to get more information why
+                       you got a "dwarf2 unwinder stuck"
 
 ______________________________________________________________________
 
index b1181ce232d987453f13917904a3f1ba180125f2..e06b6e3c1db577d5fe513cc2ea045e3d57507564 100644 (file)
@@ -58,6 +58,8 @@ fore200e.txt
        - FORE Systems PCA-200E/SBA-200E ATM NIC driver info.
 framerelay.txt
        - info on using Frame Relay/Data Link Connection Identifier (DLCI).
+generic_netlink.txt
+       - info on Generic Netlink
 ip-sysctl.txt
        - /proc/sys/net/ipv4/* variables
 ip_dynaddr.txt
diff --git a/Documentation/networking/generic_netlink.txt b/Documentation/networking/generic_netlink.txt
new file mode 100644 (file)
index 0000000..d4f8b8b
--- /dev/null
@@ -0,0 +1,3 @@
+A wiki document on how to use Generic Netlink can be found here:
+
+ * http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO
diff --git a/Documentation/power/s2ram.txt b/Documentation/power/s2ram.txt
new file mode 100644 (file)
index 0000000..b05f512
--- /dev/null
@@ -0,0 +1,56 @@
+                       How to get s2ram working
+                       ~~~~~~~~~~~~~~~~~~~~~~~~
+                       2006 Linus Torvalds
+                       2006 Pavel Machek
+
+1) Check suspend.sf.net, program s2ram there has long whitelist of
+   "known ok" machines, along with tricks to use on each one.
+
+2) If that does not help, try reading tricks.txt and
+   video.txt. Perhaps problem is as simple as broken module, and
+   simple module unload can fix it.
+
+3) You can use Linus' TRACE_RESUME infrastructure, described below.
+
+                     Using TRACE_RESUME
+                     ~~~~~~~~~~~~~~~~~~
+
+I've been working at making the machines I have able to STR, and almost
+always it's a driver that is buggy. Thank God for the suspend/resume
+debugging - the thing that Chuck tried to disable. That's often the _only_
+way to debug these things, and it's actually pretty powerful (but
+time-consuming - having to insert TRACE_RESUME() markers into the device
+driver that doesn't resume and recompile and reboot).
+
+Anyway, the way to debug this for people who are interested (have a
+machine that doesn't boot) is:
+
+ - enable PM_DEBUG, and PM_TRACE
+
+ - use a script like this:
+
+       #!/bin/sh
+       sync
+       echo 1 > /sys/power/pm_trace
+       echo mem > /sys/power/state
+
+   to suspend
+
+ - if it doesn't come back up (which is usually the problem), reboot by
+   holding the power button down, and look at the dmesg output for things
+   like
+
+       Magic number: 4:156:725
+       hash matches drivers/base/power/resume.c:28
+       hash matches device 0000:01:00.0
+
+   which means that the last trace event was just before trying to resume
+   device 0000:01:00.0. Then figure out what driver is controlling that
+   device (lspci and /sys/devices/pci* is your friend), and see if you can
+   fix it, disable it, or trace into its resume function.
+
+For example, the above happens to be the VGA device on my EVO, which I
+used to run with "radeonfb" (it's an ATI Radeon mobility). It turns out
+that "radeonfb" simply cannot resume that device - it tries to set the
+PLL's, and it just _hangs_. Using the regular VGA console and letting X
+resume it instead works fine.
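The text above talks about inserting TRACE_RESUME() markers into the driver that fails to resume.  A sketch of what that might look like in a suspect PCI resume handler; the header name, the handler and the marker values are placeholders based on that description, and PM_DEBUG/PM_TRACE must be enabled as noted above:

  #include <linux/pci.h>
  #include <linux/resume-trace.h>

  static int my_resume(struct pci_dev *pdev)
  {
          TRACE_RESUME(1);        /* the last marker reached before a hang    */
          /* ... restore PCI config space ... */
          TRACE_RESUME(2);        /* is what shows up as the "Magic number"   */
          /* ... re-enable the device, restart DMA ... */
          TRACE_RESUME(0);        /* and "hash matches" lines after a reboot  */
          return 0;
  }
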
diff --git a/Documentation/power/swsusp-and-swap-files.txt b/Documentation/power/swsusp-and-swap-files.txt
new file mode 100644 (file)
index 0000000..06f911a
--- /dev/null
@@ -0,0 +1,60 @@
+Using swap files with software suspend (swsusp)
+       (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+
+The Linux kernel handles swap files almost in the same way as it handles swap
+partitions and there are only two differences between these two types of swap
+areas:
+(1) swap files need not be contiguous,
+(2) the header of a swap file is not in the first block of the partition that
+holds it.  From the swsusp's point of view (1) is not a problem, because it is
+already taken care of by the swap-handling code, but (2) has to be taken into
+consideration.
+
+In principle the location of a swap file's header may be determined with the
+help of the appropriate filesystem driver.  Unfortunately, however, this
+requires the filesystem holding the swap file to be mounted, and if this
+filesystem is journaled, it cannot be mounted during resume from disk.  For
+this reason, to identify a swap file, swsusp uses the name of the partition
+that holds the file
+and the offset from the beginning of the partition at which the swap file's
+header is located.  For convenience, this offset is expressed in <PAGE_SIZE>
+units.
+
+In order to use a swap file with swsusp, you need to:
+
+1) Create the swap file and make it active, eg.
+
+# dd if=/dev/zero of=<swap_file_path> bs=1024 count=<swap_file_size_in_k>
+# mkswap <swap_file_path>
+# swapon <swap_file_path>
+
+2) Use an application that will bmap the swap file with the help of the
+FIBMAP ioctl and determine the location of the file's swap header, as the
+offset, in <PAGE_SIZE> units, from the beginning of the partition which
+holds the swap file (see the example sketch below).
+
+3) Add the following parameters to the kernel command line:
+
+resume=<swap_file_partition> resume_offset=<swap_file_offset>
+
+where <swap_file_partition> is the partition on which the swap file is located
+and <swap_file_offset> is the offset of the swap header determined by the
+application in 2) (of course, this step may be carried out automatically
+by the same application that determines the swap file's header offset using the
+FIBMAP ioctl).
+
+OR
+
+Use a userland suspend application that will set the partition and offset
+with the help of the SNAPSHOT_SET_SWAP_AREA ioctl described in
+Documentation/power/userland-swsusp.txt (this is the only method to suspend
+to a swap file allowing the resume to be initiated from an initrd or initramfs
+image).
+
+Now, swsusp will use the swap file in the same way in which it would use a swap
+partition.  In particular, the swap file has to be active (ie. be present in
+/proc/swaps) so that it can be used for suspending.
+
+Note that if the swap file used for suspending is deleted and recreated,
+the location of its header need not be the same as before.  Thus every time
+this happens the value of the "resume_offset=" kernel command line parameter
+has to be updated.
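
The application referred to in step 2) above is not included in the kernel
tree; what follows is only a minimal sketch of how such a tool could be
written.  The file name, error handling and the assumption that the
filesystem block size lines up with PAGE_SIZE are illustrative, not
authoritative:

	/*
	 * swap-offset.c - minimal sketch, not a reference implementation:
	 * print a candidate value for the resume_offset= parameter of a
	 * swap file, using the FIGETBSZ and FIBMAP ioctls.  Assumes the
	 * filesystem block size lines up with PAGE_SIZE and must be run
	 * as root, since FIBMAP is privileged.
	 */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FIBMAP, FIGETBSZ */

	int main(int argc, char **argv)
	{
		int fd, blksize, block = 0;
		long pagesize = sysconf(_SC_PAGESIZE);

		if (argc != 2) {
			fprintf(stderr, "usage: %s <swap_file_path>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Filesystem block size in bytes. */
		if (ioctl(fd, FIGETBSZ, &blksize) < 0) {
			perror("FIGETBSZ");
			return 1;
		}
		/* Physical block number of logical block 0 (the swap header). */
		if (ioctl(fd, FIBMAP, &block) < 0) {
			perror("FIBMAP");
			return 1;
		}
		/* Convert filesystem blocks to <PAGE_SIZE> units. */
		printf("resume_offset=%lld\n",
		       (long long)block * blksize / pagesize);
		close(fd);
		return 0;
	}

Compile and run it against the active swap file (the output value below is
only an illustration), then pass the printed value as "resume_offset=" along
with "resume=<partition holding the file>" on the kernel command line:

	# gcc -o swap-offset swap-offset.c
	# ./swap-offset /swapfile
	resume_offset=38912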
index e635e6f1e316ca549834716c95fccc3c61b35748..0761ff6c57eddf65779e11d4991e751c250fe340 100644 (file)
@@ -297,20 +297,12 @@ system is shut down or suspended. Additionally use the encrypted
 suspend image to prevent sensitive data from being stolen after
 resume.
 
-Q: Why can't we suspend to a swap file?
+Q: Can I suspend to a swap file?
 
-A: Because accessing swap file needs the filesystem mounted, and
-filesystem might do something wrong (like replaying the journal)
-during mount.
-
-There are few ways to get that fixed:
-
-1) Probably could be solved by modifying every filesystem to support
-some kind of "really read-only!" option. Patches welcome.
-
-2) suspend2 gets around that by storing absolute positions in on-disk
-image (and blocksize), with resume parameter pointing directly to
-suspend header.
+A: Generally, yes, you can.  However, it requires you to use the "resume=" and
+"resume_offset=" kernel command line parameters, so the resume from a swap file
+cannot be initiated from an initrd or initramfs image.  See
+swsusp-and-swap-files.txt for details.
 
 Q: Is there a maximum system RAM size that is supported by swsusp?
 
index 64755e9285dbfe9236916bc7aca819b9d41f908e..000556c932e9fc4a4ede893f997d66833c4902c9 100644 (file)
@@ -9,9 +9,8 @@ done it already.
 Now, to use the userland interface for software suspend you need special
 utilities that will read/write the system memory snapshot from/to the
 kernel.  Such utilities are available, for example, from
-<http://www.sisk.pl/kernel/utilities/suspend>.  You may want to have
-a look at them if you are going to develop your own suspend/resume
-utilities.
+<http://suspend.sourceforge.net>.  You may want to have a look at them if you
+are going to develop your own suspend/resume utilities.
 
 The interface consists of a character device providing the open(),
 release(), read(), and write() operations as well as several ioctl()
@@ -21,9 +20,9 @@ be read from /sys/class/misc/snapshot/dev.
 
 The device can be open either for reading or for writing.  If open for
 reading, it is considered to be in the suspend mode.  Otherwise it is
-assumed to be in the resume mode.  The device cannot be open for reading
-and writing.  It is also impossible to have the device open more than once
-at a time.
+assumed to be in the resume mode.  The device cannot be open for simultaneous
+reading and writing.  It is also impossible to have the device open more than
+once at a time.
 
 The ioctl() commands recognized by the device are:
 
@@ -69,9 +68,46 @@ SNAPSHOT_FREE_SWAP_PAGES - free all swap pages allocated with
 SNAPSHOT_SET_SWAP_FILE - set the resume partition (the last ioctl() argument
        should specify the device's major and minor numbers in the old
        two-byte format, as returned by the stat() function in the .st_rdev
-       member of the stat structure); it is recommended to always use this
-       call, because the code to set the resume partition could be removed from
-       future kernels
+       member of the stat structure)
+
+SNAPSHOT_SET_SWAP_AREA - set the resume partition and the offset (in <PAGE_SIZE>
+       units) from the beginning of the partition at which the swap header is
+       located (the last ioctl() argument should point to a struct
+       resume_swap_area, as defined in kernel/power/power.h, containing the
+       resume device specification, as for the SNAPSHOT_SET_SWAP_FILE ioctl(),
+       and the offset); for swap partitions the offset is always 0, but it is
+       different from zero for swap files (please see
+       Documentation/power/swsusp-and-swap-files.txt for details).
+       The SNAPSHOT_SET_SWAP_AREA ioctl() is considered a replacement for
+       SNAPSHOT_SET_SWAP_FILE, which is regarded as obsolete.  It is
+       recommended to always use this call, because the code to set the resume
+       partition may be removed from future kernels
+
+SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to
+       immediately enter the suspend-to-RAM state, so this call must always
+       be preceded by the SNAPSHOT_FREEZE call and it is also necessary
+       to use the SNAPSHOT_UNFREEZE call after the system wakes up.  This call
+       is needed to implement the suspend-to-both mechanism in which the
+       suspend image is first created, as though the system had been suspended
+       to disk, and then the system is suspended to RAM (this makes it possible
+       to resume the system from RAM if there's enough battery power or restore
+       its state on the basis of the saved suspend image otherwise)
+
+SNAPSHOT_PMOPS - enable the usage of the pmops->prepare, pmops->enter and
+       pmops->finish methods (the in-kernel swsusp knows these as the "platform
+       method") which are needed on many machines to (among others) speed up
+       the resume by letting the BIOS skip some steps or to let the system
+       recognise the correct state of the hardware after the resume (in
+       particular on many machines this ensures that unplugged AC
+       adapters get correctly detected and that kacpid does not run wild after
+       the resume).  The last ioctl() argument can take one of the three
+       values, defined in kernel/power/power.h:
+       PMOPS_PREPARE - make the kernel carry out the
+               pm_ops->prepare(PM_SUSPEND_DISK) operation
+       PMOPS_ENTER - make the kernel power off the system by calling
+               pm_ops->enter(PM_SUSPEND_DISK)
+       PMOPS_FINISH - make the kernel carry out the
+               pm_ops->finish(PM_SUSPEND_DISK) operation
 
 The device's read() operation can be used to transfer the snapshot image from
 the kernel.  It has the following limitations:
@@ -91,10 +127,12 @@ unfreeze user space processes frozen by SNAPSHOT_UNFREEZE if they are
 still frozen when the device is being closed).
 
 Currently it is assumed that the userland utilities reading/writing the
-snapshot image from/to the kernel will use a swap partition, called the resume
-partition, as storage space.  However, this is not really required, as they
-can use, for example, a special (blank) suspend partition or a file on a partition
-that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and mounted afterwards.
+snapshot image from/to the kernel will use a swap partition, called the resume
+partition, or a swap file as storage space (if a swap file is used, the resume
+partition is the partition that holds this file).  However, this is not really
+required, as they can use, for example, a special (blank) suspend partition or
+a file on a partition that is unmounted before SNAPSHOT_ATOMIC_SNAPSHOT and
+mounted afterwards.
 
 These utilities SHOULD NOT make any assumptions regarding the ordering of
 data within the snapshot image, except for the image header that MAY be
index 4ac2d641fcb6a9b848fdb3c0761ea57b0ec25cb4..b3bd36668db3a4cef12a7996dfb51f78180da129 100644 (file)
@@ -6,6 +6,8 @@
     IBM Corp.
 (c) 2005 Becky Bruce <becky.bruce at freescale.com>,
     Freescale Semiconductor, FSL SOC and 32-bit additions
+(c) 2006 MontaVista Software, Inc.
+    Flash chip node definition
 
    May 18, 2005: Rev 0.1 - Initial draft, no chapter III yet.
 
@@ -1693,6 +1695,43 @@ platforms are moved over to use the flattened-device-tree model.
                };
        };
 
+    g) Flash chip nodes
+
+    Flash chips (Memory Technology Devices) are often used for solid state
+    file systems on embedded devices.
+
+    Required properties:
+
+     - device_type : has to be "rom"
+     - compatible : Should specify what this ROM device is compatible with
+       (e.g. "onenand"). Currently, this is most likely to be "direct-mapped"
+       (which corresponds to the MTD physmap mapping driver).
+     - regs : Offset and length of the register set (or memory mapping) for
+       the device.
+
+    Recommended properties :
+
+     - bank-width : Width of the flash data bus in bytes. Required
+       for the NOR flashes (compatible == "direct-mapped" and others) ONLY.
+     - partitions : Several pairs of 32-bit values where the first value is
+       the partition's offset from the start of the device and the second one
+       is the partition size in bytes, with the LSB used to signify a read-only
+       partition (so, the partition size should always be an even number).
+     - partition-names : The list of concatenated zero terminated strings
+       representing the partition names.
+
+   Example:
+
+       flash@ff000000 {
+               device_type = "rom";
+               compatible = "direct-mapped";
+               regs = <ff000000 01000000>;
+               bank-width = <4>;
+               partitions = <00000000 00f80000
+                             00f80000 00080001>;
+               partition-names = "fs\0firmware";
+       };
+
    More devices will be defined as this spec matures.
 
 
diff --git a/Documentation/powerpc/mpc52xx-device-tree-bindings.txt b/Documentation/powerpc/mpc52xx-device-tree-bindings.txt
new file mode 100644 (file)
index 0000000..d077d76
--- /dev/null
@@ -0,0 +1,189 @@
+MPC52xx Device Tree Bindings
+----------------------------
+
+(c) 2006 Secret Lab Technologies Ltd
+Grant Likely <grant.likely at secretlab.ca>
+
+I - Introduction
+================
+Boards supported by the arch/powerpc architecture require a device tree to be
+passed by the boot loader to the kernel at boot time.  The device tree
+describes what devices are present on the board and how they are
+connected.  The device tree can either be passed as a binary blob (as
+described in Documentation/powerpc/booting-without-of.txt), or passed
+by Open Firmware (IEEE 1275) compatible firmware using an OF compatible
+client interface API.
+
+This document specifies the requirements on the device-tree for mpc52xx
+based boards.  These requirements are above and beyond the details
+specified in either the OpenFirmware spec or booting-without-of.txt
+
+All new mpc52xx-based boards are expected to match this document.  In
+cases where this document is not sufficient to support a new board port,
+this document should be updated as part of adding the new board support.
+
+II - Philosophy
+===============
+The core of this document is naming convention.  The whole point of
+defining this convention is to reduce or eliminate the number of
+special cases required to support a 52xx board.  If all 52xx boards
+follow the same convention, then generic 52xx support code will just work,
+rather than special cases having to be coded for each new board.
+
+This section tries to capture the thought process behind why the naming
+convention is what it is.
+
+1. Node names
+-------------
+There are strong conventions/requirements already established for children
+of the root node.  'cpus' describes the processor cores, 'memory'
+describes memory, and 'chosen' provides boot configuration.  Other nodes
+are added to describe devices attached to the processor local bus.
+Following convention already established with other system-on-chip
+processors, MPC52xx boards must have an 'soc5200' node as a child of the
+root node.
+
+The soc5200 node holds child nodes for all on chip devices.  Child nodes
+are typically named after the configured function.  ie. the FEC node is
+named 'ethernet', and a PSC in uart mode is named 'serial'.
+
+2. device_type property
+-----------------------
+Similar to the node name convention above, the device_type reflects the
+configured function of a device.  ie. 'serial' for a uart and 'spi' for
+an spi controller.  However, while node names *should* reflect the
+configured function, device_type *must* match the configured function
+exactly.
+
+3. compatible property
+----------------------
+Since device_type isn't enough to match devices to drivers, there also
+needs to be a naming convention for the compatible property.  Compatible
+is a list of device descriptions sorted from specific to generic.  For
+the mpc52xx, the required format for each compatible value is
+<chip>-<device>[-<mode>].  At the minimum, the list shall contain two
+items; the first specifying the exact chip, and the second specifying
+mpc52xx for the chip.
+
+ie. ethernet on mpc5200b: compatible = "mpc5200b-ethernet\0mpc52xx-ethernet"
+
+The idea here is that most drivers will match to the most generic field
+in the compatible list (mpc52xx-*), but can also test the more specific
+field for enabling bug fixes or extra features.
+
+Modal devices, like PSCs, also append the configured function to the
+end of the compatible field.  ie. A PSC in i2s mode would specify
+"mpc52xx-psc-i2s", not "mpc52xx-i2s".  This convention is chosen to
+avoid naming conflicts with non-psc devices providing the same
+function.  For example, "mpc52xx-spi" and "mpc52xx-psc-spi" describe
+the mpc5200 simple spi device and a PSC spi mode respectively.
+
+If the soc device is more generic and present on other SOCs, the
+compatible property can specify the more generic device type also.
+
+ie. mscan: compatible = "mpc5200-mscan\0mpc52xx-mscan\0fsl,mscan";
+
+At the time of writing, the exact chip may be either 'mpc5200' or
+'mpc5200b'.
+
+Device drivers should always try to match as generically as possible.
+
+III - Structure
+===============
+The device tree for an mpc52xx board follows the structure defined in
+booting-without-of.txt with the following additional notes:
+
+0) the root node
+----------------
+Typical root description node; see booting-without-of.
+
+1) The cpus node
+----------------
+The cpus node follows the basic layout described in booting-without-of.
+The bus-frequency property holds the XLB bus frequency.
+The clock-frequency property holds the core frequency.
+
+2) The memory node
+------------------
+Typical memory description node; see booting-without-of.
+
+3) The soc5200 node
+-------------------
+This node describes the on chip SOC peripherals.  Every mpc52xx based
+board will have this node, and as such there is a common naming
+convention for SOC devices.
+
+Required properties:
+name                   type            description
+----                   ----            -----------
+device_type            string          must be "soc"
+ranges                 int             should be <0 baseaddr baseaddr+10000>
+reg                    int             must be <baseaddr 10000>
+
+Recommended properties:
+name                   type            description
+----                   ----            -----------
+compatible             string          should be "<chip>-soc\0mpc52xx-soc"
+                                       ie. "mpc5200b-soc\0mpc52xx-soc"
+#interrupt-cells       int             must be <3>.  If it is not defined
+                                       here then it must be defined in every
+                                       soc device node.
+bus-frequency          int             IPB bus frequency in Hz.  Clock rate
+                                       used by most of the soc devices.
+                                       Defining it here avoids needing it
+                                       added to every device node.
+
+4) soc5200 child nodes
+----------------------
+Any on chip SOC devices available to Linux must appear as soc5200 child nodes.
+
+Note: in the tables below, '*' matches all <chip> values.  ie.
+*-pic would translate to "mpc5200-pic\0mpc52xx-pic"
+
+Required soc5200 child nodes:
+name           device_type             compatible      Description
+----           -----------             ----------      -----------
+cdm@<addr>     cdm                     *-cdm           Clock Distribution
+pic@<addr>     interrupt-controller    *-pic           need an interrupt
+                                                       controller to boot
+bestcomm@<addr>        dma-controller          *-bestcomm      52xx pic also requires
+                                                       the bestcomm device
+
+Recommended soc5200 child nodes; populate as needed for your board
+name           device_type     compatible      Description
+----           -----------     ----------      -----------
+gpt@<addr>     gpt             *-gpt           General purpose timers
+rtc@<addr>     rtc             *-rtc           Real time clock
+mscan@<addr>   mscan           *-mscan         CAN bus controller
+pci@<addr>     pci             *-pci           PCI bridge
+serial@<addr>  serial          *-psc-uart      PSC in serial mode
+i2s@<addr>     i2s             *-psc-i2s       PSC in i2s mode
+ac97@<addr>    ac97            *-psc-ac97      PSC in ac97 mode
+spi@<addr>     spi             *-psc-spi       PSC in spi mode
+irda@<addr>    irda            *-psc-irda      PSC in IrDA mode
+spi@<addr>     spi             *-spi           MPC52xx spi device
+ethernet@<addr>        network         *-fec           MPC52xx ethernet device
+ata@<addr>     ata             *-ata           IDE ATA interface
+i2c@<addr>     i2c             *-i2c           I2C controller
+usb@<addr>     usb-ohci-be     *-ohci,ohci-be  USB controller
+xlb@<addr>     xlb             *-xlb           XLB arbitrator
+
+IV - Extra Notes
+================
+
+1. Interrupt mapping
+--------------------
+The mpc52xx pic driver splits hardware IRQ numbers into two levels.  The
+split reflects the layout of the PIC hardware itself, which groups
+interrupts into one of three groups: CRIT, MAIN or PERP.  Also, the
+Bestcomm dma engine has its own set of interrupt sources which are
+cascaded off of peripheral interrupt 0, which the driver interprets as a
+fourth group, SDMA.
+
+The interrupts property for device nodes using the mpc52xx pic consists
+of three cells: <L1 L2 level>
+
+    L1 := [CRIT=0, MAIN=1, PERP=2, SDMA=3]
+    L2 := interrupt number; directly mapped from the value in the
+          "ICTL PerStat, MainStat, CritStat Encoded Register"
+    level := [LEVEL_HIGH=0, EDGE_RISING=1, EDGE_FALLING=2, LEVEL_LOW=3]
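
As a purely illustrative example (the source number 5 below is invented; real
numbers come from the register documentation quoted above), a peripheral wired
to MAIN group source 5 with a low-level trigger would carry:

	interrupts = <1 5 3>;	/* L1=MAIN, L2=5, level=LEVEL_LOW */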
index 75a535a975c361986b40aa881cda3a49997455ae..6f70f2b9327e1f0db7bc05bdbf2d6ce3b2fcbdcf 100644 (file)
@@ -375,7 +375,6 @@ Summary:
    scsi_add_device - creates new scsi device (lu) instance
    scsi_add_host - perform sysfs registration and set up transport class
    scsi_adjust_queue_depth - change the queue depth on a SCSI device
-   scsi_assign_lock - replace default host_lock with given lock
    scsi_bios_ptable - return copy of block device's partition table
    scsi_block_requests - prevent further commands being queued to given host
    scsi_deactivate_tcq - turn off tag command queueing
@@ -488,20 +487,6 @@ void scsi_adjust_queue_depth(struct scsi_device * sdev, int tagged,
                              int tags)
 
 
-/**
- * scsi_assign_lock - replace default host_lock with given lock
- * @shost: a pointer to a scsi host instance
- * @lock: pointer to lock to replace host_lock for this host
- *
- *      Returns nothing
- *
- *      Might block: no
- *
- *      Defined in: include/scsi/scsi_host.h .
- **/
-void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
-
-
 /**
  * scsi_bios_ptable - return copy of block device's partition table
  * @dev:        pointer to block device
@@ -1366,17 +1351,11 @@ Locks
 Each struct Scsi_Host instance has a spin_lock called struct 
 Scsi_Host::default_lock which is initialized in scsi_host_alloc() [found in 
 hosts.c]. Within the same function the struct Scsi_Host::host_lock pointer
-is initialized to point at default_lock with the scsi_assign_lock() function.
-Thereafter lock and unlock operations performed by the mid level use the
-struct Scsi_Host::host_lock pointer.
-
-LLDs can override the use of struct Scsi_Host::default_lock by
-using scsi_assign_lock(). The earliest opportunity to do this would
-be in the detect() function after it has invoked scsi_register(). It
-could be replaced by a coarser grain lock (e.g. per driver) or a
-lock of equal granularity (i.e. per host). Using finer grain locks 
-(e.g. per SCSI device) may be possible by juggling locks in
-queuecommand().
+is initialized to point at default_lock.  Thereafter lock and unlock
+operations performed by the mid level use the struct Scsi_Host::host_lock
+pointer.  Previously drivers could override the host_lock pointer but
+this is not allowed anymore.
+
 
 Autosense
 =========
index f39c9d714db3d6bf2f6440d2f6cf9353057eeae5..a2afca3b2bab6fb923fb9eda102073606d15c278 100644 (file)
@@ -62,9 +62,6 @@ consider the following facts about the Linux kernel:
       - different structures can contain different fields
       - Some functions may not be implemented at all, (i.e. some locks
        compile away to nothing for non-SMP builds.)
-      - Parameter passing of variables from function to function can be
-       done in different ways (the CONFIG_REGPARM option controls
-       this.)
       - Memory within the kernel can be aligned in different ways,
        depending on the build options.
   - Linux runs on a wide range of different processor architectures.
index 0bc7f1e3c9e6aa2e340bec8f512684351630fc9f..5922e84d913340b98891a8e583663ab55ad3c5a6 100644 (file)
@@ -27,6 +27,7 @@ show up in /proc/sys/kernel:
 - hotplug
 - java-appletviewer           [ binfmt_java, obsolete ]
 - java-interpreter            [ binfmt_java, obsolete ]
+- kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
 - modprobe                    ==> Documentation/kmod.txt
 - msgmax
@@ -170,6 +171,13 @@ This flag controls the L2 cache of G3 processor boards. If
 
 ==============================================================
 
+kstack_depth_to_print: (X86 only)
+
+Controls the number of words to print when dumping the raw
+kernel stack.
+
+==============================================================
+
 osrelease, ostype & version:
 
 # cat osrelease
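
Going back to kstack_depth_to_print above: to make raw stack dumps show 32
words, for instance (the value 32 is arbitrary, chosen only for illustration),
one would do:

	# echo 32 > /proc/sys/kernel/kstack_depth_to_print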
index f3c57f43ba6460e5b5927eaee2c86e2cfcaf5fa3..dbdcaf68e3ea382304784bf66fce502216f1d1f1 100644 (file)
@@ -52,10 +52,6 @@ APICs
                 apicmaintimer. Useful when your PIT timer is totally
                 broken.
 
-   disable_8254_timer / enable_8254_timer
-                Enable interrupt 0 timer routing over the 8254 in addition to over
-                the IO-APIC. The kernel tries to set a sensible default.
-
 Early Console
 
    syntax: earlyprintk=vga
@@ -183,7 +179,7 @@ PCI
 IOMMU
 
  iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
-         [,forcesac][,fullflush][,nomerge][,noaperture]
+         [,forcesac][,fullflush][,nomerge][,noaperture][,calgary]
    size  set size of iommu (in bytes)
    noagp don't initialize the AGP driver and use full aperture.
    off   don't use the IOMMU
@@ -204,6 +200,7 @@ IOMMU
            buffering.
    nodac    Forbid DMA >4GB
    panic    Always panic when IOMMU overflows
+   calgary  Use the Calgary IOMMU if it is available
 
   swiotlb=pages[,force]
 
index 8385a69138a802cb42658f60e1363f03ba684e80..cf24400213f8fc75b51d2f93bbafa60ac0a5593c 100644 (file)
@@ -1091,13 +1091,19 @@ M:      miku@iki.fi
 S:     Maintained
 
 EXT2 FILE SYSTEM
-L:     ext2-devel@lists.sourceforge.net
+L:     linux-ext4@vger.kernel.org
 S:     Maintained
 
 EXT3 FILE SYSTEM
 P:     Stephen Tweedie, Andrew Morton
 M:     sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
-L:     ext2-devel@lists.sourceforge.net
+L:     linux-ext4@vger.kernel.org
+S:     Maintained
+
+EXT4 FILE SYSTEM
+P:     Stephen Tweedie, Andrew Morton
+M:     sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com
+L:     linux-ext4@vger.kernel.org
 S:     Maintained
 
 F71805F HARDWARE MONITORING DRIVER
@@ -1214,7 +1220,8 @@ HARDWARE MONITORING
 P:     Jean Delvare
 M:     khali@linux-fr.org
 L:     lm-sensors@lm-sensors.org
-W:     http://www.lm-sensors.nu/
+W:     http://www.lm-sensors.org/
+T:     quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-hwmon/
 S:     Maintained
 
 HARDWARE RANDOM NUMBER GENERATOR CORE
@@ -1340,8 +1347,7 @@ I2C SUBSYSTEM
 P:     Jean Delvare
 M:     khali@linux-fr.org
 L:     i2c@lm-sensors.org
-W:     http://www.lm-sensors.nu/
-T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:     quilt http://khali.linux-fr.org/devel/linux-2.6/jdelvare-i2c/
 S:     Maintained
 
 I2O
@@ -1673,7 +1679,7 @@ S:        Supported
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
 P:     Stephen Tweedie, Andrew Morton
 M:     sct@redhat.com, akpm@osdl.org
-L:     ext2-devel@lists.sourceforge.net
+L:     linux-ext4@vger.kernel.org
 S:     Maintained
 
 K8TEMP HARDWARE MONITORING DRIVER
@@ -2438,6 +2444,13 @@ M:       promise@pnd-pc.demon.co.uk
 W:     http://www.pnd-pc.demon.co.uk/promise/
 S:     Maintained
 
+PS3 PLATFORM SUPPORT
+P:     Geoff Levand
+M:     geoffrey.levand@am.sony.com
+L:     linuxppc-dev@ozlabs.org
+L:     cbe-oss-dev@ozlabs.org
+S:     Supported
+
 PVRUSB2 VIDEO4LINUX DRIVER
 P:     Mike Isely
 M:     isely@pobox.com
@@ -2906,7 +2919,6 @@ S:        Maintained
 SUN3/3X
 P:     Sam Creasey
 M:     sammy@sammy.net
-L:     sun3-list@redhat.com
 W:     http://sammy.net/sun3/
 S:     Maintained
 
@@ -3447,6 +3459,12 @@ W:       http://oss.sgi.com/projects/xfs
 T:     git git://oss.sgi.com:8090/xfs/xfs-2.6
 S:     Supported
 
+XILINX UARTLITE SERIAL DRIVER
+P:     Peter Korsgaard
+M:     jacmet@sunsite.dk
+L:     linux-serial@vger.kernel.org
+S:     Maintained
+
 X86 3-LEVEL PAGING (PAE) SUPPORT
 P:     Ingo Molnar
 M:     mingo@redhat.com
diff --git a/README b/README
index 3e264723b863add275e6f32c44762f8d09cfe0a0..c0556152302981b8369325d17d5d5634af064be9 100644 (file)
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-       Linux kernel release 2.6.xx <http://kernel.org>
+       Linux kernel release 2.6.xx <http://kernel.org/>
 
 These are the release notes for Linux version 2.6.  Read them carefully,
 as they tell you what this is all about, explain how to install the
@@ -22,15 +22,17 @@ ON WHAT HARDWARE DOES IT RUN?
 
   Although originally developed first for 32-bit x86-based PCs (386 or higher),
   today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
-  UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH,
+  UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
   IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
-  and Renesas M32R architectures.
+  Xtensa, AVR32 and Renesas M32R architectures.
 
   Linux is easily portable to most general-purpose 32- or 64-bit architectures
   as long as they have a paged memory management unit (PMMU) and a port of the
   GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC). Linux has
   also been ported to a number of architectures without a PMMU, although
   functionality is then obviously somewhat limited.
+  Linux has also been ported to itself. You can now run the kernel as a
+  userspace application - this is called UserMode Linux (UML).
 
 DOCUMENTATION:
 
@@ -113,6 +115,7 @@ INSTALLING the kernel:
    version 2.6.12.2 and want to jump to 2.6.12.3, you must first
    reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
    the 2.6.12.3 patch.
+   You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
 
@@ -161,6 +164,7 @@ CONFIGURING the kernel:
    only ask you for the answers to new questions.
 
  - Alternate configuration commands are:
+       "make config"      Plain text interface.
        "make menuconfig"  Text based color menus, radiolists & dialogs.
        "make xconfig"     X windows (Qt) based configuration tool.
        "make gconfig"     X windows (Gtk) based configuration tool.
@@ -303,8 +307,9 @@ IF SOMETHING GOES WRONG:
 
  - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
    as is, otherwise you will have to use the "ksymoops" program to make
-   sense of the dump.  This utility can be downloaded from
-   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops.
+   sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
+   This utility can be downloaded from
+   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ .
    Alternately you can do the dump lookup by hand:
 
  - In debugging dumps like the above, it helps enormously if you can
@@ -336,7 +341,7 @@ IF SOMETHING GOES WRONG:
 
    If you for some reason cannot do the above (you have a pre-compiled
    kernel image or similar), telling me as much about your setup as
-   possible will help. 
+   possible will help.  Please read the REPORTING-BUGS document for details.
 
  - Alternately, you can use gdb on a running kernel. (read-only; i.e. you
    cannot change values or set break points.) To do this, first compile the
index f9da827a0c1880154757981e1430f01f071181f2..ac02e42a2627f359575ae0e689afca911a75e0ff 100644 (file)
@@ -40,7 +40,9 @@ summary from [1.]>" for easy identification by the developers.
 [1.] One line summary of the problem:
 [2.] Full description of the problem/report:
 [3.] Keywords (i.e., modules, networking, kernel):
-[4.] Kernel version (from /proc/version):
+[4.] Kernel information
+[4.1.] Kernel version (from /proc/version):
+[4.2.] Kernel .config file:
 [5.] Most recent kernel version which did not have the bug:
 [6.] Output of Oops.. message (if applicable) with symbolic information
      resolved (see Documentation/oops-tracing.txt)
index ffb7d5423cc00210fc99e1bf5d6b98cd8d68f1fb..3c10b9a1ddf51fa138bb7e7de192d4d70e4d4544 100644 (file)
@@ -516,10 +516,11 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
                if (bus == 0 && dfn == 0) {
                        hose = pci_isa_hose;
                } else {
-                       dev = pci_find_slot(bus, dfn);
+                       dev = pci_get_bus_and_slot(bus, dfn);
                        if (!dev)
                                return -ENODEV;
                        hose = dev->sysdata;
+                       pci_dev_put(dev);
                }
        }
 
index b8b817feb1eedb8db7be5baa8df5639cb02a4b0f..910b43cd63e830b7f1a8664b3a4833b1a81e2e8e 100644 (file)
@@ -183,11 +183,15 @@ miata_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 
        if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) {
                u8 irq=0;
-
-               if(pci_read_config_byte(pci_find_slot(dev->bus->number, dev->devfn & ~(7)), 0x40,&irq)!=PCIBIOS_SUCCESSFUL)
+               struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7);
+               if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) {
+                       pci_dev_put(pdev);
                        return -1;
-               else    
+               }
+               else    {
+                       pci_dev_put(pdev);
                        return irq;
+               }
        }
 
        return COMMON_TABLE_LOOKUP;
index 93744bab73fb24ac63d6ba9406a4a3c4d0a294fb..e7594a7cf5850830c0d5b75ccece2e0914a842bd 100644 (file)
@@ -200,7 +200,7 @@ nautilus_init_pci(void)
        bus = pci_scan_bus(0, alpha_mv.pci_ops, hose);
        hose->bus = bus;
 
-       irongate = pci_find_slot(0, 0);
+       irongate = pci_get_bus_and_slot(0, 0);
        bus->self = irongate;
        bus->resource[1] = &irongate_mem;
 
index 8871529a34e2026e4f2bd4eda531df7ec59808a4..8aa9db834c11551afc1462bf970d2dbc0ab8a157 100644 (file)
@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 
        /* If we're in an interrupt context, or have no user context,
           we must not take the fault.  */
-       if (!mm || in_interrupt())
+       if (!mm || in_atomic())
                goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
index 605dedf967907cf3f360f194e467ff5b0b5b7375..b3599743093ba8f5ece1bc6a3753ee4eb70ee852 100644 (file)
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 
 
 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
 EXPORT_SYMBOL(sharpsl_battery_kick);
 
 
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
        int voltage, percent, apm_status, i = 0;
 
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
        /* Corgi cannot confirm when battery fully charged so periodically kick! */
        if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
                        && time_after(jiffies, sharpsl_pm.charge_start_time +  SHARPSL_CHARGE_ON_TIME_INTERVAL))
-               schedule_work(&toggle_charger);
+               schedule_delayed_work(&toggle_charger, 0);
 
        while(1) {
                voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
        sharpsl_pm_led(SHARPSL_LED_OFF);
        sharpsl_pm.charge_mode = CHRG_OFF;
 
-       schedule_work(&sharpsl_bat);
+       schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
        sharpsl_pm.charge_mode = CHRG_ERROR;
 }
 
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
        dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
 
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
        else if (sharpsl_pm.charge_mode == CHRG_ON)
                sharpsl_charge_off();
 
-       schedule_work(&sharpsl_bat);
+       schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
                        sharpsl_charge_off();
        } else if (sharpsl_pm.full_count < 2) {
                dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-               schedule_work(&toggle_charger);
+               schedule_delayed_work(&toggle_charger, 0);
        } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
                dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-               schedule_work(&toggle_charger);
+               schedule_delayed_work(&toggle_charger, 0);
        } else {
                sharpsl_charge_off();
                sharpsl_pm.charge_mode = CHRG_DONE;
index 48cf7fffddf2add8d663fc4db96b019ed3eaea04..f38a60a03b8c93f5b2e36fde8b985dc7196e47a3 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/signal.h>
 #include <linux/ptrace.h>
 #include <linux/personality.h>
+#include <linux/freezer.h>
 
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
index f225a083dee1bb13073127a6cc05c6a21e199cfa..9d2346fb68f41da85c51dd0211a6e826121c0b33 100644 (file)
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
 
        cancel_delayed_work(&irda_config->gpio_expa);
        PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-       schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+       schedule_delayed_work(&irda_config->gpio_expa, 0);
 
        return 0;
 }
index dbc555d209ff914eeff8b0b51d77d814212e430b..cbe909bad79b3fde3e1b064b69bdb78e081b56f1 100644 (file)
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
        .rows           = 8,
        .cols           = 8,
        .keymap         = nokia770_keymap,
-       .keymapsize     = ARRAY_SIZE(nokia770_keymap)
+       .keymapsize     = ARRAY_SIZE(nokia770_keymap),
        .delay          = 4,
 };
 
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
                printk("HP connected\n");
 }
 
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
        down(&audio_pwr_sem);
        if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
        up(&audio_pwr_sem);
 }
 
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 
 static void nokia770_audio_pwr_down(void)
 {
index a611c3b6395491e250cd50be7005b67c5171e307..6dcd10ab4496f765a7681731e85c1a8ee7c36f30 100644 (file)
@@ -55,7 +55,7 @@ static inline void omap_init_irda(void) {}
 
 /*-------------------------------------------------------------------------*/
 
-#if    defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC)
+#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
 
 #define        OMAP_RTC_BASE           0xfffb4800
 
index 3b29e59b0e6f47d3da318ae426483ab304a42518..0cbf1b0071f8737057205590952a77be2296763c 100644 (file)
@@ -35,7 +35,7 @@ static u8 hw_led_state;
 
 static u8 tps_leds_change;
 
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
        for (;;) {
                u8      leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
        }
 }
 
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 
 #ifdef CONFIG_OMAP_OSK_MISTRAL
 
index 26a95a642ad7a1f4dde8227b034f22120ba554b0..3b1ad1d981a359288dc67c84c6f5416e5b1042c7 100644 (file)
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
 
        cancel_delayed_work(&irda_config->gpio_expa);
        PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-       schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+       schedule_delayed_work(&irda_config->gpio_expa, 0);
 
        return 0;
 }
index 1b398742ab564dc0a874ec20a7e00553feb249e9..12d2fe0ceff6c8032fc90eab06370f114146d1a6 100644 (file)
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
 
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 
 
 /*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
 
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
        if (akita_ioexp_device)
                max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
index 3d211dc2f2f90923136b55a8e814e842463e49d0..01abb0ace234ad823df9ad39b90ab719f1167d4e 100644 (file)
@@ -40,7 +40,7 @@
 
 /* io map for dma */
 static void __iomem *dma_base;
-static kmem_cache_t *dma_kmem;
+static struct kmem_cache *dma_kmem;
 
 struct s3c24xx_dma_selection dma_sel;
 
@@ -1271,7 +1271,7 @@ struct sysdev_class dma_sysclass = {
 
 /* kmem cache implementation */
 
-static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
+static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
 {
        memset(p, 0, sizeof(struct s3c2410_dma_buf));
 }
index 5e658a8744984688ee171e16b67a44aee65c1331..9fd6d2eafb40c0bf06e80c0643f7d5bb020f8173 100644 (file)
@@ -230,7 +230,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        /*
index 047d0a408b9deb25c10a9db2d32c144353ccf193..43dd41be71fb80ac1ac7f0898924c77902b9bd5f 100644 (file)
@@ -620,12 +620,10 @@ ecard_probe(int slot, card_type_t type)
        struct ex_ecid cid;
        int i, rc = -ENOMEM;
 
-       ec = kmalloc(sizeof(ecard_t), GFP_KERNEL);
+       ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
        if (!ec)
                goto nomem;
 
-       memset(ec, 0, sizeof(ecard_t));
-
        ec->slot_no     = slot;
        ec->type        = type;
        ec->irq         = NO_IRQ;
index a1f6d8a9cc32919084d14b5cf9181dbceb013ca6..93c0cee0fb5e142e45c1344548943e48d88bca06 100644 (file)
@@ -215,7 +215,7 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        down_read(&mm->mmap_sem);
index 34def6397c3c5c718227f7e17c7dae4877d9cb03..f2901581d4dacd9aba26982c85918cd674f1dbcc 100644 (file)
@@ -24,7 +24,7 @@
 
 #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
 
-kmem_cache_t *pte_cache, *pgd_cache;
+struct kmem_cache *pte_cache, *pgd_cache;
 int page_nr;
 
 /*
@@ -162,12 +162,12 @@ void __init create_memmap_holes(struct meminfo *mi)
 {
 }
 
-static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
 {
        memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
 }
 
-static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
+static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
 {
        memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
 }
index ca41fc1edbe1aa26711343a3fcae488899de7882..d0abbcaf1c1e3dac5a6d7eb34313ca0527003d04 100644 (file)
@@ -154,6 +154,7 @@ ss_probe:
        return 1;
 
 no_kprobe:
+       preempt_enable_no_resched();
        return ret;
 }
 
index 33096651c24f64f494b1c4355f8de60fb155af82..0ec14854a2000f272044706265cf0e400c120e77 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
index 44ab8a7bdae2705b32f316be2dd7b9947487cd63..b68d669f823de0a39f1f8bedebc3f5bd13429264 100644 (file)
@@ -11,7 +11,7 @@
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
        /*
         * No need to sync an uncached area
index 934c51078ccee4bafca2b68b7752b1745a63ee03..c73e91f1299a773ec8964f512f169429e16b6175 100644 (file)
@@ -232,7 +232,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
         * context, we must not take the fault..
         */
 
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        down_read(&mm->mmap_sem);
index eae874a970c62f6750b0c6d41a56ab76078f7ac0..14f64b054c7ebe726ee0388f5ee5fe068dc7669c 100644 (file)
@@ -10,9 +10,9 @@
  */
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 /*
  * the various futex operations; MMU fault checking is ignored under no-MMU
@@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
                break;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index a8c61dac1cee29185ee234cb913a680789cbdc13..1a5eb6c301c9b0e833d389953d76650eb70390fe 100644 (file)
@@ -947,7 +947,7 @@ static void __init setup_linux_memory(void)
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE);
-                       initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+                       initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start + INITRD_SIZE;
                }
                else {
index b8a5882b862562332ee0833a0f75cf5aedc2f419..85baeae9666aeae35f0a1e66277500c99c42a697 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
index 8b3eb50c510544c57d9a0cbc4093575400ba52cf..3f12296c3688856cec94e3aecc02522e00b6f7e7 100644 (file)
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        down_read(&mm->mmap_sem);
index f76dd03ddd99a3505ba250b2b5d55c15b24c0372..19b13be114a2695a9876b21f897f44b837feee06 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-kmem_cache_t *pgd_cache;
+struct kmem_cache *pgd_cache;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd)
                set_page_private(next, (unsigned long) pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags;
 
@@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
 
index 1077b71d52264c4f653d41f0889f6a4e22a0ffb6..6adf8f41d2a1f153fdf7ba9b1fc9598cb0674329 100644 (file)
@@ -116,7 +116,7 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #else
        if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && 
-           (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)
+           (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
            /* overlap userarea */
            memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
 #endif
index 7787f70a05bbaeee9e15188fc21f5cfe2d5f14bb..02955604d7606f76eabf4ee6bd3b0478aad1c10c 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/personality.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <asm/setup.h>
 #include <asm/uaccess.h>
index 756325dd480e40ba5cfe5056d23e2675d04ed6da..f05288be8878fb86efcf4bcae6b95fdafe69073d 100644 (file)
@@ -70,6 +70,7 @@ SECTIONS
 #endif
         .text :
        {
+       _text = .;
 #if defined(CONFIG_ROMKERNEL)
        *(.int_redirect)
 #endif
index 8ff1c6fb5aa135b678a3aa83047c1e57422ae6f8..ea70359b02d09660147fb5066684452c6f9c1698 100644 (file)
@@ -182,6 +182,17 @@ config X86_ES7000
 
 endchoice
 
+config PARAVIRT
+       bool "Paravirtualization support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       help
+         Paravirtualization is a way of running multiple instances of
+         Linux on the same machine, under a hypervisor.  This option
+         changes the kernel so it can modify itself when it is run
+         under a hypervisor, improving performance significantly.
+         However, when run without a hypervisor the kernel is
+         theoretically slower.  If in doubt, say N.
+
 config ACPI_SRAT
        bool
        default y
@@ -443,7 +454,8 @@ source "drivers/firmware/Kconfig"
 
 choice
        prompt "High Memory Support"
-       default NOHIGHMEM
+       default HIGHMEM4G if !X86_NUMAQ
+       default HIGHMEM64G if X86_NUMAQ
 
 config NOHIGHMEM
        bool "off"
@@ -710,20 +722,6 @@ config BOOT_IOREMAP
        depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
        default y
 
-config REGPARM
-       bool "Use register arguments"
-       default y
-       help
-       Compile the kernel with -mregparm=3. This instructs gcc to use
-       a more efficient function call ABI which passes the first three
-       arguments of a function call via registers, which results in denser
-       and faster code.
-
-       If this option is disabled, then the default ABI of passing
-       arguments via the stack is used.
-
-       If unsure, say Y.
-
 config SECCOMP
        bool "Enable seccomp to safely compute untrusted bytecode"
        depends on PROC_FS
@@ -773,23 +771,39 @@ config CRASH_DUMP
           PHYSICAL_START.
          For more details see Documentation/kdump/kdump.txt
 
-config PHYSICAL_START
-       hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+config RELOCATABLE
+       bool "Build a relocatable kernel(EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       help
+         This builds a kernel image that retains relocation information
+          so it can be loaded someplace besides the default 1MB.
+         The relocations tend to make the kernel binary about 10% larger,
+          but are discarded at runtime.
+
+         One use is for the kexec on panic case where the recovery kernel
+          must live at a different physical address than the primary
+          kernel.
 
-       default "0x1000000" if CRASH_DUMP
+config PHYSICAL_ALIGN
+       hex "Alignment value to which kernel should be aligned"
        default "0x100000"
+       range 0x2000 0x400000
        help
-         This gives the physical address where the kernel is loaded. Normally
-         for regular kernels this value is 0x100000 (1MB). But in the case
-         of kexec on panic the fail safe kernel needs to run at a different
-         address than the panic-ed kernel. This option is used to set the load
-         address for kernels used to capture crash dump on being kexec'ed
-         after panic. The default value for crash dump kernels is
-         0x1000000 (16MB). This can also be set based on the "X" value as
-         specified in the "crashkernel=YM@XM" command line boot parameter
-         passed to the panic-ed kernel. Typically this parameter is set as
-         crashkernel=64M@16M. Please take a look at
-         Documentation/kdump/kdump.txt for more details about crash dumps.
+         This value puts alignment restrictions on the physical address
+         where the kernel is loaded and run from.  The kernel is compiled
+         for an address which meets the above alignment restriction.
+
+         If the bootloader loads the kernel at a non-aligned address and
+         CONFIG_RELOCATABLE is set, the kernel will move itself to the
+         nearest address aligned to the above value and run from there.
+
+         If the bootloader loads the kernel at a non-aligned address and
+         CONFIG_RELOCATABLE is not set, the kernel will ignore the run time
+         load address, decompress itself to the address it has been compiled
+         for and run from there.  The address for which the kernel is
+         compiled already meets the above alignment restrictions, so the end
+         result is that the kernel runs from a physical address meeting the
+         above alignment restrictions.
 
          Don't change this unless you know what you are doing.
 
index fc4f2abccf06fdfd426b20b8195f1ae25d873c53..821fd269ca580137c5e959e1398f9ff17fb05089 100644 (file)
@@ -103,8 +103,15 @@ config MPENTIUMM
          Select this for Intel Pentium M (not Pentium-4 M)
          notebook chips.
 
+config MCORE2
+       bool "Core 2/newer Xeon"
+       help
+         Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
+         CPUs. You can distinguish newer from older Xeons by the CPU family
+         in /proc/cpuinfo. Newer ones have 6.
+
 config MPENTIUM4
-       bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+       bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
        help
          Select this for Intel Pentium 4 chips.  This includes the
          Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
@@ -229,7 +236,7 @@ config X86_L1_CACHE_SHIFT
        default "7" if MPENTIUM4 || X86_GENERIC
        default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
        default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-       default "6" if MK7 || MK8 || MPENTIUMM
+       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2
 
 config RWSEM_GENERIC_SPINLOCK
        bool
@@ -287,17 +294,17 @@ config X86_ALIGNMENT_16
 
 config X86_GOOD_APIC
        bool
-       depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
+       depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON || MCORE2
        default y
 
 config X86_INTEL_USERCOPY
        bool
-       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
+       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
        default y
 
 config X86_USE_PPRO_CHECKSUM
        bool
-       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX
+       depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
        default y
 
 config X86_USE_3DNOW
@@ -312,5 +319,5 @@ config X86_OOSTORE
 
 config X86_TSC
        bool
-       depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX) && !X86_NUMAQ
+       depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
        default y
index b31c0802e1ccf53f1e5a677e802c5e34166ebc54..f68cc6f215f8b1c9bff53c0f6668d4b792f3b30a 100644 (file)
@@ -85,4 +85,14 @@ config DOUBLEFAULT
           option saves about 4k and might cause you much additional grey
           hair.
 
+config DEBUG_PARAVIRT
+       bool "Enable some paravirtualization debugging"
+       default y
+       depends on PARAVIRT && DEBUG_KERNEL
+       help
+         Currently deliberately clobbers regs which are allowed to be
+         clobbered in inlined paravirt hooks, even in native mode.
+         If turning this off solves a problem, then DISABLE_INTERRUPTS() or
+         ENABLE_INTERRUPTS() is lying about what registers can be clobbered.
+
 endmenu
index 0677908dfa0600dd3f5cf9e315d4ad62154aca3f..f7ac1aea1d8a05e635828e1b8356da975123d3c3 100644 (file)
@@ -26,10 +26,12 @@ endif
 
 LDFLAGS                := -m elf_i386
 OBJCOPYFLAGS   := -O binary -R .note -R .comment -S
-LDFLAGS_vmlinux :=
+ifdef CONFIG_RELOCATABLE
+LDFLAGS_vmlinux := --emit-relocs
+endif
 CHECKFLAGS     += -D__i386__
 
-CFLAGS += -pipe -msoft-float
+CFLAGS += -pipe -msoft-float -mregparm=3
 
 # prevent gcc from keeping the stack 16 byte aligned
 CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
@@ -37,8 +39,6 @@ CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
 # CPU-specific tuning. Anything which can be shared with UML should go here.
 include $(srctree)/arch/i386/Makefile.cpu
 
-cflags-$(CONFIG_REGPARM) += -mregparm=3
-
 # temporary until string.h is fixed
 cflags-y += -ffreestanding
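With -mregparm=3 now passed unconditionally (the old CONFIG_REGPARM option goes away below), the first three integer arguments of every function are passed in %eax, %edx and %ecx instead of on the stack. A hedged illustration of the equivalent per-function GCC attribute; the function here is a made-up example, not part of this patch:

    /* Rough per-function equivalent of building with -mregparm=3:
     * the first three integer arguments arrive in %eax, %edx, %ecx.
     */
    static int __attribute__((regparm(3))) add3(int a, int b, int c)
    {
            return a + b + c;   /* a, b and c come in via registers */
    }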
 
index a11befba26d5b78face98824b3aff15f4fcafc87..a32c031c90d7dacd552c53633992155674c9c50e 100644 (file)
@@ -32,6 +32,7 @@ cflags-$(CONFIG_MWINCHIP2)    += $(call cc-option,-march=winchip2,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)    += $(call cc-option,-march=winchip2,-march=i586)
 cflags-$(CONFIG_MCYRIXIII)     += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
 cflags-$(CONFIG_MVIAC3_2)      += $(call cc-option,-march=c3-2,-march=i686)
+cflags-$(CONFIG_MCORE2)                += -march=i686 $(call cc-option,-mtune=core2,$(call cc-option,-mtune=generic,-mtune=i686))
 
 # AMD Elan support
 cflags-$(CONFIG_X86_ELAN)      += -march=i486
index 258ea95224f63994df2534cea86ee8859575e42a..a661217f33ec4eeba811cb9d987f626f5f7c3838 100644 (file)
@@ -4,22 +4,42 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets                := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
+targets                := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o \
+                       vmlinux.bin.all vmlinux.relocs
 EXTRA_AFLAGS   := -traditional
 
-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup_32
+LDFLAGS_vmlinux := -T
+CFLAGS_misc.o += -fPIC
+hostprogs-y    := relocs
 
-$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
+$(obj)/vmlinux: $(src)/vmlinux.lds $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
        $(call if_changed,ld)
        @:
 
 $(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
 
+quiet_cmd_relocs = RELOCS  $@
+      cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
+$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+       $(call if_changed,relocs)
+
+vmlinux.bin.all-y := $(obj)/vmlinux.bin
+vmlinux.bin.all-$(CONFIG_RELOCATABLE) += $(obj)/vmlinux.relocs
+quiet_cmd_relocbin = BUILD   $@
+      cmd_relocbin = cat $(filter-out FORCE,$^) > $@
+$(obj)/vmlinux.bin.all: $(vmlinux.bin.all-y) FORCE
+       $(call if_changed,relocbin)
+
+ifdef CONFIG_RELOCATABLE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
+       $(call if_changed,gzip)
+else
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
+endif
 
 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
 
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,ld)
index b5893e4ecd376b54a3044c09e035ff82bf5e923c..f395a4bb38bbe63740e0fe35a77decafe474dff4 100644 (file)
 #include <linux/linkage.h>
 #include <asm/segment.h>
 #include <asm/page.h>
+#include <asm/boot.h>
 
+.section ".text.head"
        .globl startup_32
-       
+
 startup_32:
        cld
        cli
@@ -37,93 +39,142 @@ startup_32:
        movl %eax,%es
        movl %eax,%fs
        movl %eax,%gs
+       movl %eax,%ss
 
-       lss stack_start,%esp
-       xorl %eax,%eax
-1:     incl %eax               # check that A20 really IS enabled
-       movl %eax,0x000000      # loop forever if it isn't
-       cmpl %eax,0x100000
-       je 1b
+/* Calculate the delta between where we were compiled to run
+ * at and where we were actually loaded at.  This can only be done
+ * with a short local call on x86.  Nothing else will tell us what
+ * address we are running at.  The reserved chunk of the real-mode
+ * data at 0x34-0x3f is used as the stack for this calculation.
+ * Only 4 bytes are needed.
+ */
+       leal 0x40(%esi), %esp
+       call 1f
+1:     popl %ebp
+       subl $1b, %ebp
+
+/* %ebp contains the address we are loaded at by the boot loader and %ebx
+ * contains the address where we should move the kernel image temporarily
+ * for safe in-place decompression.
+ */
+
+#ifdef CONFIG_RELOCATABLE
+       movl    %ebp, %ebx
+       addl    $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
+       andl    $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
+#else
+       movl $LOAD_PHYSICAL_ADDR, %ebx
+#endif
+
+       /* Replace the compressed data size with the uncompressed size */
+       subl input_len(%ebp), %ebx
+       movl output_len(%ebp), %eax
+       addl %eax, %ebx
+       /* Add 8 bytes for every 32K input block */
+       shrl $12, %eax
+       addl %eax, %ebx
+       /* Add 32K + 18 bytes of extra slack */
+       addl $(32768 + 18), %ebx
+       /* Align on a 4K boundary */
+       addl $4095, %ebx
+       andl $~4095, %ebx
+
+/* Copy the compressed kernel to the end of our buffer
+ * where decompression in place becomes safe.
+ */
+       pushl %esi
+       leal _end(%ebp), %esi
+       leal _end(%ebx), %edi
+       movl $(_end - startup_32), %ecx
+       std
+       rep
+       movsb
+       cld
+       popl %esi
+
+/* Compute the kernel start address.
+ */
+#ifdef CONFIG_RELOCATABLE
+       addl    $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
+       andl    $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebp
+#else
+       movl    $LOAD_PHYSICAL_ADDR, %ebp
+#endif
 
 /*
- * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
- * confuse the debugger if this code is traced.
- * XXX - best to initialize before switching to protected mode.
+ * Jump to the relocated address.
  */
-       pushl $0
-       popfl
+       leal relocated(%ebx), %eax
+       jmp *%eax
+.section ".text"
+relocated:
+
 /*
  * Clear BSS
  */
        xorl %eax,%eax
-       movl $_edata,%edi
-       movl $_end,%ecx
+       leal _edata(%ebx),%edi
+       leal _end(%ebx), %ecx
        subl %edi,%ecx
        cld
        rep
        stosb
+
+/*
+ * Setup the stack for the decompressor
+ */
+       leal stack_end(%ebx), %esp
+
 /*
  * Do the decompression, and jump to the new kernel..
  */
-       subl $16,%esp   # place for structure on the stack
-       movl %esp,%eax
+       movl output_len(%ebx), %eax
+       pushl %eax
+       pushl %ebp      # output address
+       movl input_len(%ebx), %eax
+       pushl %eax      # input_len
+       leal input_data(%ebx), %eax
+       pushl %eax      # input_data
+       leal _end(%ebx), %eax
+       pushl %eax      # end of the image as third argument
        pushl %esi      # real mode pointer as second arg
-       pushl %eax      # address of structure as first arg
        call decompress_kernel
-       orl  %eax,%eax 
-       jnz  3f
-       popl %esi       # discard address
-       popl %esi       # real mode pointer
-       xorl %ebx,%ebx
-       ljmp $(__BOOT_CS), $__PHYSICAL_START
+       addl $20, %esp
+       popl %ecx
 
+#if CONFIG_RELOCATABLE
+/* Find the address of the relocations.
+ */
+       movl %ebp, %edi
+       addl %ecx, %edi
+
+/* Calculate the delta between where vmlinux was compiled to run
+ * and where it was actually loaded.
+ */
+       movl %ebp, %ebx
+       subl $LOAD_PHYSICAL_ADDR, %ebx
+       jz   2f         /* Nothing to be done if loaded at compiled addr. */
 /*
- * We come here, if we were loaded high.
- * We need to move the move-in-place routine down to 0x1000
- * and then start it with the buffer addresses in registers,
- * which we got from the stack.
+ * Process relocations.
  */
-3:
-       movl $move_routine_start,%esi
-       movl $0x1000,%edi
-       movl $move_routine_end,%ecx
-       subl %esi,%ecx
-       addl $3,%ecx
-       shrl $2,%ecx
-       cld
-       rep
-       movsl
-
-       popl %esi       # discard the address
-       popl %ebx       # real mode pointer
-       popl %esi       # low_buffer_start
-       popl %ecx       # lcount
-       popl %edx       # high_buffer_start
-       popl %eax       # hcount
-       movl $__PHYSICAL_START,%edi
-       cli             # make sure we don't get interrupted
-       ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
+
+1:     subl $4, %edi
+       movl 0(%edi), %ecx
+       testl %ecx, %ecx
+       jz 2f
+       addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+       jmp 1b
+2:
+#endif
 
 /*
- * Routine (template) for moving the decompressed kernel in place,
- * if we were high loaded. This _must_ PIC-code !
+ * Jump to the decompressed kernel.
  */
-move_routine_start:
-       movl %ecx,%ebp
-       shrl $2,%ecx
-       rep
-       movsl
-       movl %ebp,%ecx
-       andl $3,%ecx
-       rep
-       movsb
-       movl %edx,%esi
-       movl %eax,%ecx  # NOTE: rep movsb won't move if %ecx == 0
-       addl $3,%ecx
-       shrl $2,%ecx
-       rep
-       movsl
-       movl %ebx,%esi  # Restore setup pointer
        xorl %ebx,%ebx
-       ljmp $(__BOOT_CS), $__PHYSICAL_START
-move_routine_end:
+       jmp *%ebp
+
+.bss
+.balign 4
+stack:
+       .fill 4096, 1, 0
+stack_end:
index b2ccd543410d51314a38f75048d71306d005a969..1ce7017fd62706ccb02e1d5b34c683bd6537677c 100644 (file)
@@ -9,11 +9,94 @@
  * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
  */
 
+#undef CONFIG_PARAVIRT
 #include <linux/linkage.h>
 #include <linux/vmalloc.h>
 #include <linux/screen_info.h>
 #include <asm/io.h>
 #include <asm/page.h>
+#include <asm/boot.h>
+
+/* WARNING!!
+ * This code is compiled with -fPIC and it is relocated dynamically
+ * at run time, but no relocation processing is performed.
+ * This means that it is not safe to place pointers in static structures.
+ */
+
+/*
+ * Getting to provably safe in-place decompression is hard.
+ * Worst case behaviours need to be analyzed.
+ * Background information:
+ *
+ * The file layout is:
+ *    magic[2]
+ *    method[1]
+ *    flags[1]
+ *    timestamp[4]
+ *    extraflags[1]
+ *    os[1]
+ *    compressed data blocks[N]
+ *    crc[4] orig_len[4]
+ *
+ * resulting in 18 bytes of non-compressed data overhead.
+ *
+ * Files divided into blocks
+ * 1 bit (last block flag)
+ * 2 bits (block type)
+ *
+ * A block occurs every 32K - 1 bytes, or whenever 50% compression has been achieved.
+ * The smallest block type encoding is always used.
+ *
+ * stored:
+ *    32 bits length in bytes.
+ *
+ * fixed:
+ *    magic fixed tree.
+ *    symbols.
+ *
+ * dynamic:
+ *    dynamic tree encoding.
+ *    symbols.
+ *
+ *
+ * The buffer for decompression in place is the length of the
+ * uncompressed data, plus a small amount extra to keep the algorithm safe.
+ * The compressed data is placed at the end of the buffer.  The output
+ * pointer is placed at the start of the buffer and the input pointer
+ * is placed where the compressed data starts.  Problems will occur
+ * when the output pointer overruns the input pointer.
+ *
+ * The output pointer can only overrun the input pointer if the input
+ * pointer is moving faster than the output pointer.  A condition only
+ * triggered by data whose compressed form is larger than the uncompressed
+ * form.
+ *
+ * The worst case at the block level is a growth of the compressed data
+ * of 5 bytes per 32767 bytes.
+ *
+ * The worst case internal to a compressed block is very hard to figure.
+ * The worst case can at least be bounded by having one bit that represents
+ * 32764 bytes and then all of the rest of the bytes representing the
+ * very last byte.
+ *
+ * All of which is enough to compute an amount of extra data that is required
+ * to be safe.  To avoid problems at the block level allocating 5 extra bytes
+ * per 32767 bytes of data is sufficient.  To avoid problems internal to a block
+ * adding an extra 32767 bytes (the worst case uncompressed block size) is
+ * sufficient to ensure that in the worst case the decompressed data for a
+ * block will stop one byte before the compressed data for that block begins.
+ * To avoid problems with the compressed data's meta information an extra 18
+ * bytes are needed.  Leading to the formula:
+ *
+ * extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size.
+ *
+ * Adding 8 bytes per 32K is a bit excessive but much easier to calculate.
+ * Adding 32768 instead of 32767 just makes for round numbers.
+ * Adding the decompressor_size is necessary as it must live after all
+ * of the data as well.  Last I measured, the decompressor is about 14K:
+ * 10K of actual data and 4K of bss.
+ *
+ */
 
 /*
  * gzip declarations
@@ -30,15 +113,20 @@ typedef unsigned char  uch;
 typedef unsigned short ush;
 typedef unsigned long  ulg;
 
-#define WSIZE 0x8000           /* Window size must be at least 32k, */
-                               /* and a power of two */
+#define WSIZE 0x80000000       /* Window size must be at least 32k,
+                                * and a power of two
+                                * We don't actually have a window just
+                                * a huge output buffer so I report
+                                * a 2G window size, as that should
+                                * always be larger than our output buffer.
+                                */
 
-static uch *inbuf;          /* input buffer */
-static uch window[WSIZE];    /* Sliding window buffer */
+static uch *inbuf;     /* input buffer */
+static uch *window;    /* Sliding window buffer, (and final output buffer) */
 
-static unsigned insize = 0;  /* valid bytes in inbuf */
-static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0;  /* bytes in output buffer */
+static unsigned insize;  /* valid bytes in inbuf */
+static unsigned inptr;   /* index of next byte to be processed in inbuf */
+static unsigned outcnt;  /* bytes in output buffer */
 
 /* gzip flag byte */
 #define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
@@ -89,8 +177,6 @@ extern unsigned char input_data[];
 extern int input_len;
 
 static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
 
 static void *malloc(int size);
 static void free(void *where);
@@ -100,24 +186,17 @@ static void *memcpy(void *dest, const void *src, unsigned n);
 
 static void putstr(const char *);
 
-extern int end;
-static long free_mem_ptr = (long)&end;
-static long free_mem_end_ptr;
+static unsigned long free_mem_ptr;
+static unsigned long free_mem_end_ptr;
 
-#define INPLACE_MOVE_ROUTINE  0x1000
-#define LOW_BUFFER_START      0x2000
-#define LOW_BUFFER_MAX       0x90000
 #define HEAP_SIZE             0x3000
-static unsigned int low_buffer_end, low_buffer_size;
-static int high_loaded =0;
-static uch *high_buffer_start /* = (uch *)(((ulg)&end) + HEAP_SIZE)*/;
 
 static char *vidmem = (char *)0xb8000;
 static int vidport;
 static int lines, cols;
 
 #ifdef CONFIG_X86_NUMAQ
-static void * xquad_portio = NULL;
+void *xquad_portio;
 #endif
 
 #include "../../../../lib/inflate.c"
@@ -151,7 +230,7 @@ static void gzip_mark(void **ptr)
 
 static void gzip_release(void **ptr)
 {
-       free_mem_ptr = (long) *ptr;
+       free_mem_ptr = (unsigned long) *ptr;
 }
  
 static void scroll(void)
@@ -179,7 +258,7 @@ static void putstr(const char *s)
                                y--;
                        }
                } else {
-                       vidmem [ ( x + cols * y ) * 2 ] = c; 
+                       vidmem [ ( x + cols * y ) * 2 ] = c;
                        if ( ++x >= cols ) {
                                x = 0;
                                if ( ++y >= lines ) {
@@ -224,58 +303,31 @@ static void* memcpy(void* dest, const void* src, unsigned n)
  */
 static int fill_inbuf(void)
 {
-       if (insize != 0) {
-               error("ran out of input data");
-       }
-
-       inbuf = input_data;
-       insize = input_len;
-       inptr = 1;
-       return inbuf[0];
+       error("ran out of input data");
+       return 0;
 }
 
 /* ===========================================================================
  * Write the output window window[0..outcnt-1] and update crc and bytes_out.
  * (Used for the decompressed data only.)
  */
-static void flush_window_low(void)
-{
-    ulg c = crc;         /* temporary variable */
-    unsigned n;
-    uch *in, *out, ch;
-    
-    in = window;
-    out = &output_data[output_ptr]; 
-    for (n = 0; n < outcnt; n++) {
-           ch = *out++ = *in++;
-           c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    output_ptr += (ulg)outcnt;
-    outcnt = 0;
-}
-
-static void flush_window_high(void)
-{
-    ulg c = crc;         /* temporary variable */
-    unsigned n;
-    uch *in,  ch;
-    in = window;
-    for (n = 0; n < outcnt; n++) {
-       ch = *output_data++ = *in++;
-       if ((ulg)output_data == low_buffer_end) output_data=high_buffer_start;
-       c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    outcnt = 0;
-}
-
 static void flush_window(void)
 {
-       if (high_loaded) flush_window_high();
-       else flush_window_low();
+       /* With my window equal to my output buffer
+        * I only need to compute the crc here.
+        */
+       ulg c = crc;         /* temporary variable */
+       unsigned n;
+       uch *in, ch;
+
+       in = window;
+       for (n = 0; n < outcnt; n++) {
+               ch = *in++;
+               c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+       }
+       crc = c;
+       bytes_out += (ulg)outcnt;
+       outcnt = 0;
 }
 
 static void error(char *x)
@@ -287,66 +339,8 @@ static void error(char *x)
        while(1);       /* Halt */
 }
 
-#define STACK_SIZE (4096)
-
-long user_stack [STACK_SIZE];
-
-struct {
-       long * a;
-       short b;
-       } stack_start = { & user_stack [STACK_SIZE] , __BOOT_DS };
-
-static void setup_normal_output_buffer(void)
-{
-#ifdef STANDARD_MEMORY_BIOS_CALL
-       if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
-#else
-       if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
-#endif
-       output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
-       free_mem_end_ptr = (long)real_mode;
-}
-
-struct moveparams {
-       uch *low_buffer_start;  int lcount;
-       uch *high_buffer_start; int hcount;
-};
-
-static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
-{
-       high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
-#ifdef STANDARD_MEMORY_BIOS_CALL
-       if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
-#else
-       if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
-#endif 
-       mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
-       low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
-         ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
-       low_buffer_size = low_buffer_end - LOW_BUFFER_START;
-       high_loaded = 1;
-       free_mem_end_ptr = (long)high_buffer_start;
-       if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
-               high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
-               mv->hcount = 0; /* say: we need not to move high_buffer */
-       }
-       else mv->hcount = -1;
-       mv->high_buffer_start = high_buffer_start;
-}
-
-static void close_output_buffer_if_we_run_high(struct moveparams *mv)
-{
-       if (bytes_out > low_buffer_size) {
-               mv->lcount = low_buffer_size;
-               if (mv->hcount)
-                       mv->hcount = bytes_out - low_buffer_size;
-       } else {
-               mv->lcount = bytes_out;
-               mv->hcount = 0;
-       }
-}
-
-asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
+asmlinkage void decompress_kernel(void *rmode, unsigned long end,
+                       uch *input_data, unsigned long input_len, uch *output)
 {
        real_mode = rmode;
 
@@ -361,13 +355,25 @@ asmlinkage int decompress_kernel(struct moveparams *mv, void *rmode)
        lines = RM_SCREEN_INFO.orig_video_lines;
        cols = RM_SCREEN_INFO.orig_video_cols;
 
-       if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
-       else setup_output_buffer_if_we_run_high(mv);
+       window = output;        /* Output buffer (Normally at 1M) */
+       free_mem_ptr     = end; /* Heap  */
+       free_mem_end_ptr = end + HEAP_SIZE;
+       inbuf  = input_data;    /* Input buffer */
+       insize = input_len;
+       inptr  = 0;
+
+       if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
+               error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
+       if (end > ((-__PAGE_OFFSET-(512 <<20)-1) & 0x7fffffff))
+               error("Destination address too large");
+#ifndef CONFIG_RELOCATABLE
+       if ((u32)output != LOAD_PHYSICAL_ADDR)
+               error("Wrong destination address");
+#endif
 
        makecrc();
        putstr("Uncompressing Linux... ");
        gunzip();
        putstr("Ok, booting the kernel.\n");
-       if (high_loaded) close_output_buffer_if_we_run_high(mv);
-       return high_loaded;
+       return;
 }
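The slack formula derived in the long comment at the top of this file, extra_bytes = (uncompressed_size >> 12) + 32768 + 18 + decompressor_size, is easy to sanity-check with concrete numbers. A small C sketch; the figures are illustrative, not measurements taken from this patch:

    #include <stdio.h>

    /* Slack needed for safe in-place decompression, per the misc.c comment. */
    static unsigned long extra_bytes(unsigned long uncompressed_size,
                                     unsigned long decompressor_size)
    {
            return (uncompressed_size >> 12) + 32768 + 18 + decompressor_size;
    }

    int main(void)
    {
            /* An 8 MB uncompressed kernel and the roughly 14K decompressor
             * mentioned above: 2048 + 32768 + 18 + 14336 = 49170 bytes.
             */
            printf("%lu bytes of slack\n", extra_bytes(8 << 20, 14 * 1024));
            return 0;
    }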
diff --git a/arch/i386/boot/compressed/relocs.c b/arch/i386/boot/compressed/relocs.c
new file mode 100644 (file)
index 0000000..468da89
--- /dev/null
@@ -0,0 +1,625 @@
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+
+#define MAX_SHDRS 100
+static Elf32_Ehdr ehdr;
+static Elf32_Shdr shdr[MAX_SHDRS];
+static Elf32_Sym  *symtab[MAX_SHDRS];
+static Elf32_Rel  *reltab[MAX_SHDRS];
+static char *strtab[MAX_SHDRS];
+static unsigned long reloc_count, reloc_idx;
+static unsigned long *relocs;
+
+/*
+ * The following symbols have been audited. Their values are constant and do
+ * not change if bzImage is loaded at a different physical address than
+ * the address for which it has been compiled. Don't warn the user about
+ * absolute relocations present w.r.t. these symbols.
+ */
+static const char* safe_abs_relocs[] = {
+               "__kernel_vsyscall",
+               "__kernel_rt_sigreturn",
+               "__kernel_sigreturn",
+               "SYSENTER_RETURN",
+};
+
+static int is_safe_abs_reloc(const char* sym_name)
+{
+       int i, array_size;
+
+       array_size = sizeof(safe_abs_relocs)/sizeof(char*);
+
+       for(i = 0; i < array_size; i++) {
+               if (!strcmp(sym_name, safe_abs_relocs[i]))
+                       /* Match found */
+                       return 1;
+       }
+       return 0;
+}
+
+static void die(char *fmt, ...)
+{
+       va_list ap;
+       va_start(ap, fmt);
+       vfprintf(stderr, fmt, ap);
+       va_end(ap);
+       exit(1);
+}
+
+static const char *sym_type(unsigned type)
+{
+       static const char *type_name[] = {
+#define SYM_TYPE(X) [X] = #X
+               SYM_TYPE(STT_NOTYPE),
+               SYM_TYPE(STT_OBJECT),
+               SYM_TYPE(STT_FUNC),
+               SYM_TYPE(STT_SECTION),
+               SYM_TYPE(STT_FILE),
+               SYM_TYPE(STT_COMMON),
+               SYM_TYPE(STT_TLS),
+#undef SYM_TYPE
+       };
+       const char *name = "unknown sym type name";
+       if (type < sizeof(type_name)/sizeof(type_name[0])) {
+               name = type_name[type];
+       }
+       return name;
+}
+
+static const char *sym_bind(unsigned bind)
+{
+       static const char *bind_name[] = {
+#define SYM_BIND(X) [X] = #X
+               SYM_BIND(STB_LOCAL),
+               SYM_BIND(STB_GLOBAL),
+               SYM_BIND(STB_WEAK),
+#undef SYM_BIND
+       };
+       const char *name = "unknown sym bind name";
+       if (bind < sizeof(bind_name)/sizeof(bind_name[0])) {
+               name = bind_name[bind];
+       }
+       return name;
+}
+
+static const char *sym_visibility(unsigned visibility)
+{
+       static const char *visibility_name[] = {
+#define SYM_VISIBILITY(X) [X] = #X
+               SYM_VISIBILITY(STV_DEFAULT),
+               SYM_VISIBILITY(STV_INTERNAL),
+               SYM_VISIBILITY(STV_HIDDEN),
+               SYM_VISIBILITY(STV_PROTECTED),
+#undef SYM_VISIBILITY
+       };
+       const char *name = "unknown sym visibility name";
+       if (visibility < sizeof(visibility_name)/sizeof(visibility_name[0])) {
+               name = visibility_name[visibility];
+       }
+       return name;
+}
+
+static const char *rel_type(unsigned type)
+{
+       static const char *type_name[] = {
+#define REL_TYPE(X) [X] = #X
+               REL_TYPE(R_386_NONE),
+               REL_TYPE(R_386_32),
+               REL_TYPE(R_386_PC32),
+               REL_TYPE(R_386_GOT32),
+               REL_TYPE(R_386_PLT32),
+               REL_TYPE(R_386_COPY),
+               REL_TYPE(R_386_GLOB_DAT),
+               REL_TYPE(R_386_JMP_SLOT),
+               REL_TYPE(R_386_RELATIVE),
+               REL_TYPE(R_386_GOTOFF),
+               REL_TYPE(R_386_GOTPC),
+#undef REL_TYPE
+       };
+       const char *name = "unknown type rel type name";
+       if (type < sizeof(type_name)/sizeof(type_name[0])) {
+               name = type_name[type];
+       }
+       return name;
+}
+
+static const char *sec_name(unsigned shndx)
+{
+       const char *sec_strtab;
+       const char *name;
+       sec_strtab = strtab[ehdr.e_shstrndx];
+       name = "<noname>";
+       if (shndx < ehdr.e_shnum) {
+               name = sec_strtab + shdr[shndx].sh_name;
+       }
+       else if (shndx == SHN_ABS) {
+               name = "ABSOLUTE";
+       }
+       else if (shndx == SHN_COMMON) {
+               name = "COMMON";
+       }
+       return name;
+}
+
+static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
+{
+       const char *name;
+       name = "<noname>";
+       if (sym->st_name) {
+               name = sym_strtab + sym->st_name;
+       }
+       else {
+               name = sec_name(shdr[sym->st_shndx].sh_name);
+       }
+       return name;
+}
+
+
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val) bswap_16(val)
+#define le32_to_cpu(val) bswap_32(val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+       return le16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+       return le32_to_cpu(val);
+}
+
+static void read_ehdr(FILE *fp)
+{
+       if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
+               die("Cannot read ELF header: %s\n",
+                       strerror(errno));
+       }
+       if (memcmp(ehdr.e_ident, ELFMAG, 4) != 0) {
+               die("No ELF magic\n");
+       }
+       if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
+               die("Not a 32 bit executable\n");
+       }
+       if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
+               die("Not a LSB ELF executable\n");
+       }
+       if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
+               die("Unknown ELF version\n");
+       }
+       /* Convert the fields to native endian */
+       ehdr.e_type      = elf16_to_cpu(ehdr.e_type);
+       ehdr.e_machine   = elf16_to_cpu(ehdr.e_machine);
+       ehdr.e_version   = elf32_to_cpu(ehdr.e_version);
+       ehdr.e_entry     = elf32_to_cpu(ehdr.e_entry);
+       ehdr.e_phoff     = elf32_to_cpu(ehdr.e_phoff);
+       ehdr.e_shoff     = elf32_to_cpu(ehdr.e_shoff);
+       ehdr.e_flags     = elf32_to_cpu(ehdr.e_flags);
+       ehdr.e_ehsize    = elf16_to_cpu(ehdr.e_ehsize);
+       ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
+       ehdr.e_phnum     = elf16_to_cpu(ehdr.e_phnum);
+       ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
+       ehdr.e_shnum     = elf16_to_cpu(ehdr.e_shnum);
+       ehdr.e_shstrndx  = elf16_to_cpu(ehdr.e_shstrndx);
+
+       if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
+               die("Unsupported ELF header type\n");
+       }
+       if (ehdr.e_machine != EM_386) {
+               die("Not for x86\n");
+       }
+       if (ehdr.e_version != EV_CURRENT) {
+               die("Unknown ELF version\n");
+       }
+       if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
+               die("Bad Elf header size\n");
+       }
+       if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
+               die("Bad program header entry\n");
+       }
+       if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
+               die("Bad section header entry\n");
+       }
+       if (ehdr.e_shstrndx >= ehdr.e_shnum) {
+               die("String table index out of bounds\n");
+       }
+}
+
+static void read_shdrs(FILE *fp)
+{
+       int i;
+       if (ehdr.e_shnum > MAX_SHDRS) {
+               die("%d section headers supported: %d\n",
+                       ehdr.e_shnum, MAX_SHDRS);
+       }
+       if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
+               die("Seek to %d failed: %s\n",
+                       ehdr.e_shoff, strerror(errno));
+       }
+       if (fread(&shdr, sizeof(shdr[0]), ehdr.e_shnum, fp) != ehdr.e_shnum) {
+               die("Cannot read ELF section headers: %s\n",
+                       strerror(errno));
+       }
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               shdr[i].sh_name      = elf32_to_cpu(shdr[i].sh_name);
+               shdr[i].sh_type      = elf32_to_cpu(shdr[i].sh_type);
+               shdr[i].sh_flags     = elf32_to_cpu(shdr[i].sh_flags);
+               shdr[i].sh_addr      = elf32_to_cpu(shdr[i].sh_addr);
+               shdr[i].sh_offset    = elf32_to_cpu(shdr[i].sh_offset);
+               shdr[i].sh_size      = elf32_to_cpu(shdr[i].sh_size);
+               shdr[i].sh_link      = elf32_to_cpu(shdr[i].sh_link);
+               shdr[i].sh_info      = elf32_to_cpu(shdr[i].sh_info);
+               shdr[i].sh_addralign = elf32_to_cpu(shdr[i].sh_addralign);
+               shdr[i].sh_entsize   = elf32_to_cpu(shdr[i].sh_entsize);
+       }
+
+}
+
+static void read_strtabs(FILE *fp)
+{
+       int i;
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               if (shdr[i].sh_type != SHT_STRTAB) {
+                       continue;
+               }
+               strtab[i] = malloc(shdr[i].sh_size);
+               if (!strtab[i]) {
+                       die("malloc of %d bytes for strtab failed\n",
+                               shdr[i].sh_size);
+               }
+               if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+                       die("Seek to %d failed: %s\n",
+                               shdr[i].sh_offset, strerror(errno));
+               }
+               if (fread(strtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+                       die("Cannot read symbol table: %s\n",
+                               strerror(errno));
+               }
+       }
+}
+
+static void read_symtabs(FILE *fp)
+{
+       int i,j;
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               if (shdr[i].sh_type != SHT_SYMTAB) {
+                       continue;
+               }
+               symtab[i] = malloc(shdr[i].sh_size);
+               if (!symtab[i]) {
+                       die("malloc of %d bytes for symtab failed\n",
+                               shdr[i].sh_size);
+               }
+               if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+                       die("Seek to %d failed: %s\n",
+                               shdr[i].sh_offset, strerror(errno));
+               }
+               if (fread(symtab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+                       die("Cannot read symbol table: %s\n",
+                               strerror(errno));
+               }
+               for(j = 0; j < shdr[i].sh_size/sizeof(symtab[i][0]); j++) {
+                       symtab[i][j].st_name  = elf32_to_cpu(symtab[i][j].st_name);
+                       symtab[i][j].st_value = elf32_to_cpu(symtab[i][j].st_value);
+                       symtab[i][j].st_size  = elf32_to_cpu(symtab[i][j].st_size);
+                       symtab[i][j].st_shndx = elf16_to_cpu(symtab[i][j].st_shndx);
+               }
+       }
+}
+
+
+static void read_relocs(FILE *fp)
+{
+       int i,j;
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               if (shdr[i].sh_type != SHT_REL) {
+                       continue;
+               }
+               reltab[i] = malloc(shdr[i].sh_size);
+               if (!reltab[i]) {
+                       die("malloc of %d bytes for relocs failed\n",
+                               shdr[i].sh_size);
+               }
+               if (fseek(fp, shdr[i].sh_offset, SEEK_SET) < 0) {
+                       die("Seek to %d failed: %s\n",
+                               shdr[i].sh_offset, strerror(errno));
+               }
+               if (fread(reltab[i], 1, shdr[i].sh_size, fp) != shdr[i].sh_size) {
+                       die("Cannot read symbol table: %s\n",
+                               strerror(errno));
+               }
+               for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+                       reltab[i][j].r_offset = elf32_to_cpu(reltab[i][j].r_offset);
+                       reltab[i][j].r_info   = elf32_to_cpu(reltab[i][j].r_info);
+               }
+       }
+}
+
+
+static void print_absolute_symbols(void)
+{
+       int i;
+       printf("Absolute symbols\n");
+       printf(" Num:    Value Size  Type       Bind        Visibility  Name\n");
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               char *sym_strtab;
+               Elf32_Sym *sh_symtab;
+               int j;
+               if (shdr[i].sh_type != SHT_SYMTAB) {
+                       continue;
+               }
+               sh_symtab = symtab[i];
+               sym_strtab = strtab[shdr[i].sh_link];
+               for(j = 0; j < shdr[i].sh_size/sizeof(symtab[0][0]); j++) {
+                       Elf32_Sym *sym;
+                       const char *name;
+                       sym = &symtab[i][j];
+                       name = sym_name(sym_strtab, sym);
+                       if (sym->st_shndx != SHN_ABS) {
+                               continue;
+                       }
+                       printf("%5d %08x %5d %10s %10s %12s %s\n",
+                               j, sym->st_value, sym->st_size,
+                               sym_type(ELF32_ST_TYPE(sym->st_info)),
+                               sym_bind(ELF32_ST_BIND(sym->st_info)),
+                               sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
+                               name);
+               }
+       }
+       printf("\n");
+}
+
+static void print_absolute_relocs(void)
+{
+       int i, printed = 0;
+
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               char *sym_strtab;
+               Elf32_Sym *sh_symtab;
+               unsigned sec_applies, sec_symtab;
+               int j;
+               if (shdr[i].sh_type != SHT_REL) {
+                       continue;
+               }
+               sec_symtab  = shdr[i].sh_link;
+               sec_applies = shdr[i].sh_info;
+               if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
+                       continue;
+               }
+               sh_symtab = symtab[sec_symtab];
+               sym_strtab = strtab[shdr[sec_symtab].sh_link];
+               for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+                       Elf32_Rel *rel;
+                       Elf32_Sym *sym;
+                       const char *name;
+                       rel = &reltab[i][j];
+                       sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+                       name = sym_name(sym_strtab, sym);
+                       if (sym->st_shndx != SHN_ABS) {
+                               continue;
+                       }
+
+                       /* Absolute symbols are not relocated if bzImage is
+                        * loaded at a non-compiled address. Display a warning
+                        * to the user at compile time about the absolute
+                        * relocations present.
+                        *
+                        * The user needs to audit the code to make sure
+                        * some symbols which should have been section
+                        * relative have not become absolute because of some
+                        * linker optimization or wrong programming usage.
+                        *
+                        * Before warning, check if this absolute symbol
+                        * relocation is harmless.
+                        */
+                       if (is_safe_abs_reloc(name))
+                               continue;
+
+                       if (!printed) {
+                               printf("WARNING: Absolute relocations"
+                                       " present\n");
+                               printf("Offset     Info     Type     Sym.Value "
+                                       "Sym.Name\n");
+                               printed = 1;
+                       }
+
+                       printf("%08x %08x %10s %08x  %s\n",
+                               rel->r_offset,
+                               rel->r_info,
+                               rel_type(ELF32_R_TYPE(rel->r_info)),
+                               sym->st_value,
+                               name);
+               }
+       }
+
+       if (printed)
+               printf("\n");
+}
+
+static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+{
+       int i;
+       /* Walk through the relocations */
+       for(i = 0; i < ehdr.e_shnum; i++) {
+               char *sym_strtab;
+               Elf32_Sym *sh_symtab;
+               unsigned sec_applies, sec_symtab;
+               int j;
+               if (shdr[i].sh_type != SHT_REL) {
+                       continue;
+               }
+               sec_symtab  = shdr[i].sh_link;
+               sec_applies = shdr[i].sh_info;
+               if (!(shdr[sec_applies].sh_flags & SHF_ALLOC)) {
+                       continue;
+               }
+               sh_symtab = symtab[sec_symtab];
+               sym_strtab = strtab[shdr[sec_symtab].sh_link];
+               for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
+                       Elf32_Rel *rel;
+                       Elf32_Sym *sym;
+                       unsigned r_type;
+                       rel = &reltab[i][j];
+                       sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
+                       r_type = ELF32_R_TYPE(rel->r_info);
+                       /* Don't visit relocations to absolute symbols */
+                       if (sym->st_shndx == SHN_ABS) {
+                               continue;
+                       }
+                       if (r_type == R_386_PC32) {
+                               /* PC relative relocations don't need to be adjusted */
+                       }
+                       else if (r_type == R_386_32) {
+                               /* Visit relocations that need to be adjusted */
+                               visit(rel, sym);
+                       }
+                       else {
+                               die("Unsupported relocation type: %d\n", r_type);
+                       }
+               }
+       }
+}
+
+static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+{
+       reloc_count += 1;
+}
+
+static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
+{
+       /* Remember the address that needs to be adjusted. */
+       relocs[reloc_idx++] = rel->r_offset;
+}
+
+static int cmp_relocs(const void *va, const void *vb)
+{
+       const unsigned long *a, *b;
+       a = va; b = vb;
+       return (*a == *b)? 0 : (*a > *b)? 1 : -1;
+}
+
+static void emit_relocs(int as_text)
+{
+       int i;
+       /* Count how many relocations I have and allocate space for them. */
+       reloc_count = 0;
+       walk_relocs(count_reloc);
+       relocs = malloc(reloc_count * sizeof(relocs[0]));
+       if (!relocs) {
+               die("malloc of %d entries for relocs failed\n",
+                       reloc_count);
+       }
+       /* Collect up the relocations */
+       reloc_idx = 0;
+       walk_relocs(collect_reloc);
+
+       /* Order the relocations for more efficient processing */
+       qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
+
+       /* Print the relocations */
+       if (as_text) {
+               /* Print the relocations in a form suitable for
+                * gas to assemble.
+                */
+               printf(".section \".data.reloc\",\"a\"\n");
+               printf(".balign 4\n");
+               for(i = 0; i < reloc_count; i++) {
+                       printf("\t .long 0x%08lx\n", relocs[i]);
+               }
+               printf("\n");
+       }
+       else {
+               unsigned char buf[4];
+               buf[0] = buf[1] = buf[2] = buf[3] = 0;
+               /* Print a stop */
+               printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+               /* Now print each relocation */
+               for(i = 0; i < reloc_count; i++) {
+                       buf[0] = (relocs[i] >>  0) & 0xff;
+                       buf[1] = (relocs[i] >>  8) & 0xff;
+                       buf[2] = (relocs[i] >> 16) & 0xff;
+                       buf[3] = (relocs[i] >> 24) & 0xff;
+                       printf("%c%c%c%c", buf[0], buf[1], buf[2], buf[3]);
+               }
+       }
+}
+
+static void usage(void)
+{
+       die("relocs [--abs-syms |--abs-relocs | --text] vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+       int show_absolute_syms, show_absolute_relocs;
+       int as_text;
+       const char *fname;
+       FILE *fp;
+       int i;
+
+       show_absolute_syms = 0;
+       show_absolute_relocs = 0;
+       as_text = 0;
+       fname = NULL;
+       for(i = 1; i < argc; i++) {
+               char *arg = argv[i];
+               if (*arg == '-') {
+                       if (strcmp(arg, "--abs-syms") == 0) {
+                               show_absolute_syms = 1;
+                               continue;
+                       }
+
+                       if (strcmp(arg, "--abs-relocs") == 0) {
+                               show_absolute_relocs = 1;
+                               continue;
+                       }
+                       else if (strcmp(arg, "--text") == 0) {
+                               as_text = 1;
+                               continue;
+                       }
+               }
+               else if (!fname) {
+                       fname = arg;
+                       continue;
+               }
+               usage();
+       }
+       if (!fname) {
+               usage();
+       }
+       fp = fopen(fname, "r");
+       if (!fp) {
+               die("Cannot open %s: %s\n",
+                       fname, strerror(errno));
+       }
+       read_ehdr(fp);
+       read_shdrs(fp);
+       read_strtabs(fp);
+       read_symtabs(fp);
+       read_relocs(fp);
+       if (show_absolute_syms) {
+               print_absolute_symbols();
+               return 0;
+       }
+       if (show_absolute_relocs) {
+               print_absolute_relocs();
+               return 0;
+       }
+       emit_relocs(as_text);
+       return 0;
+}
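In its default (binary) mode the relocs tool emits a 32-bit zero sentinel followed by one little-endian 32-bit entry per relocation, and the whole table is appended to vmlinux.bin before compression; the relocatable head.S walks that table backwards from its end, adjusting each referenced word until it reaches the sentinel. In head.S the stored values are vmlinux link-time virtual addresses that the loop converts by subtracting __PAGE_OFFSET; the sketch below simplifies them to plain offsets from an image base and assumes a little-endian host:

    #include <stdint.h>

    /* Walk a relocation table laid out as
     *   [0x00000000][offset 1][offset 2] ... [offset N]
     * starting just past the last entry and stopping at the zero sentinel,
     * adding 'delta' to each 32-bit word the offsets point at.  This mirrors
     * the fixup loop in the relocatable head.S, with addresses simplified
     * to offsets into 'image'.
     */
    static void apply_relocs(const uint32_t *table_end, uint8_t *image,
                             uint32_t delta)
    {
            const uint32_t *p = table_end;

            while (*--p)
                    *(uint32_t *)(image + *p) += delta;
    }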
diff --git a/arch/i386/boot/compressed/vmlinux.lds b/arch/i386/boot/compressed/vmlinux.lds
new file mode 100644 (file)
index 0000000..cc4854f
--- /dev/null
@@ -0,0 +1,43 @@
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(startup_32)
+SECTIONS
+{
+        /* Be careful parts of head.S assume startup_32 is at
+         * address 0.
+        */
+       . =  0  ;
+       .text.head : {
+               _head = . ;
+               *(.text.head)
+               _ehead = . ;
+       }
+       .data.compressed : {
+               *(.data.compressed)
+       }
+       .text : {
+               _text = .;      /* Text */
+               *(.text)
+               *(.text.*)
+               _etext = . ;
+       }
+       .rodata : {
+               _rodata = . ;
+               *(.rodata)       /* read-only data */
+               *(.rodata.*)
+               _erodata = . ;
+       }
+       .data : {
+               _data = . ;
+               *(.data)
+               *(.data.*)
+               _edata = . ;
+       }
+       .bss : {
+               _bss = . ;
+               *(.bss)
+               *(.bss.*)
+               *(COMMON)
+               _end = . ;
+       }
+}
index 1ed9d791f8638c5a6f9e5bc8d1831779d411b2b6..707a88f7f29ebc24b42b0a793ed132f53d1c0f05 100644 (file)
@@ -1,9 +1,10 @@
 SECTIONS
 {
-  .data : { 
+  .data.compressed : {
        input_len = .;
        LONG(input_data_end - input_data) input_data = .; 
        *(.data) 
+       output_len = . - 4;
        input_data_end = .; 
        }
 }
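The new "output_len = . - 4" assignment works because a gzip stream ends with an eight-byte trailer: a CRC32 followed by ISIZE, the uncompressed length modulo 2^32 (RFC 1952, the "crc[4] orig_len[4]" noted in the misc.c comment). Pointing output_len at the last four bytes of the embedded image therefore hands head.S the decompressed size for free. A small C sketch of reading that field from a gzip file, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Read the ISIZE field (uncompressed length mod 2^32) from the last
     * four bytes of a gzip file, the same field that "output_len = . - 4"
     * exposes from the embedded vmlinux.bin.gz.
     */
    static uint32_t gzip_isize(FILE *fp)
    {
            uint8_t b[4];

            if (fseek(fp, -4L, SEEK_END) != 0 || fread(b, 1, 4, fp) != 4)
                    return 0;
            return b[0] | (b[1] << 8) | ((uint32_t)b[2] << 16) |
                   ((uint32_t)b[3] << 24);
    }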
index 3aec4538a113b9f84d33c3613e7137e18aaa8f61..06edf1c66242466e8853fcedd8afacc19228817a 100644 (file)
@@ -81,7 +81,7 @@ start:
 # This is the setup header, and it must start at %cs:2 (old 0x9020:2)
 
                .ascii  "HdrS"          # header signature
-               .word   0x0204          # header version number (>= 0x0105)
+               .word   0x0205          # header version number (>= 0x0105)
                                        # or else old loadlin-1.5 will fail)
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
 start_sys_seg: .word   SYSSEG
@@ -160,6 +160,17 @@ ramdisk_max:       .long (-__PAGE_OFFSET-(512 << 20)-1) & 0x7fffffff
                                        # The highest safe address for
                                        # the contents of an initrd
 
+kernel_alignment:  .long CONFIG_PHYSICAL_ALIGN         #physical addr alignment
+                                               #required for protected mode
+                                               #kernel
+#ifdef CONFIG_RELOCATABLE
+relocatable_kernel:    .byte 1
+#else
+relocatable_kernel:    .byte 0
+#endif
+pad2:                  .byte 0
+pad3:                  .word 0
+
 trampoline:    call    start_of_setup
                .align 16
                                        # The offset at this point is 0x240
@@ -588,11 +599,6 @@ rmodeswtch_normal:
        call    default_switch
 
 rmodeswtch_end:
-# we get the code32 start address and modify the below 'jmpi'
-# (loader may have changed it)
-       movl    %cs:code32_start, %eax
-       movl    %eax, %cs:code32
-
 # Now we move the system to its rightful place ... but we check if we have a
 # big-kernel. In that case we *must* not move it ...
        testb   $LOADED_HIGH, %cs:loadflags
@@ -788,11 +794,12 @@ a20_err_msg:
 a20_done:
 
 #endif /* CONFIG_X86_VOYAGER */
-# set up gdt and idt
+# set up gdt and idt and 32bit start address
        lidt    idt_48                          # load idt with 0,0
        xorl    %eax, %eax                      # Compute gdt_base
        movw    %ds, %ax                        # (Convert %ds:gdt to a linear ptr)
        shll    $4, %eax
+       addl    %eax, code32
        addl    $gdt, %eax
        movl    %eax, (gdt_48+2)
        lgdt    gdt_48                          # load gdt with whatever is
@@ -851,9 +858,26 @@ flush_instr:
 #      Manual, Mixing 16-bit and 32-bit code, page 16-6)
 
        .byte 0x66, 0xea                        # prefix + jmpi-opcode
-code32:        .long   0x1000                          # will be set to 0x100000
-                                               # for big kernels
+code32:        .long   startup_32                      # will be set to %cs+startup_32
        .word   __BOOT_CS
+.code32
+startup_32:
+       movl $(__BOOT_DS), %eax
+       movl %eax, %ds
+       movl %eax, %es
+       movl %eax, %fs
+       movl %eax, %gs
+       movl %eax, %ss
+
+       xorl %eax, %eax
+1:     incl %eax                               # check that A20 really IS enabled
+       movl %eax, 0x00000000                   # loop forever if it isn't
+       cmpl %eax, 0x00100000
+       je 1b
+
+       # Jump to the 32bit entry point
+       jmpl *(code32_start - start + (DELTA_INITSEG << 4))(%esi)
+.code16
 
 # Here's a bunch of information about your current kernel..
 kernel_version:        .ascii  UTS_RELEASE
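The header additions above bump the boot protocol to 2.05 and let a boot loader discover whether the kernel may be placed at an arbitrary, suitably aligned address. A hedged sketch of the loader-side check; the field offsets used here (protocol version at 0x206, kernel_alignment at 0x230, relocatable_kernel at 0x234 within the real-mode image) follow the boot protocol documentation and should be treated as assumptions rather than something this hunk spells out:

    #include <stdint.h>

    /* Decide where a boot loader may place the protected-mode kernel,
     * given a pointer to the real-mode boot sector + setup image.
     * Offsets assumed per the i386 boot protocol 2.05 header layout.
     */
    static uint32_t pick_load_addr(const uint8_t *setup, uint32_t preferred)
    {
            uint16_t version     = setup[0x206] | (setup[0x207] << 8);
            uint32_t align       = setup[0x230] | (setup[0x231] << 8) |
                                   ((uint32_t)setup[0x232] << 16) |
                                   ((uint32_t)setup[0x233] << 24);
            uint8_t  relocatable = setup[0x234];

            if (version < 0x0205 || !relocatable)
                    return 0x100000;        /* traditional fixed 1 MB load */

            return (preferred + align - 1) & ~(align - 1);
    }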
index 97aacd6bd7d810a85c15f9d96811167b5b680165..65891f11acedc52640472c1185a47165662a6503 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc2-git4
-# Sat Oct 21 03:38:56 2006
+# Linux kernel version: 2.6.19-git7
+# Wed Dec  6 23:50:49 2006
 #
 CONFIG_X86_32=y
 CONFIG_GENERIC_TIME=y
@@ -40,13 +40,14 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_CPUSETS is not set
+CONFIG_SYSFS_DEPRECATED=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -110,6 +111,7 @@ CONFIG_SMP=y
 # CONFIG_X86_VISWS is not set
 CONFIG_X86_GENERICARCH=y
 # CONFIG_X86_ES7000 is not set
+# CONFIG_PARAVIRT is not set
 CONFIG_X86_CYCLONE_TIMER=y
 # CONFIG_M386 is not set
 # CONFIG_M486 is not set
@@ -120,6 +122,7 @@ CONFIG_X86_CYCLONE_TIMER=y
 # CONFIG_MPENTIUMII is not set
 CONFIG_MPENTIUMIII=y
 # CONFIG_MPENTIUMM is not set
+# CONFIG_MCORE2 is not set
 # CONFIG_MPENTIUM4 is not set
 # CONFIG_MK6 is not set
 # CONFIG_MK7 is not set
@@ -197,7 +200,6 @@ CONFIG_RESOURCES_64BIT=y
 CONFIG_MTRR=y
 # CONFIG_EFI is not set
 # CONFIG_IRQBALANCE is not set
-CONFIG_REGPARM=y
 CONFIG_SECCOMP=y
 # CONFIG_HZ_100 is not set
 CONFIG_HZ_250=y
@@ -205,7 +207,8 @@ CONFIG_HZ_250=y
 CONFIG_HZ=250
 # CONFIG_KEXEC is not set
 # CONFIG_CRASH_DUMP is not set
-CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x100000
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_COMPAT_VDSO=y
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
@@ -367,6 +370,7 @@ CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 CONFIG_IPV6=y
 # CONFIG_IPV6_PRIVACY is not set
 # CONFIG_IPV6_ROUTER_PREF is not set
@@ -677,6 +681,7 @@ CONFIG_SATA_INTEL_COMBINED=y
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -850,6 +855,7 @@ CONFIG_BNX2=y
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -984,10 +990,6 @@ CONFIG_RTC=y
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_SONYPI is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 CONFIG_AGP=y
 # CONFIG_AGP_ALI is not set
 # CONFIG_AGP_ATI is not set
@@ -1108,6 +1110,7 @@ CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_BANDWIDTH is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
 # CONFIG_USB_OTG is not set
 
 #
@@ -1185,6 +1188,7 @@ CONFIG_USB_HIDINPUT=y
 # CONFIG_USB_KAWETH is not set
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
 CONFIG_USB_MON=y
 
index 1a884b6e6e5c97d472fb5c0e8c10d982f806d2f9..1e8988e558c54971fe08fbe9f868a72c7126a924 100644 (file)
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y  := process.o signal.o entry.o traps.o irq.o \
                ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
-               pci-dma.o i386_ksyms.o i387.o bootflag.o \
+               pci-dma.o i386_ksyms.o i387.o bootflag.o e820.o\
                quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
@@ -40,6 +40,9 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_HPET_TIMER)       += hpet.o
 obj-$(CONFIG_K8_NB)            += k8.o
 
+# Make sure this is linked after any other paravirt_ops structs: see head.S
+obj-$(CONFIG_PARAVIRT)         += paravirt.o
+
 EXTRA_AFLAGS   := -traditional
 
 obj-$(CONFIG_SCx200)           += scx200.o
index 4664b55f623ef2b349a510c0dcd487d8149063c3..12e937c1ce4bbe11feccd1be11d75e728adc1f44 100644 (file)
@@ -156,10 +156,8 @@ static int __init ffh_cstate_init(void)
 
 static void __exit ffh_cstate_exit(void)
 {
-       if (cpu_cstate_entry) {
-               free_percpu(cpu_cstate_entry);
-               cpu_cstate_entry = NULL;
-       }
+       free_percpu(cpu_cstate_entry);
+       cpu_cstate_entry = NULL;
 }
 
 arch_initcall(ffh_cstate_init);
index c9841692bb7cc1170861044ecd973916373b18d3..4b60af7f91dd0c915c42f24d09976254b7c42353 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>
+#include <asm/irq.h>
 
 #ifdef CONFIG_ACPI
 
@@ -49,6 +50,24 @@ static int __init check_bridge(int vendor, int device)
        return 0;
 }
 
+static void check_intel(void)
+{
+       u16 vendor, device;
+
+       vendor = read_pci_config_16(0, 0, 0, PCI_VENDOR_ID);
+
+       if (vendor != PCI_VENDOR_ID_INTEL)
+               return;
+
+       device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+#ifdef CONFIG_SMP
+       if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+           device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+           device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+               quirk_intel_irqbalance();
+#endif
+}
+
 void __init check_acpi_pci(void)
 {
        int num, slot, func;
@@ -60,6 +79,8 @@ void __init check_acpi_pci(void)
        if (!early_pci_allowed())
                return;
 
+       check_intel();
+
        /* Poor man's PCI discovery */
        for (num = 0; num < 32; num++) {
                for (slot = 0; slot < 32; slot++) {
index 535f9794fba1aa811cb7a1ea270fa3c285fc0f98..9eca21b49f6b3d0487df153594cdcbbfdebc17c7 100644 (file)
@@ -124,6 +124,20 @@ static unsigned char** find_nop_table(void)
 
 #endif /* CONFIG_X86_64 */
 
+static void nop_out(void *insns, unsigned int len)
+{
+       unsigned char **noptable = find_nop_table();
+
+       while (len > 0) {
+               unsigned int noplen = len;
+               if (noplen > ASM_NOP_MAX)
+                       noplen = ASM_NOP_MAX;
+               memcpy(insns, noptable[noplen], noplen);
+               insns += noplen;
+               len -= noplen;
+       }
+}
+
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
 extern u8 *__smp_locks[], *__smp_locks_end[];
@@ -138,10 +152,9 @@ extern u8 __smp_alt_begin[], __smp_alt_end[];
 
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
-       unsigned char **noptable = find_nop_table();
        struct alt_instr *a;
        u8 *instr;
-       int diff, i, k;
+       int diff;
 
        DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
        for (a = start; a < end; a++) {
@@ -159,13 +172,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 #endif
                memcpy(instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;
-               /* Pad the rest with nops */
-               for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
-                       k = diff;
-                       if (k > ASM_NOP_MAX)
-                               k = ASM_NOP_MAX;
-                       memcpy(a->instr + i, noptable[k], k);
-               }
+               nop_out(instr + a->replacementlen, diff);
        }
 }
 
@@ -209,7 +216,6 @@ static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 
 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
 {
-       unsigned char **noptable = find_nop_table();
        u8 **ptr;
 
        for (ptr = start; ptr < end; ptr++) {
@@ -217,7 +223,7 @@ static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end
                        continue;
                if (*ptr > text_end)
                        continue;
-               **ptr = noptable[1][0];
+               nop_out(*ptr, 1);
        };
 }
 
@@ -343,6 +349,40 @@ void alternatives_smp_switch(int smp)
 
 #endif
 
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{
+       struct paravirt_patch *p;
+
+       for (p = start; p < end; p++) {
+               unsigned int used;
+
+               used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
+                                         p->len);
+#ifdef CONFIG_DEBUG_PARAVIRT
+               {
+               int i;
+               /* Deliberately clobber regs using "not %reg" to find bugs. */
+               for (i = 0; i < 3; i++) {
+                       if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
+                               memcpy(p->instr + used, "\xf7\xd0", 2);
+                               p->instr[used+1] |= i;
+                               used += 2;
+                       }
+               }
+               }
+#endif
+               /* Pad the rest with nops */
+               nop_out(p->instr + used, p->len - used);
+       }
+
+       /* Sync to be conservative, in case we patched following instructions */
+       sync_core();
+}
+extern struct paravirt_patch __start_parainstructions[],
+       __stop_parainstructions[];
+#endif /* CONFIG_PARAVIRT */
+
 void __init alternative_instructions(void)
 {
        unsigned long flags;
@@ -390,5 +430,6 @@ void __init alternative_instructions(void)
                alternatives_smp_switch(0);
        }
 #endif
+       apply_paravirt(__start_parainstructions, __stop_parainstructions);
        local_irq_restore(flags);
 }
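
The alternative.c changes above fold the "pad with the longest NOP available" loop into one helper, nop_out(), shared by apply_alternatives(), alternatives_smp_unlock() and the new apply_paravirt() path. A self-contained sketch of the same chunking idea follows; the single-byte 0x90 fillers stand in for the vendor-specific multi-byte NOP tables the kernel actually indexes by length.

    #include <string.h>

    #define ASM_NOP_MAX 8

    /* noptable[len] encodes a len-byte no-op; entry 0 is never used.
     * Placeholder encodings only - the kernel selects per-vendor tables. */
    static const unsigned char *noptable[ASM_NOP_MAX + 1] = {
            (const unsigned char *)"",
            (const unsigned char *)"\x90",
            (const unsigned char *)"\x90\x90",
            (const unsigned char *)"\x90\x90\x90",
            (const unsigned char *)"\x90\x90\x90\x90",
            (const unsigned char *)"\x90\x90\x90\x90\x90",
            (const unsigned char *)"\x90\x90\x90\x90\x90\x90",
            (const unsigned char *)"\x90\x90\x90\x90\x90\x90\x90",
            (const unsigned char *)"\x90\x90\x90\x90\x90\x90\x90\x90",
    };

    /* Overwrite len bytes at insns with NOPs, ASM_NOP_MAX bytes at a time. */
    static void nop_out(unsigned char *insns, unsigned int len)
    {
            while (len > 0) {
                    unsigned int noplen = len;

                    if (noplen > ASM_NOP_MAX)
                            noplen = ASM_NOP_MAX;
                    memcpy(insns, noptable[noplen], noplen);
                    insns += noplen;
                    len -= noplen;
            }
    }
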
index 2fd4b7d927c24f5578b437b7a183da4c24537488..776d9be26af9ea5906bc0e04798878e69d3b51a3 100644 (file)
@@ -647,23 +647,30 @@ static struct {
 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 {
        unsigned long flags;
+       int maxlvt;
 
        if (!apic_pm_state.active)
                return 0;
 
+       maxlvt = get_maxlvt();
+
        apic_pm_state.apic_id = apic_read(APIC_ID);
        apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
        apic_pm_state.apic_ldr = apic_read(APIC_LDR);
        apic_pm_state.apic_dfr = apic_read(APIC_DFR);
        apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
        apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
-       apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+       if (maxlvt >= 4)
+               apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
        apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
        apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
        apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
        apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
        apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-       apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+       if (maxlvt >= 5)
+               apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
        
        local_irq_save(flags);
        disable_local_APIC();
@@ -675,10 +682,13 @@ static int lapic_resume(struct sys_device *dev)
 {
        unsigned int l, h;
        unsigned long flags;
+       int maxlvt;
 
        if (!apic_pm_state.active)
                return 0;
 
+       maxlvt = get_maxlvt();
+
        local_irq_save(flags);
 
        /*
@@ -700,8 +710,12 @@ static int lapic_resume(struct sys_device *dev)
        apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
        apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
        apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-       apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
-       apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+#ifdef CONFIG_X86_MCE_P4THERMAL
+       if (maxlvt >= 5)
+               apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+       if (maxlvt >= 4)
+               apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
        apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
        apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
        apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
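
The two apic.c hunks above stop saving and restoring LVTPC and LVTTHMR on local APICs that do not implement those entries; whether they exist follows from the APIC version register, whose bits 16-23 report the index of the highest local vector table entry. A small hedged sketch of that derivation (field position per the architecture manuals; the function name is illustrative):

    /* maxlvt >= 4 means the performance-counter LVT exists;
     * maxlvt >= 5 means the thermal LVT exists as well. */
    static unsigned int lapic_maxlvt(unsigned int apic_version_reg)
    {
            return (apic_version_reg >> 16) & 0xff;
    }
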
index a60358fe9a49d828a69c3271459ef5f5949e2aca..a97847da9ed59e85508ace8f5a77be90260d8c6a 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/i8253.h>
+#include <asm/paravirt.h>
 
 #include "io_ports.h"
 
@@ -2235,7 +2236,7 @@ static int __init apm_init(void)
 
        dmi_check_system(apm_dmi_table);
 
-       if (apm_info.bios.version == 0) {
+       if (apm_info.bios.version == 0 || paravirt_enabled()) {
                printk(KERN_INFO "apm: BIOS not found.\n");
                return -ENODEV;
        }
index c80271f8f084c04014d926185cbe896a86fe8d9e..1b2f3cd332707f7fd6beb0e6fdf7b0a4149eb5ad 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/elf.h>
+#include <asm/pda.h>
 
 #define DEFINE(sym, val) \
         asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -51,13 +52,35 @@ void foo(void)
        OFFSET(TI_exec_domain, thread_info, exec_domain);
        OFFSET(TI_flags, thread_info, flags);
        OFFSET(TI_status, thread_info, status);
-       OFFSET(TI_cpu, thread_info, cpu);
        OFFSET(TI_preempt_count, thread_info, preempt_count);
        OFFSET(TI_addr_limit, thread_info, addr_limit);
        OFFSET(TI_restart_block, thread_info, restart_block);
        OFFSET(TI_sysenter_return, thread_info, sysenter_return);
        BLANK();
 
+       OFFSET(GDS_size, Xgt_desc_struct, size);
+       OFFSET(GDS_address, Xgt_desc_struct, address);
+       OFFSET(GDS_pad, Xgt_desc_struct, pad);
+       BLANK();
+
+       OFFSET(PT_EBX, pt_regs, ebx);
+       OFFSET(PT_ECX, pt_regs, ecx);
+       OFFSET(PT_EDX, pt_regs, edx);
+       OFFSET(PT_ESI, pt_regs, esi);
+       OFFSET(PT_EDI, pt_regs, edi);
+       OFFSET(PT_EBP, pt_regs, ebp);
+       OFFSET(PT_EAX, pt_regs, eax);
+       OFFSET(PT_DS,  pt_regs, xds);
+       OFFSET(PT_ES,  pt_regs, xes);
+       OFFSET(PT_GS,  pt_regs, xgs);
+       OFFSET(PT_ORIG_EAX, pt_regs, orig_eax);
+       OFFSET(PT_EIP, pt_regs, eip);
+       OFFSET(PT_CS,  pt_regs, xcs);
+       OFFSET(PT_EFLAGS, pt_regs, eflags);
+       OFFSET(PT_OLDESP, pt_regs, esp);
+       OFFSET(PT_OLDSS,  pt_regs, xss);
+       BLANK();
+
        OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
        OFFSET(RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
        BLANK();
@@ -74,4 +97,18 @@ void foo(void)
        DEFINE(VDSO_PRELINK, VDSO_PRELINK);
 
        OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
+
+       BLANK();
+       OFFSET(PDA_cpu, i386_pda, cpu_number);
+       OFFSET(PDA_pcurrent, i386_pda, pcurrent);
+
+#ifdef CONFIG_PARAVIRT
+       BLANK();
+       OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
+       OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
+       OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
+       OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
+       OFFSET(PARAVIRT_iret, paravirt_ops, iret);
+       OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+#endif
 }
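
asm-offsets.c is never linked into the kernel; it is compiled to assembly only, and the DEFINE()/OFFSET() macros emit "->SYMBOL value" marker lines that the build rewrites into #define lines in a generated header. That is how entry.S picks up the new PT_* register offsets and the PDA_* fields added above. A minimal sketch of the mechanism with a stand-in structure (the exact Kbuild post-processing step is only summarized in the comment):

    #include <stddef.h>

    /* Compiled with -S, each invocation leaves a line such as
     *   ->EX_EBX $0 offsetof(struct regs_example, ebx)
     * in the assembly output, which the build turns into
     *   #define EX_EBX 0
     * in an asm-offsets header usable from .S files. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    struct regs_example { long ebx, ecx, edx; };   /* stand-in for pt_regs */

    void emit_offsets(void)
    {
            OFFSET(EX_EBX, regs_example, ebx);
            OFFSET(EX_ECX, regs_example, ecx);
    }
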
index e4758095d87a8d72232f3876a2973adcd06db92a..41cfea57232bb290d7a81061b7f397c53984d23b 100644 (file)
@@ -104,10 +104,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                                        f_vide();
                                rdtscl(d2);
                                d = d2-d;
-                               
-                               /* Knock these two lines out if it debugs out ok */
-                               printk(KERN_INFO "AMD K6 stepping B detected - ");
-                               /* -- cut here -- */
+
                                if (d > 20*K6_BUG_LOOP) 
                                        printk("system stability may be impaired when more than 32 MB are used.\n");
                                else 
index d9f3e3c31f054240c790bbb461c8f261f2726487..1b34c56f8123ac7f4ccb7a7e677bee6ef8f7cb6f 100644 (file)
 #include <asm/apic.h>
 #include <mach_apic.h>
 #endif
+#include <asm/pda.h>
 
 #include "cpu.h"
 
 DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
 EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 
-DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
+struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
 
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
@@ -235,29 +236,14 @@ static int __cpuinit have_cpuid_p(void)
        return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
-static void __init early_cpu_detect(void)
+void __init cpu_detect(struct cpuinfo_x86 *c)
 {
-       struct cpuinfo_x86 *c = &boot_cpu_data;
-
-       c->x86_cache_alignment = 32;
-
-       if (!have_cpuid_p())
-               return;
-
        /* Get vendor name */
        cpuid(0x00000000, &c->cpuid_level,
              (int *)&c->x86_vendor_id[0],
              (int *)&c->x86_vendor_id[8],
              (int *)&c->x86_vendor_id[4]);
 
-       get_cpu_vendor(c, 1);
-
        c->x86 = 4;
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
@@ -274,6 +260,26 @@ static void __init early_cpu_detect(void)
        }
 }
 
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects.
+
+   WARNING: this function is only called on the BP.  Don't add code here
+   that is supposed to run on all CPUs. */
+static void __init early_cpu_detect(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       c->x86_cache_alignment = 32;
+
+       if (!have_cpuid_p())
+               return;
+
+       cpu_detect(c);
+
+       get_cpu_vendor(c, 1);
+}
+
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
        u32 tfms, xlvl;
@@ -308,6 +314,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 #else
                        c->apicid = (ebx >> 24) & 0xFF;
 #endif
+                       if (c->x86_capability[0] & (1<<19))
+                               c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
                } else {
                        /* Have CPUID level 0 only - unheard of */
                        c->x86 = 4;
@@ -372,6 +380,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
+       c->x86_clflush_size = 32;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
        if (!have_cpuid_p()) {
@@ -591,42 +600,24 @@ void __init early_cpu_init(void)
        disable_pse = 1;
 #endif
 }
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __cpuinit cpu_init(void)
+
+/* Make sure %gs is initialized properly in idle threads */
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
-       int cpu = smp_processor_id();
-       struct tss_struct * t = &per_cpu(init_tss, cpu);
-       struct thread_struct *thread = &current->thread;
-       struct desc_struct *gdt;
-       __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
-       struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+       memset(regs, 0, sizeof(struct pt_regs));
+       regs->xgs = __KERNEL_PDA;
+       return regs;
+}
 
-       if (cpu_test_and_set(cpu, cpu_initialized)) {
-               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-               for (;;) local_irq_enable();
-       }
-       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+static __cpuinit int alloc_gdt(int cpu)
+{
+       struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+       struct desc_struct *gdt;
+       struct i386_pda *pda;
 
-       if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
-               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-       if (tsc_disable && cpu_has_tsc) {
-               printk(KERN_NOTICE "Disabling TSC...\n");
-               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-               set_in_cr4(X86_CR4_TSD);
-       }
+       gdt = (struct desc_struct *)cpu_gdt_descr->address;
+       pda = cpu_pda(cpu);
 
-       /* The CPU hotplug case */
-       if (cpu_gdt_descr->address) {
-               gdt = (struct desc_struct *)cpu_gdt_descr->address;
-               memset(gdt, 0, PAGE_SIZE);
-               goto old_gdt;
-       }
        /*
         * This is a horrible hack to allocate the GDT.  The problem
         * is that cpu_init() is called really early for the boot CPU
@@ -634,43 +625,130 @@ void __cpuinit cpu_init(void)
         * CPUs, when bootmem will have gone away
         */
        if (NODE_DATA(0)->bdata->node_bootmem_map) {
-               gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-               /* alloc_bootmem_pages panics on failure, so no check */
+               BUG_ON(gdt != NULL || pda != NULL);
+
+               gdt = alloc_bootmem_pages(PAGE_SIZE);
+               pda = alloc_bootmem(sizeof(*pda));
+               /* alloc_bootmem(_pages) panics on failure, so no check */
+
                memset(gdt, 0, PAGE_SIZE);
+               memset(pda, 0, sizeof(*pda));
        } else {
-               gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-               if (unlikely(!gdt)) {
-                       printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-                       for (;;)
-                               local_irq_enable();
+               /* GDT and PDA might already have been allocated if
+                  this is a CPU hotplug re-insertion. */
+               if (gdt == NULL)
+                       gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+
+               if (pda == NULL)
+                       pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+
+               if (unlikely(!gdt || !pda)) {
+                       free_pages((unsigned long)gdt, 0);
+                       kfree(pda);
+                       return 0;
                }
        }
-old_gdt:
+
+       cpu_gdt_descr->address = (unsigned long)gdt;
+       cpu_pda(cpu) = pda;
+
+       return 1;
+}
+
+/* Initial PDA used by boot CPU */
+struct i386_pda boot_pda = {
+       ._pda = &boot_pda,
+       .cpu_number = 0,
+       .pcurrent = &init_task,
+};
+
+static inline void set_kernel_gs(void)
+{
+       /* Set %gs for this CPU's PDA.  Memory clobber is to create a
+          barrier with respect to any PDA operations, so the compiler
+          doesn't move any before here. */
+       asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
+}
+
+/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
+   itself, but secondaries find this done for them. */
+__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+{
+       struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+       struct desc_struct *gdt;
+       struct i386_pda *pda;
+
+       /* For non-boot CPUs, the GDT and PDA should already have been
+          allocated. */
+       if (!alloc_gdt(cpu)) {
+               printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
+               return 0;
+       }
+
+       gdt = (struct desc_struct *)cpu_gdt_descr->address;
+       pda = cpu_pda(cpu);
+
+       BUG_ON(gdt == NULL || pda == NULL);
+
        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+       cpu_gdt_descr->size = GDT_SIZE - 1;
 
-       /* Set up GDT entry for 16bit stack */
-       *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
-               ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
-               ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
-               (CPU_16BIT_STACK_SIZE - 1);
+       pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+                       (u32 *)&gdt[GDT_ENTRY_PDA].b,
+                       (unsigned long)pda, sizeof(*pda) - 1,
+                       0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
 
-       cpu_gdt_descr->size = GDT_SIZE - 1;
-       cpu_gdt_descr->address = (unsigned long)gdt;
+       memset(pda, 0, sizeof(*pda));
+       pda->_pda = pda;
+       pda->cpu_number = cpu;
+       pda->pcurrent = idle;
+
+       return 1;
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+       struct tss_struct * t = &per_cpu(init_tss, cpu);
+       struct thread_struct *thread = &curr->thread;
+       struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
+       /* Reinit these anyway, even if they've already been done (on
+          the boot CPU, this will transition from the boot gdt+pda to
+          the real ones). */
        load_gdt(cpu_gdt_descr);
+       set_kernel_gs();
+
+       if (cpu_test_and_set(cpu, cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+               for (;;) local_irq_enable();
+       }
+
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+       if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+               clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+       if (tsc_disable && cpu_has_tsc) {
+               printk(KERN_NOTICE "Disabling TSC...\n");
+               /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+               clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+               set_in_cr4(X86_CR4_TSD);
+       }
+
        load_idt(&idt_descr);
 
        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
-       current->active_mm = &init_mm;
-       BUG_ON(current->mm);
-       enter_lazy_tlb(&init_mm, current);
+       curr->active_mm = &init_mm;
+       if (curr->mm)
+               BUG();
+       enter_lazy_tlb(&init_mm, curr);
 
        load_esp0(t, thread);
        set_tss_desc(cpu,t);
@@ -682,8 +760,8 @@ old_gdt:
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-       /* Clear %fs and %gs. */
-       asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
+       /* Clear %fs. */
+       asm volatile ("mov %0, %%fs" : : "r" (0));
 
        /* Clear all 6 debug registers: */
        set_debugreg(0, 0);
@@ -701,6 +779,37 @@ old_gdt:
        mxcsr_feature_mask_init();
 }
 
+/* Entrypoint to initialize secondary CPU */
+void __cpuinit secondary_cpu_init(void)
+{
+       int cpu = smp_processor_id();
+       struct task_struct *curr = current;
+
+       _cpu_init(cpu, curr);
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
+{
+       int cpu = smp_processor_id();
+       struct task_struct *curr = current;
+
+       /* Set up the real GDT and PDA, so we can transition from the
+          boot versions. */
+       if (!init_gdt(cpu, curr)) {
+               /* failed to allocate something; not much we can do... */
+               for (;;)
+                       local_irq_enable();
+       }
+
+       _cpu_init(cpu, curr);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void __cpuinit cpu_uninit(void)
 {
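
The common.c rewrite above splits GDT/PDA allocation (alloc_gdt), descriptor setup (init_gdt) and the per-CPU bring-up (_cpu_init), so the boot CPU can switch from the static boot_pda to its real PDA and secondaries arrive with everything prepared. The PDA is reached through a dedicated GDT data segment loaded into %gs, built with pack_descriptor() and access byte 0x80 | DESCTYPE_S | 0x2, i.e. a present, writable data segment. A sketch of the standard 32-bit descriptor encoding such a call produces (field layout per the x86 architecture; the function name is illustrative):

    #include <stdint.h>

    /* Pack base/limit/access/flags into the two words of an x86 segment
     * descriptor.  With the granularity flag clear, limit is in bytes,
     * which suits a small structure such as the PDA. */
    static void pack_data_descriptor(uint32_t *lo, uint32_t *hi,
                                     uint32_t base, uint32_t limit,
                                     uint8_t access, uint8_t flags)
    {
            *lo = ((base & 0xffffu) << 16) | (limit & 0xffffu);
            *hi = (base & 0xff000000u) |
                  ((uint32_t)(flags & 0x0f) << 20) |
                  (limit & 0x000f0000u) |
                  ((uint32_t)access << 8) |
                  ((base >> 16) & 0xffu);
    }
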
index 94a95aa5227e8be03735f56a6ecd5c84638e2be7..56fe26584957f7b9b733ea4e8fcd41b189ab0936 100644 (file)
@@ -107,7 +107,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
         * Note that the workaround only should be initialized once...
         */
        c->f00f_bug = 0;
-       if ( c->x86 == 5 ) {
+       if (!paravirt_enabled() && c->x86 == 5) {
                static int f00f_workaround_enabled = 0;
 
                c->f00f_bug = 1;
@@ -195,8 +195,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
-}
 
+       if (cpu_has_ds) {
+               unsigned int l1;
+               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+               if (!(l1 & (1<<11)))
+                       set_bit(X86_FEATURE_BTS, c->x86_capability);
+               if (!(l1 & (1<<12)))
+                       set_bit(X86_FEATURE_PEBS, c->x86_capability);
+       }
+}
 
 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 {
index 5c43be47587f5d4b9875e4c45070027cdb829d95..80b4c5d421b1366915028b2002af39cdae851c95 100644 (file)
@@ -480,12 +480,10 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
        if (num_cache_leaves == 0)
                return -ENOENT;
 
-       cpuid4_info[cpu] = kmalloc(
+       cpuid4_info[cpu] = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(cpuid4_info[cpu] == NULL))
                return -ENOMEM;
-       memset(cpuid4_info[cpu], 0,
-           sizeof(struct _cpuid4_info) * num_cache_leaves);
 
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -658,17 +656,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
                return -ENOENT;
 
        /* Allocate all required memory */
-       cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
+       cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(cache_kobject[cpu] == NULL))
                goto err_out;
-       memset(cache_kobject[cpu], 0, sizeof(struct kobject));
 
-       index_kobject[cpu] = kmalloc(
+       index_kobject[cpu] = kzalloc(
            sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(index_kobject[cpu] == NULL))
                goto err_out;
-       memset(index_kobject[cpu], 0,
-           sizeof(struct _index_kobject) * num_cache_leaves);
 
        return 0;
 
index 1f9153ae5b03729c799e792f7bef3a508d49cccd..6b5d3518a1c09b3532871fc337419b9477dcbc0c 100644 (file)
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
        }
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 { 
        on_each_cpu(mce_checkregs, NULL, 1, 1);
        schedule_delayed_work(&mce_work, MCE_RATE);
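
The non-fatal machine-check poller above is converted to the reworked workqueue API: the handler now receives the work_struct itself rather than an opaque data pointer, and the periodic job becomes a DECLARE_DELAYED_WORK. Here the work item is static, so no back-pointer is needed; when a handler does need its enclosing object, the usual idiom is container_of(), sketched below with illustrative structure and field names.

    #include <linux/workqueue.h>
    #include <linux/kernel.h>

    /* Hypothetical context object embedding its own delayed work item. */
    struct mce_poller {
            struct delayed_work work;
            unsigned long interval;     /* polling period in jiffies */
    };

    static void mce_poll_fn(struct work_struct *work)
    {
            struct mce_poller *p =
                    container_of(work, struct mce_poller, work.work);

            /* ... inspect the machine-check banks here ... */
            schedule_delayed_work(&p->work, p->interval);
    }
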
index bad8b4420709c3ee984dc89476929ad953e5d7a9..065005c3f16879c40fe82e59a53ebc8d95f240c8 100644 (file)
@@ -116,7 +116,6 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
        return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
        return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
@@ -153,7 +152,6 @@ static struct notifier_block thermal_throttle_cpu_notifier =
 {
        .notifier_call = thermal_throttle_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int thermal_throttle_init_device(void)
 {
index a25b701ab84e4e83872907ce922149116d8f95c4..191fc05336494bcb30c38c1a3e76d452030cd5d7 100644 (file)
@@ -1,5 +1,3 @@
 obj-y          := main.o if.o generic.o state.o
-obj-y          += amd.o
-obj-y          += cyrix.o
-obj-y          += centaur.o
+obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
 
index 1a1e04b6fd0044c05b30e44bee18648f68e076c0..0949cdbf848afcceff5e997f12ac882075c45876 100644 (file)
@@ -7,7 +7,7 @@
 
 static void
 amd_get_mtrr(unsigned int reg, unsigned long *base,
-            unsigned int *size, mtrr_type * type)
+            unsigned long *size, mtrr_type * type)
 {
        unsigned long low, high;
 
index 33f00ac314ef3e1e51b4488be696ac3133b3ad28..cb9aa3a7a7abe4ae37ce8f7cbc0e2c29b62159a3 100644 (file)
@@ -17,7 +17,7 @@ static u8 centaur_mcr_type;   /* 0 for winchip, 1 for winchip2 */
  */
 
 static int
-centaur_get_free_region(unsigned long base, unsigned long size)
+centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free MTRR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -26,10 +26,11 @@ centaur_get_free_region(unsigned long base, unsigned long size)
 {
        int i, max;
        mtrr_type ltype;
-       unsigned long lbase;
-       unsigned int lsize;
+       unsigned long lbase, lsize;
 
        max = num_var_ranges;
+       if (replace_reg >= 0 && replace_reg < max)
+               return replace_reg;
        for (i = 0; i < max; ++i) {
                if (centaur_mcr_reserved & (1 << i))
                        continue;
@@ -49,7 +50,7 @@ mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 
 static void
 centaur_get_mcr(unsigned int reg, unsigned long *base,
-               unsigned int *size, mtrr_type * type)
+               unsigned long *size, mtrr_type * type)
 {
        *base = centaur_mcr[reg].high >> PAGE_SHIFT;
        *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
index 9027a987006b93890f17363f2d4fac4c749e7f23..0737a596db434d7e6f9274c7447e3b8daf81e07c 100644 (file)
@@ -9,7 +9,7 @@ int arr3_protected;
 
 static void
 cyrix_get_arr(unsigned int reg, unsigned long *base,
-             unsigned int *size, mtrr_type * type)
+             unsigned long *size, mtrr_type * type)
 {
        unsigned long flags;
        unsigned char arr, ccr3, rcr, shift;
@@ -77,7 +77,7 @@ cyrix_get_arr(unsigned int reg, unsigned long *base,
 }
 
 static int
-cyrix_get_free_region(unsigned long base, unsigned long size)
+cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free ARR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -86,9 +86,24 @@ cyrix_get_free_region(unsigned long base, unsigned long size)
 {
        int i;
        mtrr_type ltype;
-       unsigned long lbase;
-       unsigned int  lsize;
+       unsigned long lbase, lsize;
 
+       switch (replace_reg) {
+       case 7:
+               if (size < 0x40)
+                       break;
+       case 6:
+       case 5:
+       case 4:
+               return replace_reg;
+       case 3:
+               if (arr3_protected)
+                       break;
+       case 2:
+       case 1:
+       case 0:
+               return replace_reg;
+       }
        /* If we are to set up a region >32M then look at ARR7 immediately */
        if (size > 0x2000) {
                cyrix_get_arr(7, &lbase, &lsize, &ltype);
@@ -214,7 +229,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
 
 typedef struct {
        unsigned long base;
-       unsigned int size;
+       unsigned long size;
        mtrr_type type;
 } arr_state_t;
 
index 0b61eed8bbd819cb3f4d71a59feec3376acec5c6..f77fc53db654655ccf5477e16c3750012a1a1230 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
@@ -15,12 +16,19 @@ struct mtrr_state {
        struct mtrr_var_range *var_ranges;
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
        unsigned char enabled;
+       unsigned char have_fixed;
        mtrr_type def_type;
 };
 
 static unsigned long smp_changes_mask;
 static struct mtrr_state mtrr_state = {};
 
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "mtrr."
+
+static __initdata int mtrr_show;
+module_param_named(show, mtrr_show, bool, 0);
+
 /*  Get the MSR pair relating to a var range  */
 static void __init
 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -43,6 +51,14 @@ get_fixed_ranges(mtrr_type * frs)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
 }
 
+static void __init print_fixed(unsigned base, unsigned step, const mtrr_type*types)
+{
+       unsigned i;
+
+       for (i = 0; i < 8; ++i, ++types, base += step)
+               printk(KERN_INFO "MTRR %05X-%05X %s\n", base, base + step - 1, mtrr_attrib_to_str(*types));
+}
+
 /*  Grab all of the MTRR state for this CPU into *state  */
 void __init get_mtrr_state(void)
 {
@@ -58,13 +74,49 @@ void __init get_mtrr_state(void)
        } 
        vrs = mtrr_state.var_ranges;
 
+       rdmsr(MTRRcap_MSR, lo, dummy);
+       mtrr_state.have_fixed = (lo >> 8) & 1;
+
        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
-       get_fixed_ranges(mtrr_state.fixed_ranges);
+       if (mtrr_state.have_fixed)
+               get_fixed_ranges(mtrr_state.fixed_ranges);
 
        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;
+
+       if (mtrr_show) {
+               int high_width;
+
+               printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
+               if (mtrr_state.have_fixed) {
+                       printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
+                              mtrr_state.enabled & 1 ? "en" : "dis");
+                       print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
+                       for (i = 0; i < 2; ++i)
+                               print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
+                       for (i = 0; i < 8; ++i)
+                               print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
+               }
+               printk(KERN_INFO "MTRR variable ranges %sabled:\n",
+                      mtrr_state.enabled & 2 ? "en" : "dis");
+               high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
+               for (i = 0; i < num_var_ranges; ++i) {
+                       if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
+                               printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
+                                      i,
+                                      high_width,
+                                      mtrr_state.var_ranges[i].base_hi,
+                                      mtrr_state.var_ranges[i].base_lo >> 12,
+                                      high_width,
+                                      mtrr_state.var_ranges[i].mask_hi,
+                                      mtrr_state.var_ranges[i].mask_lo >> 12,
+                                      mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
+                       else
+                               printk(KERN_INFO "MTRR %u disabled\n", i);
+               }
+       }
 }
 
 /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
@@ -95,7 +147,7 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
                        smp_processor_id(), msr, a, b);
 }
 
-int generic_get_free_region(unsigned long base, unsigned long size)
+int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 /*  [SUMMARY] Get a free MTRR.
     <base> The starting (base) address of the region.
     <size> The size (in bytes) of the region.
@@ -104,10 +156,11 @@ int generic_get_free_region(unsigned long base, unsigned long size)
 {
        int i, max;
        mtrr_type ltype;
-       unsigned long lbase;
-       unsigned lsize;
+       unsigned long lbase, lsize;
 
        max = num_var_ranges;
+       if (replace_reg >= 0 && replace_reg < max)
+               return replace_reg;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
@@ -117,7 +170,7 @@ int generic_get_free_region(unsigned long base, unsigned long size)
 }
 
 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
-                            unsigned int *size, mtrr_type * type)
+                            unsigned long *size, mtrr_type *type)
 {
        unsigned int mask_lo, mask_hi, base_lo, base_hi;
 
@@ -202,7 +255,9 @@ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
        return changed;
 }
 
-static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
+static u32 deftype_lo, deftype_hi;
+
+static unsigned long set_mtrr_state(void)
 /*  [SUMMARY] Set the MTRR state for this CPU.
     <state> The MTRR state information to read.
     <ctxt> Some relevant CPU context.
@@ -217,14 +272,14 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;
 
-       if (set_fixed_ranges(mtrr_state.fixed_ranges))
+       if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;
 
        /*  Set_mtrr_restore restores the old value of MTRRdefType,
           so to set it we fiddle with the saved value  */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
-               deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10);
+               deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }
 
@@ -233,7 +288,6 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
 
 
 static unsigned long cr4 = 0;
-static u32 deftype_lo, deftype_hi;
 static DEFINE_SPINLOCK(set_atomicity_lock);
 
 /*
@@ -271,7 +325,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
 
        /*  Disable MTRRs, and set the default type to uncached  */
-       mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
+       mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
 }
 
 static void post_set(void) __releases(set_atomicity_lock)
@@ -300,7 +354,7 @@ static void generic_set_all(void)
        prepare_set();
 
        /* Actually set the state */
-       mask = set_mtrr_state(deftype_lo,deftype_hi);
+       mask = set_mtrr_state();
 
        post_set();
        local_irq_restore(flags);
@@ -366,7 +420,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, unsigned i
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
-               if (!(base + size < 0x70000000 || base > 0x7003FFFF) &&
+               if (!(base + size < 0x70000 || base > 0x7003F) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
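
In generic.c above, prepare_set() now masks MTRRdefType with ~0xcff instead of the old 0xf300, and set_mtrr_state() rebuilds the low word the same way: bits 0-7 hold the default memory type, bit 10 the fixed-range enable and bit 11 the global MTRR enable, so clearing exactly those bits disables the MTRRs and forces uncached without touching reserved bits. The same hunk series also fixes the 0x70000000-0x7003FFFF hang check to compare page numbers, since base and size are in pages here. A small sketch of the default-type register manipulation (register layout per the architecture manuals; the macro names are illustrative):

    #include <stdint.h>

    #define MTRR_DEF_TYPE_MASK  0x000000ffu   /* bits 0-7: default memory type */
    #define MTRR_DEF_FE         (1u << 10)    /* fixed-range MTRRs enable      */
    #define MTRR_DEF_E          (1u << 11)    /* MTRRs enable                  */

    /* Clear the type and both enable bits (uncached, MTRRs off), then
     * rebuild them from the saved state, leaving reserved bits untouched. */
    static uint32_t mtrr_deftype_rebuild(uint32_t deftype_lo,
                                         uint8_t def_type, uint8_t enabled)
    {
            deftype_lo &= ~(MTRR_DEF_TYPE_MASK | MTRR_DEF_FE | MTRR_DEF_E);
            return deftype_lo | def_type | ((uint32_t)enabled << 10);
    }
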
index 5ac051bb9d55e36b4daf4a1a06e072992728d18e..5ae1705eafa6281adcfc065702a0d2743575d5e2 100644 (file)
@@ -17,7 +17,7 @@ extern unsigned int *usage_table;
 
 #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
 
-static char *mtrr_strings[MTRR_NUM_TYPES] =
+static const char *const mtrr_strings[MTRR_NUM_TYPES] =
 {
     "uncachable",               /* 0 */
     "write-combining",          /* 1 */
@@ -28,7 +28,7 @@ static char *mtrr_strings[MTRR_NUM_TYPES] =
     "write-back",               /* 6 */
 };
 
-char *mtrr_attrib_to_str(int x)
+const char *mtrr_attrib_to_str(int x)
 {
        return (x <= 6) ? mtrr_strings[x] : "?";
 }
@@ -44,10 +44,9 @@ mtrr_file_add(unsigned long base, unsigned long size,
 
        max = num_var_ranges;
        if (fcount == NULL) {
-               fcount = kmalloc(max * sizeof *fcount, GFP_KERNEL);
+               fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
                if (!fcount)
                        return -ENOMEM;
-               memset(fcount, 0, max * sizeof *fcount);
                FILE_FCOUNT(file) = fcount;
        }
        if (!page) {
@@ -155,6 +154,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 {
        int err = 0;
        mtrr_type type;
+       unsigned long size;
        struct mtrr_sentry sentry;
        struct mtrr_gentry gentry;
        void __user *arg = (void __user *) __arg;
@@ -235,15 +235,15 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
        case MTRRIOC_GET_ENTRY:
                if (gentry.regnum >= num_var_ranges)
                        return -EINVAL;
-               mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
+               mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
 
                /* Hide entries that go above 4GB */
-               if (gentry.base + gentry.size > 0x100000
-                   || gentry.size == 0x100000)
+               if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
+                   || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
                        gentry.base = gentry.size = gentry.type = 0;
                else {
                        gentry.base <<= PAGE_SHIFT;
-                       gentry.size <<= PAGE_SHIFT;
+                       gentry.size = size << PAGE_SHIFT;
                        gentry.type = type;
                }
 
@@ -273,8 +273,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
        case MTRRIOC_GET_PAGE_ENTRY:
                if (gentry.regnum >= num_var_ranges)
                        return -EINVAL;
-               mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
-               gentry.type = type;
+               mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
+               /* Hide entries that would overflow */
+               if (size != (__typeof__(gentry.size))size)
+                       gentry.base = gentry.size = gentry.type = 0;
+               else {
+                       gentry.size = size;
+                       gentry.type = type;
+               }
                break;
        }
 
@@ -353,8 +359,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
        char factor;
        int i, max, len;
        mtrr_type type;
-       unsigned long base;
-       unsigned int size;
+       unsigned long base, size;
 
        len = 0;
        max = num_var_ranges;
@@ -373,7 +378,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
                        }
                        /* RED-PEN: base can be > 32bit */ 
                        len += seq_printf(seq, 
-                                  "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n",
+                                  "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
                             i, base, base >> (20 - PAGE_SHIFT), size, factor,
                             mtrr_attrib_to_str(type), usage_table[i]);
                }
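
The if.c changes above follow the switch of mtrr_if->get() to unsigned long page counts: the byte-granularity MTRRIOC_GET_ENTRY ioctl now hides any register whose range cannot be represented in the 32-bit fields of struct mtrr_gentry, and the page-granularity variant hides entries whose size would be truncated. The 4 GiB bound is written as 1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT), i.e. the largest page count whose byte equivalent still fits in 32 bits. A sketch of that representability test (PAGE_SHIFT assumed to be 12; the helper name is illustrative):

    #define PAGE_SHIFT 12

    /* True if 'pages' pages starting at page number 'base' can be reported
     * through 32-bit byte-granularity fields without overflowing them.
     * A zero-sized (unused) register wraps the first term and is reported
     * as not fitting, which matches how the ioctl hides it. */
    static int fits_in_32bit_bytes(unsigned long base, unsigned long pages)
    {
            unsigned long limit = 1UL << (32 - PAGE_SHIFT);   /* 4 GiB in pages */

            return pages < limit && base + pages - 1 < limit;
    }
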
index fff90bda473323a7fd0364d6db35475bbb862cd8..16bb7ea87145411e9ee63f1dd31b982a4f7ed816 100644 (file)
@@ -59,7 +59,11 @@ struct mtrr_ops * mtrr_if = NULL;
 static void set_mtrr(unsigned int reg, unsigned long base,
                     unsigned long size, mtrr_type type);
 
+#ifndef CONFIG_X86_64
 extern int arr3_protected;
+#else
+#define arr3_protected 0
+#endif
 
 void set_mtrr_ops(struct mtrr_ops * ops)
 {
@@ -168,6 +172,13 @@ static void ipi_handler(void *info)
 
 #endif
 
+static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+       return type1 == MTRR_TYPE_UNCACHABLE ||
+              type2 == MTRR_TYPE_UNCACHABLE ||
+              (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
+              (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
+}
+
 /**
  * set_mtrr - update mtrrs on all processors
  * @reg:       mtrr in question
@@ -263,8 +274,8 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 
 /**
  *     mtrr_add_page - Add a memory type region
- *     @base: Physical base address of region in pages (4 KB)
- *     @size: Physical size of region in pages (4 KB)
+ *     @base: Physical base address of region in pages (in units of 4 kB!)
+ *     @size: Physical size of region in pages (4 kB)
  *     @type: Type of MTRR desired
  *     @increment: If this is true do usage counting on the region
  *
@@ -300,11 +311,9 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 int mtrr_add_page(unsigned long base, unsigned long size, 
                  unsigned int type, char increment)
 {
-       int i;
+       int i, replace, error;
        mtrr_type ltype;
-       unsigned long lbase;
-       unsigned int lsize;
-       int error;
+       unsigned long lbase, lsize;
 
        if (!mtrr_if)
                return -ENXIO;
@@ -324,12 +333,18 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                return -ENOSYS;
        }
 
+       if (!size) {
+               printk(KERN_WARNING "mtrr: zero sized request\n");
+               return -EINVAL;
+       }
+
        if (base & size_or_mask || size & size_or_mask) {
                printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
                return -EINVAL;
        }
 
        error = -EINVAL;
+       replace = -1;
 
        /* No CPU hotplug when we change MTRR entries */
        lock_cpu_hotplug();
@@ -337,21 +352,28 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        mutex_lock(&mtrr_mutex);
        for (i = 0; i < num_var_ranges; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
-               if (base >= lbase + lsize)
-                       continue;
-               if ((base < lbase) && (base + size <= lbase))
+               if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
                        continue;
                /*  At this point we know there is some kind of overlap/enclosure  */
-               if ((base < lbase) || (base + size > lbase + lsize)) {
+               if (base < lbase || base + size - 1 > lbase + lsize - 1) {
+                       if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+                               /*  New region encloses an existing region  */
+                               if (type == ltype) {
+                                       replace = replace == -1 ? i : -2;
+                                       continue;
+                               }
+                               else if (types_compatible(type, ltype))
+                                       continue;
+                       }
                        printk(KERN_WARNING
                               "mtrr: 0x%lx000,0x%lx000 overlaps existing"
-                              " 0x%lx000,0x%x000\n", base, size, lbase,
+                              " 0x%lx000,0x%lx000\n", base, size, lbase,
                               lsize);
                        goto out;
                }
                /*  New region is enclosed by an existing region  */
                if (ltype != type) {
-                       if (type == MTRR_TYPE_UNCACHABLE)
+                       if (types_compatible(type, ltype))
                                continue;
                        printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                             base, size, mtrr_attrib_to_str(ltype),
@@ -364,10 +386,18 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                goto out;
        }
        /*  Search for an empty MTRR  */
-       i = mtrr_if->get_free_region(base, size);
+       i = mtrr_if->get_free_region(base, size, replace);
        if (i >= 0) {
                set_mtrr(i, base, size, type);
-               usage_table[i] = 1;
+               if (likely(replace < 0))
+                       usage_table[i] = 1;
+               else {
+                       usage_table[i] = usage_table[replace] + !!increment;
+                       if (unlikely(replace != i)) {
+                               set_mtrr(replace, 0, 0, 0);
+                               usage_table[replace] = 0;
+                       }
+               }
        } else
                printk(KERN_INFO "mtrr: no more MTRRs available\n");
        error = i;
@@ -455,8 +485,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 {
        int i, max;
        mtrr_type ltype;
-       unsigned long lbase;
-       unsigned int lsize;
+       unsigned long lbase, lsize;
        int error = -EINVAL;
 
        if (!mtrr_if)
@@ -544,9 +573,11 @@ extern void centaur_init_mtrr(void);
 
 static void __init init_ifs(void)
 {
+#ifndef CONFIG_X86_64
        amd_init_mtrr();
        cyrix_init_mtrr();
        centaur_init_mtrr();
+#endif
 }
 
 /* The suspend/resume methods are only for CPU without MTRR. CPU using generic
@@ -555,7 +586,7 @@ static void __init init_ifs(void)
 struct mtrr_value {
        mtrr_type       ltype;
        unsigned long   lbase;
-       unsigned int    lsize;
+       unsigned long   lsize;
 };
 
 static struct mtrr_value * mtrr_state;
@@ -565,10 +596,8 @@ static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
        int i;
        int size = num_var_ranges * sizeof(struct mtrr_value);
 
-       mtrr_state = kmalloc(size,GFP_ATOMIC);
-       if (mtrr_state)
-               memset(mtrr_state,0,size);
-       else
+       mtrr_state = kzalloc(size,GFP_ATOMIC);
+       if (!mtrr_state)
                return -ENOMEM;
 
        for (i = 0; i < num_var_ranges; i++) {
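
mtrr_add_page() above gains two behaviours: the overlap test now works on inclusive end addresses, so a region reaching the very top of the address space no longer wraps to zero, and a new region that exactly encloses an existing one of the same type is recorded as a replacement candidate (the replace_reg argument threaded through every get_free_region() implementation in this series). A standalone sketch of the inclusive-end overlap check:

    /* Regions are [base, base + size - 1] in pages; size == 0 means unused.
     * Inclusive ends avoid the overflow that "base + size" would hit for a
     * region ending at the top of the address space. */
    static int mtrr_ranges_overlap(unsigned long base, unsigned long size,
                                   unsigned long lbase, unsigned long lsize)
    {
            if (!size || !lsize)
                    return 0;
            return !(base > lbase + lsize - 1 || base + size - 1 < lbase);
    }
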
index 99c9f2682041fad9faafa11a6b63d87b319df9f4..d61ea9db6cfebfd908ca2184621979bc2faf82ab 100644 (file)
@@ -43,15 +43,16 @@ struct mtrr_ops {
        void    (*set_all)(void);
 
        void    (*get)(unsigned int reg, unsigned long *base,
-                      unsigned int *size, mtrr_type * type);
-       int     (*get_free_region) (unsigned long base, unsigned long size);
-
+                      unsigned long *size, mtrr_type * type);
+       int     (*get_free_region)(unsigned long base, unsigned long size,
+                                  int replace_reg);
        int     (*validate_add_page)(unsigned long base, unsigned long size,
                                     unsigned int type);
        int     (*have_wrcomb)(void);
 };
 
-extern int generic_get_free_region(unsigned long base, unsigned long size);
+extern int generic_get_free_region(unsigned long base, unsigned long size,
+                                  int replace_reg);
 extern int generic_validate_add_page(unsigned long base, unsigned long size,
                                     unsigned int type);
 
@@ -62,17 +63,17 @@ extern int positive_have_wrcomb(void);
 /* library functions for processor-specific routines */
 struct set_mtrr_context {
        unsigned long flags;
-       unsigned long deftype_lo;
-       unsigned long deftype_hi;
        unsigned long cr4val;
-       unsigned long ccr3;
+       u32 deftype_lo;
+       u32 deftype_hi;
+       u32 ccr3;
 };
 
 struct mtrr_var_range {
-       unsigned long base_lo;
-       unsigned long base_hi;
-       unsigned long mask_lo;
-       unsigned long mask_hi;
+       u32 base_lo;
+       u32 base_hi;
+       u32 mask_lo;
+       u32 mask_hi;
 };
 
 void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -92,6 +93,6 @@ extern struct mtrr_ops * mtrr_if;
 extern unsigned int num_var_ranges;
 
 void mtrr_state_warn(void);
-char *mtrr_attrib_to_str(int x);
+const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
 
index 76aac088a323de6962de5aeb82ffa7911c8d8265..6624d8583c428a20694641aabba7f0ec6ce9308c 100644 (file)
@@ -152,9 +152,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                                seq_printf(m, " [%d]", i);
                }
 
-       seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
+       seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                     c->loops_per_jiffy/(500000/HZ),
                     (c->loops_per_jiffy/(5000/HZ)) % 100);
+       seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
 
        return 0;
 }
index ab0c327e79dcc3ffde57dce2bfc50b97b9ed69ba..db6dd20c3589ebf46e78176a123874f6b074f215 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/smp_lock.h>
-#include <linux/fs.h>
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
@@ -167,7 +166,6 @@ static int cpuid_device_create(int i)
        return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
@@ -187,7 +185,6 @@ static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
 {
        .notifier_call = cpuid_class_cpu_callback,
 };
-#endif /* !CONFIG_HOTPLUG_CPU */
 
 static int __init cpuid_init(void)
 {
index 144b432889655aa77a6d8196ac724e383ae402ec..a5e0e990ea9539eaff5c80105af16ad8335add62 100644 (file)
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
-                                                              size_t data_len)
-{
-       struct elf_note note;
-
-       note.n_namesz = strlen(name) + 1;
-       note.n_descsz = data_len;
-       note.n_type   = type;
-       memcpy(buf, &note, sizeof(note));
-       buf += (sizeof(note) +3)/4;
-       memcpy(buf, name, note.n_namesz);
-       buf += (note.n_namesz + 3)/4;
-       memcpy(buf, data, note.n_descsz);
-       buf += (note.n_descsz + 3)/4;
-
-       return buf;
-}
-
-static void final_note(u32 *buf)
-{
-       struct elf_note note;
-
-       note.n_namesz = 0;
-       note.n_descsz = 0;
-       note.n_type   = 0;
-       memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-       struct elf_prstatus prstatus;
-       u32 *buf;
-
-       if ((cpu < 0) || (cpu >= NR_CPUS))
-               return;
-
-       /* Using ELF notes here is opportunistic.
-        * I need a well defined structure format
-        * for the data I pass, and I need tags
-        * on the data to indicate what information I have
-        * squirrelled away.  ELF notes happen to provide
-        * all of that, so there is no need to invent something new.
-        */
-       buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-       if (!buf)
-               return;
-       memset(&prstatus, 0, sizeof(prstatus));
-       prstatus.pr_pid = current->pid;
-       elf_core_copy_regs(&prstatus.pr_reg, regs);
-       buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-                               sizeof(prstatus));
-       final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
-       int cpu;
-
-       cpu = safe_smp_processor_id();
-       crash_save_this_cpu(regs, cpu);
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 static atomic_t waiting_for_crash_ipi;
 
@@ -121,7 +59,7 @@ static int crash_nmi_callback(struct notifier_block *self,
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
        }
-       crash_save_this_cpu(regs, cpu);
+       crash_save_cpu(regs, cpu);
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);
        /* Assume hlt works */
@@ -195,5 +133,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
 #if defined(CONFIG_X86_IO_APIC)
        disable_IO_APIC();
 #endif
-       crash_save_self(regs);
+       crash_save_cpu(regs, safe_smp_processor_id());
 }
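
The block deleted from crash.c above (append_elf_note, final_note, crash_save_this_cpu, crash_save_self) moved into generic kexec code, which machine_crash_shutdown() now reaches through crash_save_cpu(). The record format itself is unchanged: a sequence of ELF notes, each a three-word header followed by the name and descriptor, both padded to 4-byte boundaries, terminated by an all-zero header. A sketch mirroring the removed helper (structure name illustrative; the ELF note header is three 4-byte words in both the 32-bit and 64-bit formats):

    #include <stdint.h>
    #include <string.h>

    struct elf_note_hdr {            /* layout of Elf32_Nhdr / Elf64_Nhdr   */
            uint32_t n_namesz;       /* strlen(name) + 1                    */
            uint32_t n_descsz;       /* descriptor (payload) size in bytes  */
            uint32_t n_type;         /* e.g. NT_PRSTATUS                    */
    };

    /* Append one note at buf; name and descriptor are 4-byte padded.
     * Returns the position for the next note, as the removed helper did. */
    static uint32_t *append_note(uint32_t *buf, const char *name,
                                 uint32_t type, const void *desc, size_t len)
    {
            struct elf_note_hdr note = {
                    .n_namesz = (uint32_t)strlen(name) + 1,
                    .n_descsz = (uint32_t)len,
                    .n_type   = type,
            };

            memcpy(buf, &note, sizeof(note));
            buf += (sizeof(note) + 3) / 4;
            memcpy(buf, name, note.n_namesz);
            buf += (note.n_namesz + 3) / 4;
            memcpy(buf, desc, len);
            buf += (len + 3) / 4;
            return buf;
    }
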
diff --git a/arch/i386/kernel/e820.c b/arch/i386/kernel/e820.c
new file mode 100644 (file)
index 0000000..2f7d0a9
--- /dev/null
@@ -0,0 +1,894 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/efi.h>
+#include <linux/pfn.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/e820.h>
+
+#ifdef CONFIG_EFI
+int efi_enabled = 0;
+EXPORT_SYMBOL(efi_enabled);
+#endif
+
+struct e820map e820;
+struct change_member {
+       struct e820entry *pbios; /* pointer to original bios entry */
+       unsigned long long addr; /* address for this change point */
+};
+static struct change_member change_point_list[2*E820MAX] __initdata;
+static struct change_member *change_point[2*E820MAX] __initdata;
+static struct e820entry *overlap_list[E820MAX] __initdata;
+static struct e820entry new_bios[E820MAX] __initdata;
+/* For PCI or other memory-mapped resources */
+unsigned long pci_mem_start = 0x10000000;
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_mem_start);
+#endif
+extern int user_defined_memmap;
+struct resource data_resource = {
+       .name   = "Kernel data",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+struct resource code_resource = {
+       .name   = "Kernel code",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource system_rom_resource = {
+       .name   = "System ROM",
+       .start  = 0xf0000,
+       .end    = 0xfffff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource extension_rom_resource = {
+       .name   = "Extension ROM",
+       .start  = 0xe0000,
+       .end    = 0xeffff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource adapter_rom_resources[] = { {
+       .name   = "Adapter ROM",
+       .start  = 0xc8000,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+}, {
+       .name   = "Adapter ROM",
+       .start  = 0,
+       .end    = 0,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+} };
+
+static struct resource video_rom_resource = {
+       .name   = "Video ROM",
+       .start  = 0xc0000,
+       .end    = 0xc7fff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+};
+
+static struct resource video_ram_resource = {
+       .name   = "Video RAM area",
+       .start  = 0xa0000,
+       .end    = 0xbffff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource standard_io_resources[] = { {
+       .name   = "dma1",
+       .start  = 0x0000,
+       .end    = 0x001f,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "pic1",
+       .start  = 0x0020,
+       .end    = 0x0021,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "timer0",
+       .start  = 0x0040,
+       .end    = 0x0043,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "timer1",
+       .start  = 0x0050,
+       .end    = 0x0053,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "keyboard",
+       .start  = 0x0060,
+       .end    = 0x006f,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "dma page reg",
+       .start  = 0x0080,
+       .end    = 0x008f,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "pic2",
+       .start  = 0x00a0,
+       .end    = 0x00a1,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "dma2",
+       .start  = 0x00c0,
+       .end    = 0x00df,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+}, {
+       .name   = "fpu",
+       .start  = 0x00f0,
+       .end    = 0x00ff,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
+} };
+
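+/*
+ * An option ROM begins with the little-endian signature word 0xAA55;
+ * probe_kernel_address() is used so that a fault while reading an
+ * unmapped legacy region is handled gracefully instead of crashing.
+ */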
+static int romsignature(const unsigned char *x)
+{
+       unsigned short sig;
+       int ret = 0;
+       if (probe_kernel_address((const unsigned short *)x, sig) == 0)
+               ret = (sig == 0xaa55);
+       return ret;
+}
+
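+/* A valid ROM image checksums to zero: the byte sum over its length is 0 mod 256. */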
+static int __init romchecksum(unsigned char *rom, unsigned long length)
+{
+       unsigned char *p, sum = 0;
+
+       for (p = rom; p < rom + length; p++)
+               sum += *p;
+       return sum == 0;
+}
+
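+/*
+ * Scan the legacy ROM area: the video BIOS at 0xC0000, an optional
+ * extension ROM at 0xE0000, the system BIOS at 0xF0000, and adapter
+ * ROMs on 2KB boundaries in between, claiming each ROM found in
+ * iomem_resource.
+ */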
+static void __init probe_roms(void)
+{
+       unsigned long start, length, upper;
+       unsigned char *rom;
+       int           i;
+
+       /* video rom */
+       upper = adapter_rom_resources[0].start;
+       for (start = video_rom_resource.start; start < upper; start += 2048) {
+               rom = isa_bus_to_virt(start);
+               if (!romsignature(rom))
+                       continue;
+
+               video_rom_resource.start = start;
+
+               /* 0 < length <= 0x7f * 512, historically */
+               length = rom[2] * 512;
+
+               /* if checksum okay, trust length byte */
+               if (length && romchecksum(rom, length))
+                       video_rom_resource.end = start + length - 1;
+
+               request_resource(&iomem_resource, &video_rom_resource);
+               break;
+       }
+
+       start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+       if (start < upper)
+               start = upper;
+
+       /* system rom */
+       request_resource(&iomem_resource, &system_rom_resource);
+       upper = system_rom_resource.start;
+
+       /* check for extension rom (ignore length byte!) */
+       rom = isa_bus_to_virt(extension_rom_resource.start);
+       if (romsignature(rom)) {
+               length = extension_rom_resource.end - extension_rom_resource.start + 1;
+               if (romchecksum(rom, length)) {
+                       request_resource(&iomem_resource, &extension_rom_resource);
+                       upper = extension_rom_resource.start;
+               }
+       }
+
+       /* check for adapter roms on 2k boundaries */
+       for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
+               rom = isa_bus_to_virt(start);
+               if (!romsignature(rom))
+                       continue;
+
+               /* 0 < length <= 0x7f * 512, historically */
+               length = rom[2] * 512;
+
+               /* but accept any length that fits if checksum okay */
+               if (!length || start + length > upper || !romchecksum(rom, length))
+                       continue;
+
+               adapter_rom_resources[i].start = start;
+               adapter_rom_resources[i].end = start + length - 1;
+               request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+               start = adapter_rom_resources[i++].end & ~2047UL;
+       }
+}
+
+/*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+static void __init
+legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+{
+       int i;
+
+       probe_roms();
+       for (i = 0; i < e820.nr_map; i++) {
+               struct resource *res;
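+               /* Entries extending above 4GB cannot be represented when
+                * struct resource only holds 32-bit addresses, so skip them. */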
+#ifndef CONFIG_RESOURCES_64BIT
+               if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+                       continue;
+#endif
+               res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+               switch (e820.map[i].type) {
+               case E820_RAM:  res->name = "System RAM"; break;
+               case E820_ACPI: res->name = "ACPI Tables"; break;
+               case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
+               default:        res->name = "reserved";
+               }
+               res->start = e820.map[i].addr;
+               res->end = res->start + e820.map[i].size - 1;
+               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               if (request_resource(&iomem_resource, res)) {
+                       kfree(res);
+                       continue;
+               }
+               if (e820.map[i].type == E820_RAM) {
+                       /*
+                        *  We don't know which RAM region contains kernel data,
+                        *  so we try it repeatedly and let the resource manager
+                        *  test it.
+                        */
+                       request_resource(res, code_resource);
+                       request_resource(res, data_resource);
+#ifdef CONFIG_KEXEC
+                       request_resource(res, &crashk_res);
+#endif
+               }
+       }
+}
+
+/*
+ * Request address space for all standard resources
+ *
+ * This is called just before pcibios_init(), which is also a
+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
+ */
+static int __init request_standard_resources(void)
+{
+       int i;
+
+       printk("Setting up standard PCI resources\n");
+       if (efi_enabled)
+               efi_initialize_iomem_resources(&code_resource, &data_resource);
+       else
+               legacy_init_iomem_resources(&code_resource, &data_resource);
+
+       /* EFI systems may still have VGA */
+       request_resource(&iomem_resource, &video_ram_resource);
+
+       /* request I/O space for devices used on all i[345]86 PCs */
+       for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
+               request_resource(&ioport_resource, &standard_io_resources[i]);
+       return 0;
+}
+
+subsys_initcall(request_standard_resources);
+
+void __init add_memory_region(unsigned long long start,
+                             unsigned long long size, int type)
+{
+       int x;
+
+       if (!efi_enabled) {
+               x = e820.nr_map;
+
+               if (x == E820MAX) {
+                       printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+                       return;
+               }
+
+               e820.map[x].addr = start;
+               e820.map[x].size = size;
+               e820.map[x].type = type;
+               e820.nr_map++;
+       }
+} /* add_memory_region */
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries.  The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+{
+       struct change_member *change_tmp;
+       unsigned long current_type, last_type;
+       unsigned long long last_addr;
+       int chgidx, still_changing;
+       int overlap_entries;
+       int new_bios_entry;
+       int old_nr, new_nr, chg_nr;
+       int i;
+
+       /*
+               Visually we're performing the following (1,2,3,4 = memory types)...
+
+               Sample memory map (w/overlaps):
+                  ____22__________________
+                  ______________________4_
+                  ____1111________________
+                  _44_____________________
+                  11111111________________
+                  ____________________33__
+                  ___________44___________
+                  __________33333_________
+                  ______________22________
+                  ___________________2222_
+                  _________111111111______
+                  _____________________11_
+                  _________________4______
+
+               Sanitized equivalent (no overlap):
+                  1_______________________
+                  _44_____________________
+                  ___1____________________
+                  ____22__________________
+                  ______11________________
+                  _________1______________
+                  __________3_____________
+                  ___________44___________
+                  _____________33_________
+                  _______________2________
+                  ________________1_______
+                  _________________4______
+                  ___________________2____
+                  ____________________33__
+                  ______________________4_
+       */
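+       /*
+        * The approach: record a change-point at the start and end of every
+        * non-empty BIOS entry, sort the change-points by address, then sweep
+        * them in order while tracking which entries currently overlap; each
+        * time the effective (highest-numbered) type changes, the previous
+        * output entry is closed and a new one is started.
+        */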
+       printk("sanitize start\n");
+       /* if there's only one memory region, don't bother */
+       if (*pnr_map < 2) {
+               printk("sanitize bail 0\n");
+               return -1;
+       }
+
+       old_nr = *pnr_map;
+
+       /* bail out if we find any unreasonable addresses in bios map */
+       for (i=0; i<old_nr; i++)
+               if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr) {
+                       printk("sanitize bail 1\n");
+                       return -1;
+               }
+
+       /* create pointers for initial change-point information (for sorting) */
+       for (i=0; i < 2*old_nr; i++)
+               change_point[i] = &change_point_list[i];
+
+       /* record all known change-points (starting and ending addresses),
+          omitting those that are for empty memory regions */
+       chgidx = 0;
+       for (i=0; i < old_nr; i++)      {
+               if (biosmap[i].size != 0) {
+                       change_point[chgidx]->addr = biosmap[i].addr;
+                       change_point[chgidx++]->pbios = &biosmap[i];
+                       change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+                       change_point[chgidx++]->pbios = &biosmap[i];
+               }
+       }
+       chg_nr = chgidx;        /* true number of change-points */
+
+       /* sort change-point list by memory addresses (low -> high) */
+       still_changing = 1;
+       while (still_changing)  {
+               still_changing = 0;
+               for (i=1; i < chg_nr; i++)  {
+                       /* if <current_addr> > <last_addr>, swap */
+                       /* or, if current=<start_addr> & last=<end_addr>, swap */
+                       if ((change_point[i]->addr < change_point[i-1]->addr) ||
+                               ((change_point[i]->addr == change_point[i-1]->addr) &&
+                                (change_point[i]->addr == change_point[i]->pbios->addr) &&
+                                (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+                          )
+                       {
+                               change_tmp = change_point[i];
+                               change_point[i] = change_point[i-1];
+                               change_point[i-1] = change_tmp;
+                               still_changing=1;
+                       }
+               }
+       }
+
+       /* create a new bios memory map, removing overlaps */
+       overlap_entries=0;       /* number of entries in the overlap table */
+       new_bios_entry=0;        /* index for creating new bios map entries */
+       last_type = 0;           /* start with undefined memory type */
+       last_addr = 0;           /* start with 0 as last starting address */
+       /* loop through change-points, determining effect on the new bios map */
+       for (chgidx=0; chgidx < chg_nr; chgidx++)
+       {
+               /* keep track of all overlapping bios entries */
+               if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+               {
+                       /* add map entry to overlap list (> 1 entry implies an overlap) */
+                       overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+               }
+               else
+               {
+                       /* remove entry from list (order independent, so swap with last) */
+                       for (i=0; i<overlap_entries; i++)
+                       {
+                               if (overlap_list[i] == change_point[chgidx]->pbios)
+                                       overlap_list[i] = overlap_list[overlap_entries-1];
+                       }
+                       overlap_entries--;
+               }
+               /* if there are overlapping entries, decide which "type" to use */
+               /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+               current_type = 0;
+               for (i=0; i<overlap_entries; i++)
+                       if (overlap_list[i]->type > current_type)
+                               current_type = overlap_list[i]->type;
+               /* continue building up new bios map based on this information */
+               if (current_type != last_type)  {
+                       if (last_type != 0)      {
+                               new_bios[new_bios_entry].size =
+                                       change_point[chgidx]->addr - last_addr;
+                               /* move forward only if the new size was non-zero */
+                               if (new_bios[new_bios_entry].size != 0)
+                                       if (++new_bios_entry >= E820MAX)
+                                               break;  /* no more space left for new bios entries */
+                       }
+                       if (current_type != 0)  {
+                               new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+                               new_bios[new_bios_entry].type = current_type;
+                               last_addr=change_point[chgidx]->addr;
+                       }
+                       last_type = current_type;
+               }
+       }
+       new_nr = new_bios_entry;   /* retain count for new bios entries */
+
+       /* copy new bios mapping into original location */
+       memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+       *pnr_map = new_nr;
+
+       printk("sanitize end\n");
+       return 0;
+}
+
+/*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory.  If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+{
+       /* Only one memory region (or negative)? Ignore it */
+       if (nr_map < 2)
+               return -1;
+
+       do {
+               unsigned long long start = biosmap->addr;
+               unsigned long long size = biosmap->size;
+               unsigned long long end = start + size;
+               unsigned long type = biosmap->type;
+               printk("copy_e820_map() start: %016Lx size: %016Lx end: %016Lx type: %ld\n", start, size, end, type);
+
+               /* Overflow in 64 bits? Ignore the memory map. */
+               if (start > end)
+                       return -1;
+
+               /*
+                * Some BIOSes claim RAM in the 640k - 1M region.
+                * Not right. Fix it up.
+                */
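+               /* Clip such an entry to below 640K and/or above 1M; the hole
+                * in between holds the VGA window and BIOS/option ROMs. */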
+               if (type == E820_RAM) {
+                       printk("copy_e820_map() type is E820_RAM\n");
+                       if (start < 0x100000ULL && end > 0xA0000ULL) {
+                               printk("copy_e820_map() lies in range...\n");
+                               if (start < 0xA0000ULL) {
+                                       printk("copy_e820_map() start < 0xA0000ULL\n");
+                                       add_memory_region(start, 0xA0000ULL-start, type);
+                               }
+                               if (end <= 0x100000ULL) {
+                                       printk("copy_e820_map() end <= 0x100000ULL\n");
+                                       continue;
+                               }
+                               start = 0x100000ULL;
+                               size = end - start;
+                       }
+               }
+               add_memory_region(start, size, type);
+       } while (biosmap++,--nr_map);
+       return 0;
+}
+
+/*
+ * Callback for efi_memory_walk.
+ */
+static int __init
+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+{
+       unsigned long *max_pfn = arg, pfn;
+
+       if (start < end) {
+               pfn = PFN_UP(end - 1);
+               if (pfn > *max_pfn)
+                       *max_pfn = pfn;
+       }
+       return 0;
+}
+
+static int __init
+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+{
+       memory_present(0, PFN_UP(start), PFN_DOWN(end));
+       return 0;
+}
+
+/*
+ * Find the highest page frame number we have available
+ */
+void __init find_max_pfn(void)
+{
+       int i;
+
+       max_pfn = 0;
+       if (efi_enabled) {
+               efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+               efi_memmap_walk(efi_memory_present_wrapper, NULL);
+               return;
+       }
+
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long start, end;
+               /* RAM? */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               start = PFN_UP(e820.map[i].addr);
+               end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+               if (start >= end)
+                       continue;
+               if (end > max_pfn)
+                       max_pfn = end;
+               memory_present(0, start, end);
+       }
+}
+
+/*
+ * Free all available memory for boot time allocation.  Used
+ * as a callback function by efi_memory_walk()
+ */
+
+static int __init
+free_available_memory(unsigned long start, unsigned long end, void *arg)
+{
+       /* check max_low_pfn */
+       if (start >= (max_low_pfn << PAGE_SHIFT))
+               return 0;
+       if (end >= (max_low_pfn << PAGE_SHIFT))
+               end = max_low_pfn << PAGE_SHIFT;
+       if (start < end)
+               free_bootmem(start, end - start);
+
+       return 0;
+}
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+{
+       int i;
+
+       if (efi_enabled) {
+               efi_memmap_walk(free_available_memory, NULL);
+               return;
+       }
+       for (i = 0; i < e820.nr_map; i++) {
+               unsigned long curr_pfn, last_pfn, size;
+               /*
+                * Reserve usable low memory
+                */
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+               /*
+                * We are rounding up the start address of usable memory:
+                */
+               curr_pfn = PFN_UP(e820.map[i].addr);
+               if (curr_pfn >= max_low_pfn)
+                       continue;
+               /*
+                * ... and at the end of the usable range downwards:
+                */
+               last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+               if (last_pfn > max_low_pfn)
+                       last_pfn = max_low_pfn;
+
+               /*
+                * .. finally, did all the rounding and playing
+                * around just make the area go away?
+                */
+               if (last_pfn <= curr_pfn)
+                       continue;
+
+               size = last_pfn - curr_pfn;
+               free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+       }
+}
+
+void __init register_memory(void)
+{
+       unsigned long gapstart, gapsize, round;
+       unsigned long long last;
+       int i;
+
+       /*
+        * Search for the biggest gap in the low 32 bits of the e820
+        * memory space.
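+        * That gap is where PCI memory BARs and other MMIO apertures will be
+        * placed, hence the pci_mem_start computed below.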
+        */
+       last = 0x100000000ull;
+       gapstart = 0x10000000;
+       gapsize = 0x400000;
+       i = e820.nr_map;
+       while (--i >= 0) {
+               unsigned long long start = e820.map[i].addr;
+               unsigned long long end = start + e820.map[i].size;
+
+               /*
+                * Since "last" is at most 4GB, we know we'll
+                * fit in 32 bits if this condition is true
+                */
+               if (last > end) {
+                       unsigned long gap = last - end;
+
+                       if (gap > gapsize) {
+                               gapsize = gap;
+                               gapstart = end;
+                       }
+               }
+               if (start < last)
+                       last = start;
+       }
+
+       /*
+        * See how much we want to round up: start off with
+        * rounding to the next 1MB area.
+        */
+       round = 0x100000;
+       while ((gapsize >> 4) > round)
+               round += round;
+       /* Fun with two's complement */
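+       /* For a power-of-two 'round', -round is the mask ~(round - 1), so
+        * (gapstart + round) & -round advances gapstart by 'round' and aligns
+        * the result down to a multiple of 'round'. */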
+       pci_mem_start = (gapstart + round) & -round;
+
+       printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
+               pci_mem_start, gapstart, gapsize);
+}
+
+void __init print_memory_map(char *who)
+{
+       int i;
+
+       for (i = 0; i < e820.nr_map; i++) {
+               printk(" %s: %016Lx - %016Lx ", who,
+                       e820.map[i].addr,
+                       e820.map[i].addr + e820.map[i].size);
+               switch (e820.map[i].type) {
+               case E820_RAM:  printk("(usable)\n");
+                               break;
+               case E820_RESERVED:
+                               printk("(reserved)\n");
+                               break;
+               case E820_ACPI:
+                               printk("(ACPI data)\n");
+                               break;
+               case E820_NVS:
+                               printk("(ACPI NVS)\n");
+                               break;
+               default:        printk("type %lu\n", e820.map[i].type);
+                               break;
+               }
+       }
+}
+
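+/*
+ * Trim the EFI memory map in place: available-memory descriptors are kept
+ * only below 'size' (and shortened if they straddle it), while
+ * EFI_MEMORY_RUNTIME regions are always retained so runtime services
+ * stay callable.
+ */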
+static __init __always_inline void efi_limit_regions(unsigned long long size)
+{
+       unsigned long long current_addr = 0;
+       efi_memory_desc_t *md, *next_md;
+       void *p, *p1;
+       int i, j;
+
+       j = 0;
+       p1 = memmap.map;
+       for (p = p1, i = 0; p < memmap.map_end; p += memmap.desc_size, i++) {
+               md = p;
+               next_md = p1;
+               current_addr = md->phys_addr +
+                       PFN_PHYS(md->num_pages);
+               if (is_available_memory(md)) {
+                       if (md->phys_addr >= size) continue;
+                       memcpy(next_md, md, memmap.desc_size);
+                       if (current_addr >= size) {
+                               next_md->num_pages -=
+                                       PFN_UP(current_addr-size);
+                       }
+                       p1 += memmap.desc_size;
+                       next_md = p1;
+                       j++;
+               } else if ((md->attribute & EFI_MEMORY_RUNTIME) ==
+                          EFI_MEMORY_RUNTIME) {
+                       /* In order to make runtime services
+                        * available we have to include runtime
+                        * memory regions in the memory map */
+                       memcpy(next_md, md, memmap.desc_size);
+                       p1 += memmap.desc_size;
+                       next_md = p1;
+                       j++;
+               }
+       }
+       memmap.nr_map = j;
+       memmap.map_end = memmap.map +
+               (memmap.nr_map * memmap.desc_size);
+}
+
+void __init limit_regions(unsigned long long size)
+{
+       unsigned long long current_addr;
+       int i;
+
+       print_memory_map("limit_regions start");
+       if (efi_enabled) {
+               efi_limit_regions(size);
+               return;
+       }
+       for (i = 0; i < e820.nr_map; i++) {
+               current_addr = e820.map[i].addr + e820.map[i].size;
+               if (current_addr < size)
+                       continue;
+
+               if (e820.map[i].type != E820_RAM)
+                       continue;
+
+               if (e820.map[i].addr >= size) {
+                       /*
+                        * This region starts past the end of the
+                        * requested size, skip it completely.
+                        */
+                       e820.nr_map = i;
+               } else {
+                       e820.nr_map = i + 1;
+                       e820.map[i].size -= current_addr - size;
+               }
+               print_memory_map("limit_regions endfor");
+               return;
+       }
+       print_memory_map("limit_regions endfunc");
+}
+
+/*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+ * Note: this function only works correctly if the e820 table is sorted and
+ * not overlapping, which is the case.
+ */
+int __init
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
+{
+       u64 start = s;
+       u64 end = e;
+       int i;
+       for (i = 0; i < e820.nr_map; i++) {
+               struct e820entry *ei = &e820.map[i];
+               if (type && ei->type != type)
+                       continue;
+               /* is the region (at least partly) overlapping the range? */
+               if (ei->addr >= end || ei->addr + ei->size <= start)
+                       continue;
+               /* if the region covers the beginning of <start,end>, move
+                * start up to the end of the region, since that part is covered
+                */
+               if (ei->addr <= start)
+                       start = ei->addr + ei->size;
+               /* if start is now at or beyond end, we're done, full
+                * coverage */
+               if (start >= end)
+                       return 1; /* we're done */
+       }
+       return 0;
+}
+
+static int __init parse_memmap(char *arg)
+{
+       if (!arg)
+               return -EINVAL;
+
+       if (strcmp(arg, "exactmap") == 0) {
+#ifdef CONFIG_CRASH_DUMP
+               /* If we are doing a crash dump, we
+                * still need to know the real mem
+                * size before original memory map is
+                * reset.
+                */
+               find_max_pfn();
+               saved_max_pfn = max_pfn;
+#endif
+               e820.nr_map = 0;
+               user_defined_memmap = 1;
+       } else {
+               /* If the user specifies memory size, we
+                * limit the BIOS-provided memory map to
+                * that size. exactmap can be used to specify
+                * the exact map. mem=number can be used to
+                * trim the existing memory map.
+                */
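+               /*
+                * For example (hypothetical boot parameters):
+                *   memmap=64M@16M   adds 64MB of E820_RAM at 16MB
+                *   memmap=16M#96M   marks 16MB at 96MB as E820_ACPI
+                *   memmap=1M$32M    marks 1MB at 32MB as E820_RESERVED
+                *   memmap=512M      truncates the e820 map at 512MB
+                */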
+               unsigned long long start_at, mem_size;
+
+               mem_size = memparse(arg, &arg);
+               if (*arg == '@') {
+                       start_at = memparse(arg+1, &arg);
+                       add_memory_region(start_at, mem_size, E820_RAM);
+               } else if (*arg == '#') {
+                       start_at = memparse(arg+1, &arg);
+                       add_memory_region(start_at, mem_size, E820_ACPI);
+               } else if (*arg == '$') {
+                       start_at = memparse(arg+1, &arg);
+                       add_memory_region(start_at, mem_size, E820_RESERVED);
+               } else {
+                       limit_regions(mem_size);
+                       user_defined_memmap = 1;
+               }
+       }
+       return 0;
+}
+early_param("memmap", parse_memmap);
index 8b40648d0ef00f2570ea2f19e07da3e1f851d41f..b92c7f0a358aa79836a2ccbc56cd7bf386555a80 100644 (file)
@@ -194,17 +194,24 @@ inline int efi_set_rtc_mmss(unsigned long nowtime)
        return 0;
 }
 /*
- * This should only be used during kernel init and before runtime
- * services have been remapped, therefore, we'll need to call in physical
- * mode.  Note, this call isn't used later, so mark it __init.
+ * This is used during kernel init before runtime
+ * services have been remapped and also during suspend; therefore,
+ * we need to be able to call it in both physical and virtual mode.
  */
-inline unsigned long __init efi_get_time(void)
+inline unsigned long efi_get_time(void)
 {
        efi_status_t status;
        efi_time_t eft;
        efi_time_cap_t cap;
 
-       status = phys_efi_get_time(&eft, &cap);
+       if (efi.get_time) {
+               /* if we are in virtual mode use remapped function */
+               status = efi.get_time(&eft, &cap);
+       } else {
+               /* we are in physical mode */
+               status = phys_efi_get_time(&eft, &cap);
+       }
+
        if (status != EFI_SUCCESS)
                printk("Oops: efitime: can't read time status: 0x%lx\n",status);
 
index 5a63d6fdb70e4ff75d09b69874334b758d5af07a..de34b7fed3c1718d88886fa3e24a94fa49ac662f 100644 (file)
  *     18(%esp) - %eax
  *     1C(%esp) - %ds
  *     20(%esp) - %es
- *     24(%esp) - orig_eax
- *     28(%esp) - %eip
- *     2C(%esp) - %cs
- *     30(%esp) - %eflags
- *     34(%esp) - %oldesp
- *     38(%esp) - %oldss
+ *     24(%esp) - %gs
+ *     28(%esp) - orig_eax
+ *     2C(%esp) - %eip
+ *     30(%esp) - %cs
+ *     34(%esp) - %eflags
+ *     38(%esp) - %oldesp
+ *     3C(%esp) - %oldss
  *
  * "current" is in register %ebx during any slow entries.
  */
 #include <asm/smp.h>
 #include <asm/page.h>
 #include <asm/desc.h>
+#include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #include "irq_vectors.h"
 
-#define nr_syscalls ((syscall_table_size)/4)
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization.  The following will never clobber any registers:
+ *   INTERRUPT_RETURN (aka. "iret")
+ *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
 
-EBX            = 0x00
-ECX            = 0x04
-EDX            = 0x08
-ESI            = 0x0C
-EDI            = 0x10
-EBP            = 0x14
-EAX            = 0x18
-DS             = 0x1C
-ES             = 0x20
-ORIG_EAX       = 0x24
-EIP            = 0x28
-CS             = 0x2C
-EFLAGS         = 0x30
-OLDESP         = 0x34
-OLDSS          = 0x38
+#define nr_syscalls ((syscall_table_size)/4)
 
 CF_MASK                = 0x00000001
 TF_MASK                = 0x00000100
@@ -76,23 +75,16 @@ DF_MASK             = 0x00000400
 NT_MASK                = 0x00004000
 VM_MASK                = 0x00020000
 
-/* These are replaces for paravirtualization */
-#define DISABLE_INTERRUPTS             cli
-#define ENABLE_INTERRUPTS              sti
-#define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
-#define INTERRUPT_RETURN               iret
-#define GET_CR0_INTO_EAX               movl %cr0, %eax
-
 #ifdef CONFIG_PREEMPT
-#define preempt_stop           DISABLE_INTERRUPTS; TRACE_IRQS_OFF
+#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
-#define preempt_stop
+#define preempt_stop(clobbers)
 #define resume_kernel          restore_nocheck
 #endif
 
 .macro TRACE_IRQS_IRET
 #ifdef CONFIG_TRACE_IRQFLAGS
-       testl $IF_MASK,EFLAGS(%esp)     # interrupts off?
+       testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
        jz 1f
        TRACE_IRQS_ON
 1:
@@ -107,6 +99,9 @@ VM_MASK              = 0x00020000
 
 #define SAVE_ALL \
        cld; \
+       pushl %gs; \
+       CFI_ADJUST_CFA_OFFSET 4;\
+       /*CFI_REL_OFFSET gs, 0;*/\
        pushl %es; \
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET es, 0;*/\
@@ -136,7 +131,9 @@ VM_MASK             = 0x00020000
        CFI_REL_OFFSET ebx, 0;\
        movl $(__USER_DS), %edx; \
        movl %edx, %ds; \
-       movl %edx, %es;
+       movl %edx, %es; \
+       movl $(__KERNEL_PDA), %edx; \
+       movl %edx, %gs
 
 #define RESTORE_INT_REGS \
        popl %ebx;      \
@@ -169,17 +166,22 @@ VM_MASK           = 0x00020000
 2:     popl %es;       \
        CFI_ADJUST_CFA_OFFSET -4;\
        /*CFI_RESTORE es;*/\
-.section .fixup,"ax";  \
-3:     movl $0,(%esp); \
-       jmp 1b;         \
+3:     popl %gs;       \
+       CFI_ADJUST_CFA_OFFSET -4;\
+       /*CFI_RESTORE gs;*/\
+.pushsection .fixup,"ax";      \
 4:     movl $0,(%esp); \
+       jmp 1b;         \
+5:     movl $0,(%esp); \
        jmp 2b;         \
-.previous;             \
+6:     movl $0,(%esp); \
+       jmp 3b;         \
 .section __ex_table,"a";\
        .align 4;       \
-       .long 1b,3b;    \
-       .long 2b,4b;    \
-.previous
+       .long 1b,4b;    \
+       .long 2b,5b;    \
+       .long 3b,6b;    \
+.popsection
 
 #define RING0_INT_FRAME \
        CFI_STARTPROC simple;\
@@ -198,18 +200,18 @@ VM_MASK           = 0x00020000
 #define RING0_PTREGS_FRAME \
        CFI_STARTPROC simple;\
        CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, OLDESP-EBX;\
-       /*CFI_OFFSET cs, CS-OLDESP;*/\
-       CFI_OFFSET eip, EIP-OLDESP;\
-       /*CFI_OFFSET es, ES-OLDESP;*/\
-       /*CFI_OFFSET ds, DS-OLDESP;*/\
-       CFI_OFFSET eax, EAX-OLDESP;\
-       CFI_OFFSET ebp, EBP-OLDESP;\
-       CFI_OFFSET edi, EDI-OLDESP;\
-       CFI_OFFSET esi, ESI-OLDESP;\
-       CFI_OFFSET edx, EDX-OLDESP;\
-       CFI_OFFSET ecx, ECX-OLDESP;\
-       CFI_OFFSET ebx, EBX-OLDESP
+       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
+       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
+       CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
+       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
+       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
+       CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
+       CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
+       CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
+       CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
+       CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
+       CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+       CFI_OFFSET ebx, PT_EBX-PT_OLDESP
 
 ENTRY(ret_from_fork)
        CFI_STARTPROC
@@ -237,17 +239,18 @@ ENTRY(ret_from_fork)
        ALIGN
        RING0_PTREGS_FRAME
 ret_from_exception:
-       preempt_stop
+       preempt_stop(CLBR_ANY)
 ret_from_intr:
        GET_THREAD_INFO(%ebp)
 check_userspace:
-       movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
-       movb CS(%esp), %al
+       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
+       movb PT_CS(%esp), %al
        andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
        cmpl $USER_RPL, %eax
        jb resume_kernel                # not returning to v8086 or userspace
+
 ENTRY(resume_userspace)
-       DISABLE_INTERRUPTS              # make sure we don't miss an interrupt
+       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        movl TI_flags(%ebp), %ecx
@@ -258,14 +261,14 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       DISABLE_INTERRUPTS
+       DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_nocheck
 need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
        jz restore_all
-       testl $IF_MASK,EFLAGS(%esp)     # interrupts off (exception path) ?
+       testl $IF_MASK,PT_EFLAGS(%esp)  # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
@@ -287,7 +290,7 @@ sysenter_past_esp:
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable it straight after entry:
         */
-       ENABLE_INTERRUPTS
+       ENABLE_INTERRUPTS(CLBR_NONE)
        pushl $(__USER_DS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ss, 0*/
@@ -331,20 +334,27 @@ sysenter_past_esp:
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
        call *sys_call_table(,%eax,4)
-       movl %eax,EAX(%esp)
-       DISABLE_INTERRUPTS
+       movl %eax,PT_EAX(%esp)
+       DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx
        jne syscall_exit_work
 /* if something modifies registers it must also disable sysexit */
-       movl EIP(%esp), %edx
-       movl OLDESP(%esp), %ecx
+       movl PT_EIP(%esp), %edx
+       movl PT_OLDESP(%esp), %ecx
        xorl %ebp,%ebp
        TRACE_IRQS_ON
+1:     mov  PT_GS(%esp), %gs
        ENABLE_INTERRUPTS_SYSEXIT
        CFI_ENDPROC
-
+.pushsection .fixup,"ax"
+2:     movl $0,PT_GS(%esp)
+       jmp 1b
+.section __ex_table,"a"
+       .align 4
+       .long 1b,2b
+.popsection
 
        # system call handler stub
 ENTRY(system_call)
@@ -353,7 +363,7 @@ ENTRY(system_call)
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
-       testl $TF_MASK,EFLAGS(%esp)
+       testl $TF_MASK,PT_EFLAGS(%esp)
        jz no_singlestep
        orl $_TIF_SINGLESTEP,TI_flags(%ebp)
 no_singlestep:
@@ -365,9 +375,9 @@ no_singlestep:
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
-       movl %eax,EAX(%esp)             # store the return value
+       movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
-       DISABLE_INTERRUPTS              # make sure we don't miss an interrupt
+       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
@@ -376,12 +386,12 @@ syscall_exit:
        jne syscall_exit_work
 
 restore_all:
-       movl EFLAGS(%esp), %eax         # mix EFLAGS, SS and CS
-       # Warning: OLDSS(%esp) contains the wrong/random values if we
+       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
+       # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
-       movb OLDSS(%esp), %ah
-       movb CS(%esp), %al
+       movb PT_OLDSS(%esp), %ah
+       movb PT_CS(%esp), %al
        andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        CFI_REMEMBER_STATE
@@ -390,13 +400,13 @@ restore_nocheck:
        TRACE_IRQS_IRET
 restore_nocheck_notrace:
        RESTORE_REGS
-       addl $4, %esp
+       addl $4, %esp                   # skip orig_eax/error_code
        CFI_ADJUST_CFA_OFFSET -4
 1:     INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
        TRACE_IRQS_ON
-       ENABLE_INTERRUPTS
+       ENABLE_INTERRUPTS(CLBR_NONE)
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
@@ -408,33 +418,42 @@ iret_exc:
 
        CFI_RESTORE_STATE
 ldt_ss:
-       larl OLDSS(%esp), %eax
+       larl PT_OLDSS(%esp), %eax
        jnz restore_nocheck
        testl $0x00400000, %eax         # returning to 32bit stack?
        jnz restore_nocheck             # all right, normal return
+
+#ifdef CONFIG_PARAVIRT
+       /*
+        * The kernel can't run on a non-flat stack if paravirt mode
+        * is active.  Rather than try to fixup the high bits of
+        * is active.  Rather than try to fix up the high bits of
+        * and/or Wine support in a paravirt VM, although the option
+        * is still available to implement the setting of the high
+        * 16-bits in the INTERRUPT_RETURN paravirt-op.
+        */
+       cmpl $0, paravirt_ops+PARAVIRT_enabled
+       jne restore_nocheck
+#endif
+
        /* If returning to userspace with 16bit stack,
         * try to fix the higher word of ESP, as the CPU
         * won't restore it.
         * This is an "official" bug of all the x86-compatible
         * CPUs, which we can try to work around to make
         * dosemu and wine happy. */
-       subl $8, %esp           # reserve space for switch16 pointer
-       CFI_ADJUST_CFA_OFFSET 8
-       DISABLE_INTERRUPTS
+       movl PT_OLDESP(%esp), %eax
+       movl %esp, %edx
+       call patch_espfix_desc
+       pushl $__ESPFIX_SS
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl %eax
+       CFI_ADJUST_CFA_OFFSET 4
+       DISABLE_INTERRUPTS(CLBR_EAX)
        TRACE_IRQS_OFF
-       movl %esp, %eax
-       /* Set up the 16bit stack frame with switch32 pointer on top,
-        * and a switch16 pointer on top of the current frame. */
-       call setup_x86_bogus_stack
-       CFI_ADJUST_CFA_OFFSET -8        # frame has moved
-       TRACE_IRQS_IRET
-       RESTORE_REGS
-       lss 20+4(%esp), %esp    # switch to 16bit stack
-1:     INTERRUPT_RETURN
-.section __ex_table,"a"
-       .align 4
-       .long 1b,iret_exc
-.previous
+       lss (%esp), %esp
+       CFI_ADJUST_CFA_OFFSET -8
+       jmp restore_nocheck
        CFI_ENDPROC
 
        # perform work that needs to be done immediately before resumption
@@ -445,7 +464,7 @@ work_pending:
        jz work_notifysig
 work_resched:
        call schedule
-       DISABLE_INTERRUPTS              # make sure we don't miss an interrupt
+       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
@@ -458,7 +477,8 @@ work_resched:
 
 work_notifysig:                                # deal with pending signals and
                                        # notify-resume requests
-       testl $VM_MASK, EFLAGS(%esp)
+#ifdef CONFIG_VM86
+       testl $VM_MASK, PT_EFLAGS(%esp)
        movl %esp, %eax
        jne work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
@@ -468,29 +488,30 @@ work_notifysig:                           # deal with pending signals and
 
        ALIGN
 work_notifysig_v86:
-#ifdef CONFIG_VM86
        pushl %ecx                      # save ti_flags for do_notify_resume
        CFI_ADJUST_CFA_OFFSET 4
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        movl %eax, %esp
+#else
+       movl %esp, %eax
+#endif
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig
-#endif
 
        # perform syscall exit tracing
        ALIGN
 syscall_trace_entry:
-       movl $-ENOSYS,EAX(%esp)
+       movl $-ENOSYS,PT_EAX(%esp)
        movl %esp, %eax
        xorl %edx,%edx
        call do_syscall_trace
        cmpl $0, %eax
        jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
                                        # so must skip actual syscall
-       movl ORIG_EAX(%esp), %eax
+       movl PT_ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
@@ -501,7 +522,7 @@ syscall_exit_work:
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        jz work_pending
        TRACE_IRQS_ON
-       ENABLE_INTERRUPTS               # could let do_syscall_trace() call
+       ENABLE_INTERRUPTS(CLBR_ANY)     # could let do_syscall_trace() call
                                        # schedule() instead
        movl %esp, %eax
        movl $1, %edx
@@ -515,39 +536,38 @@ syscall_fault:
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
-       movl $-EFAULT,EAX(%esp)
+       movl $-EFAULT,PT_EAX(%esp)
        jmp resume_userspace
 
 syscall_badsys:
-       movl $-ENOSYS,EAX(%esp)
+       movl $-ENOSYS,PT_EAX(%esp)
        jmp resume_userspace
        CFI_ENDPROC
 
 #define FIXUP_ESPFIX_STACK \
-       movl %esp, %eax; \
-       /* switch to 32bit stack using the pointer on top of 16bit stack */ \
-       lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
-       /* copy data from 16bit stack to 32bit stack */ \
-       call fixup_x86_bogus_stack; \
-       /* put ESP to the proper location */ \
-       movl %eax, %esp;
-#define UNWIND_ESPFIX_STACK \
+       /* since we are on the wrong stack, we can't do this in C code :( */ \
+       movl %gs:PDA_cpu, %ebx; \
+       PER_CPU(cpu_gdt_descr, %ebx); \
+       movl GDS_address(%ebx), %ebx; \
+       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
+       addl %esp, %eax; \
+       pushl $__KERNEL_DS; \
+       CFI_ADJUST_CFA_OFFSET 4; \
        pushl %eax; \
        CFI_ADJUST_CFA_OFFSET 4; \
+       lss (%esp), %esp; \
+       CFI_ADJUST_CFA_OFFSET -8;
+#define UNWIND_ESPFIX_STACK \
        movl %ss, %eax; \
-       /* see if on 16bit stack */ \
+       /* see if on espfix stack */ \
        cmpw $__ESPFIX_SS, %ax; \
-       je 28f; \
-27:    popl %eax; \
-       CFI_ADJUST_CFA_OFFSET -4; \
-.section .fixup,"ax"; \
-28:    movl $__KERNEL_DS, %eax; \
+       jne 27f; \
+       movl $__KERNEL_DS, %eax; \
        movl %eax, %ds; \
        movl %eax, %es; \
-       /* switch to 32bit stack */ \
+       /* switch to normal stack */ \
        FIXUP_ESPFIX_STACK; \
-       jmp 27b; \
-.previous
+27:;
 
 /*
  * Build the entry stubs and pointer table with
@@ -608,13 +628,16 @@ KPROBE_ENTRY(page_fault)
        CFI_ADJUST_CFA_OFFSET 4
        ALIGN
 error_code:
+       /* the function address is in %gs's slot on the stack */
+       pushl %es
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET es, 0*/
        pushl %ds
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ds, 0*/
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eax, 0
-       xorl %eax, %eax
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebp, 0
@@ -627,7 +650,6 @@ error_code:
        pushl %edx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edx, 0
-       decl %eax                       # eax = -1
        pushl %ecx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ecx, 0
@@ -635,18 +657,20 @@ error_code:
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        cld
-       pushl %es
+       pushl %gs
        CFI_ADJUST_CFA_OFFSET 4
-       /*CFI_REL_OFFSET es, 0*/
+       /*CFI_REL_OFFSET gs, 0*/
+       movl $(__KERNEL_PDA), %ecx
+       movl %ecx, %gs
        UNWIND_ESPFIX_STACK
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_REGISTER es, ecx*/
-       movl ES(%esp), %edi             # get the function address
-       movl ORIG_EAX(%esp), %edx       # get the error code
-       movl %eax, ORIG_EAX(%esp)
-       movl %ecx, ES(%esp)
-       /*CFI_REL_OFFSET es, ES*/
+       movl PT_GS(%esp), %edi          # get the function address
+       movl PT_ORIG_EAX(%esp), %edx    # get the error code
+       movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
+       mov  %ecx, PT_GS(%esp)
+       /*CFI_REL_OFFSET gs, ES*/
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
@@ -682,7 +706,7 @@ ENTRY(device_not_available)
        GET_CR0_INTO_EAX
        testl $0x4, %eax                # EM (math emulation bit)
        jne device_not_available_emulate
-       preempt_stop
+       preempt_stop(CLBR_ANY)
        call math_state_restore
        jmp ret_from_exception
 device_not_available_emulate:
@@ -754,7 +778,7 @@ KPROBE_ENTRY(nmi)
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
-       je nmi_16bit_stack
+       je nmi_espfix_stack
        cmpl $sysenter_entry,(%esp)
        je nmi_stack_fixup
        pushl %eax
@@ -797,7 +821,7 @@ nmi_debug_stack_check:
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct
 
-nmi_16bit_stack:
+nmi_espfix_stack:
        /* We have a RING0_INT_FRAME here.
         *
         * create the pointer to lss back
@@ -806,7 +830,6 @@ nmi_16bit_stack:
        CFI_ADJUST_CFA_OFFSET 4
        pushl %esp
        CFI_ADJUST_CFA_OFFSET 4
-       movzwl %sp, %esp
        addw $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
@@ -817,11 +840,11 @@ nmi_16bit_stack:
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
-       CFI_ADJUST_CFA_OFFSET -20       # the frame has now moved
        xorl %edx,%edx                  # zero error code
        call do_nmi
        RESTORE_REGS
-       lss 12+4(%esp), %esp            # back to 16bit stack
+       lss 12+4(%esp), %esp            # back to espfix stack
+       CFI_ADJUST_CFA_OFFSET -24
 1:     INTERRUPT_RETURN
        CFI_ENDPROC
 .section __ex_table,"a"
@@ -830,6 +853,19 @@ nmi_16bit_stack:
 .previous
 KPROBE_END(nmi)
 
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_iret)
+1:     iret
+.section __ex_table,"a"
+       .align 4
+       .long 1b,iret_exc
+.previous
+
+ENTRY(native_irq_enable_sysexit)
+       sti
+       sysexit
+#endif
+
 KPROBE_ENTRY(int3)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
@@ -949,26 +985,27 @@ ENTRY(arch_unwind_init_running)
        movl    4(%esp), %edx
        movl    (%esp), %ecx
        leal    4(%esp), %eax
-       movl    %ebx, EBX(%edx)
+       movl    %ebx, PT_EBX(%edx)
        xorl    %ebx, %ebx
-       movl    %ebx, ECX(%edx)
-       movl    %ebx, EDX(%edx)
-       movl    %esi, ESI(%edx)
-       movl    %edi, EDI(%edx)
-       movl    %ebp, EBP(%edx)
-       movl    %ebx, EAX(%edx)
-       movl    $__USER_DS, DS(%edx)
-       movl    $__USER_DS, ES(%edx)
-       movl    %ebx, ORIG_EAX(%edx)
-       movl    %ecx, EIP(%edx)
+       movl    %ebx, PT_ECX(%edx)
+       movl    %ebx, PT_EDX(%edx)
+       movl    %esi, PT_ESI(%edx)
+       movl    %edi, PT_EDI(%edx)
+       movl    %ebp, PT_EBP(%edx)
+       movl    %ebx, PT_EAX(%edx)
+       movl    $__USER_DS, PT_DS(%edx)
+       movl    $__USER_DS, PT_ES(%edx)
+       movl    $0, PT_GS(%edx)
+       movl    %ebx, PT_ORIG_EAX(%edx)
+       movl    %ecx, PT_EIP(%edx)
        movl    12(%esp), %ecx
-       movl    $__KERNEL_CS, CS(%edx)
-       movl    %ebx, EFLAGS(%edx)
-       movl    %eax, OLDESP(%edx)
+       movl    $__KERNEL_CS, PT_CS(%edx)
+       movl    %ebx, PT_EFLAGS(%edx)
+       movl    %eax, PT_OLDESP(%edx)
        movl    8(%esp), %eax
        movl    %ecx, 8(%esp)
-       movl    EBX(%edx), %ebx
-       movl    $__KERNEL_DS, OLDSS(%edx)
+       movl    PT_EBX(%edx), %ebx
+       movl    $__KERNEL_DS, PT_OLDSS(%edx)
        jmpl    *%eax
        CFI_ENDPROC
 ENDPROC(arch_unwind_init_running)
index ca31f18d277c5487e3afab809783f9d79beb4fbe..edef5084ce17104b6c46f900d70d22b480ed8794 100644 (file)
  */
 ENTRY(startup_32)
 
+#ifdef CONFIG_PARAVIRT
+        movl %cs, %eax
+        testl $0x3, %eax
+        jnz startup_paravirt
+#endif
+
 /*
  * Set segments to known values.
  */
@@ -302,6 +308,7 @@ is386:      movl $2,%ecx            # set MP
        movl %eax,%cr0
 
        call check_x87
+       call setup_pda
        lgdt cpu_gdt_descr
        lidt idt_descr
        ljmp $(__KERNEL_CS),$1f
@@ -312,10 +319,13 @@ is386:    movl $2,%ecx            # set MP
        movl %eax,%ds
        movl %eax,%es
 
-       xorl %eax,%eax                  # Clear FS/GS and LDT
+       xorl %eax,%eax                  # Clear FS and LDT
        movl %eax,%fs
-       movl %eax,%gs
        lldt %ax
+
+       movl $(__KERNEL_PDA),%eax
+       mov  %eax,%gs
+
        cld                     # gcc2 wants the direction flag cleared at all times
        pushl $0                # fake return address for unwinder
 #ifdef CONFIG_SMP
@@ -345,6 +355,23 @@ check_x87:
        .byte 0xDB,0xE4         /* fsetpm for 287, ignored by 387 */
        ret
 
+/*
+ * Point the GDT at this CPU's PDA.  On boot this will be
+ * cpu_gdt_table and boot_pda; for secondary CPUs, these will be
+ * that CPU's GDT and PDA.
+ */
+setup_pda:
+       /* get the PDA pointer */
+       movl start_pda, %eax
+
+       /* slot the PDA address into the GDT */
+       mov cpu_gdt_descr+2, %ecx
+       mov %ax, (__KERNEL_PDA+0+2)(%ecx)               /* base & 0x0000ffff */
+       shr $16, %eax
+       mov %al, (__KERNEL_PDA+4+0)(%ecx)               /* base & 0x00ff0000 */
+       mov %ah, (__KERNEL_PDA+4+3)(%ecx)               /* base & 0xff000000 */
+       ret
+
 /*
  *  setup_idt
  *
@@ -465,6 +492,33 @@ ignore_int:
 #endif
        iret
 
+#ifdef CONFIG_PARAVIRT
+startup_paravirt:
+       cld
+       movl $(init_thread_union+THREAD_SIZE),%esp
+
+       /* We take pains to preserve all the regs. */
+       pushl   %edx
+       pushl   %ecx
+       pushl   %eax
+
+       /* paravirt.o is last in link, and that probe fn never returns */
+       pushl   $__start_paravirtprobe
+1:
+       movl    0(%esp), %eax
+       pushl   (%eax)
+       movl    8(%esp), %eax
+       call    *(%esp)
+       popl    %eax
+
+       movl    4(%esp), %eax
+       movl    8(%esp), %ecx
+       movl    12(%esp), %edx
+
+       addl    $4, (%esp)
+       jmp     1b
+#endif
+
 /*
  * Real beginning of normal "text" segment
  */
@@ -484,6 +538,8 @@ ENTRY(empty_zero_page)
  * This starts the data section.
  */
 .data
+ENTRY(start_pda)
+       .long boot_pda
 
 ENTRY(stack_start)
        .long init_thread_union+THREAD_SIZE
@@ -525,7 +581,7 @@ idt_descr:
 
 # boot GDT descriptor (later on used by CPU#0):
        .word 0                         # 32 bit align gdt_desc.address
-cpu_gdt_descr:
+ENTRY(cpu_gdt_descr)
        .word GDT_ENTRIES*8-1
        .long cpu_gdt_table
 
@@ -584,8 +640,8 @@ ENTRY(cpu_gdt_table)
        .quad 0x00009a000000ffff        /* 0xc0 APM CS 16 code (16 bit) */
        .quad 0x004092000000ffff        /* 0xc8 APM DS    data */
 
-       .quad 0x0000920000000000        /* 0xd0 - ESPFIX 16-bit SS */
-       .quad 0x0000000000000000        /* 0xd8 - unused */
+       .quad 0x00c0920000000000        /* 0xd0 - ESPFIX SS */
+       .quad 0x00cf92000000ffff        /* 0xd8 - PDA */
        .quad 0x0000000000000000        /* 0xe0 - unused */
        .quad 0x0000000000000000        /* 0xe8 - unused */
        .quad 0x0000000000000000        /* 0xf0 - unused */
index 17647a530b2f4b927b5a8f40e1d7d88f0acd482c..45a8685bb60bb5e21e638cd6c0b6df95586606f2 100644 (file)
@@ -34,6 +34,7 @@ static int __init init_hpet_clocksource(void)
        unsigned long hpet_period;
        void __iomem* hpet_base;
        u64 tmp;
+       int err;
 
        if (!is_hpet_enabled())
                return -ENODEV;
@@ -61,7 +62,11 @@ static int __init init_hpet_clocksource(void)
        do_div(tmp, FSEC_PER_NSEC);
        clocksource_hpet.mult = (u32)tmp;
 
-       return clocksource_register(&clocksource_hpet);
+       err = clocksource_register(&clocksource_hpet);
+       if (err)
+               iounmap(hpet_base);
+
+       return err;
 }
 
 module_init(init_hpet_clocksource);
index 62996cd17084245dafd53bcfab9555c20cc38c60..c8d45821c788f77467535569816d820d395f8a4b 100644 (file)
@@ -381,7 +381,10 @@ void __init init_ISA_irqs (void)
        }
 }
 
-void __init init_IRQ(void)
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
 {
        int i;
 
index 3b7a63e0ed1a5b07121ae63c9e35863ae0469a2b..e21dcde0790e62cd389b6062803e9bd014a51048 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/pci.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
+#include <linux/freezer.h>
 
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -153,14 +154,20 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
  * the interrupt, and we need to make sure the entry is fully populated
  * before that happens.
  */
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
-       unsigned long flags;
        union entry_union eu;
        eu.entry = e;
-       spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
+static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&ioapic_lock, flags);
+       __ioapic_write_entry(apic, pin, e);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -836,8 +843,7 @@ static int __init find_isa_irq_pin(int irq, int type)
 
                if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
                     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-                    mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-                    mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+                    mp_bus_id_to_type[lbus] == MP_BUS_MCA
                    ) &&
                    (mp_irqs[i].mpc_irqtype == type) &&
                    (mp_irqs[i].mpc_srcbusirq == irq))
@@ -856,8 +862,7 @@ static int __init find_isa_irq_apic(int irq, int type)
 
                if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
                     mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
-                    mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
-                    mp_bus_id_to_type[lbus] == MP_BUS_NEC98
+                    mp_bus_id_to_type[lbus] == MP_BUS_MCA
                    ) &&
                    (mp_irqs[i].mpc_irqtype == type) &&
                    (mp_irqs[i].mpc_srcbusirq == irq))
@@ -987,12 +992,6 @@ static int EISA_ELCR(unsigned int irq)
 #define default_MCA_trigger(idx)       (1)
 #define default_MCA_polarity(idx)      (0)
 
-/* NEC98 interrupts are always polarity zero edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_NEC98_trigger(idx)     (0)
-#define default_NEC98_polarity(idx)    (0)
-
 static int __init MPBIOS_polarity(int idx)
 {
        int bus = mp_irqs[idx].mpc_srcbus;
@@ -1027,11 +1026,6 @@ static int __init MPBIOS_polarity(int idx)
                                        polarity = default_MCA_polarity(idx);
                                        break;
                                }
-                               case MP_BUS_NEC98: /* NEC 98 pin */
-                               {
-                                       polarity = default_NEC98_polarity(idx);
-                                       break;
-                               }
                                default:
                                {
                                        printk(KERN_WARNING "broken BIOS!!\n");
@@ -1101,11 +1095,6 @@ static int MPBIOS_trigger(int idx)
                                        trigger = default_MCA_trigger(idx);
                                        break;
                                }
-                               case MP_BUS_NEC98: /* NEC 98 pin */
-                               {
-                                       trigger = default_NEC98_trigger(idx);
-                                       break;
-                               }
                                default:
                                {
                                        printk(KERN_WARNING "broken BIOS!!\n");
@@ -1167,7 +1156,6 @@ static int pin_2_irq(int idx, int apic, int pin)
                case MP_BUS_ISA: /* ISA pin */
                case MP_BUS_EISA:
                case MP_BUS_MCA:
-               case MP_BUS_NEC98:
                {
                        irq = mp_irqs[idx].mpc_srcbusirq;
                        break;
@@ -1235,7 +1223,7 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 
 /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
-u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
+static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
 
 static int __assign_irq_vector(int irq)
 {
@@ -1360,8 +1348,8 @@ static void __init setup_IO_APIC_irqs(void)
                        if (!apic && (irq < 16))
                                disable_8259A_irq(irq);
                }
-               ioapic_write_entry(apic, pin, entry);
                spin_lock_irqsave(&ioapic_lock, flags);
+               __ioapic_write_entry(apic, pin, entry);
                set_native_irq_info(irq, TARGET_CPUS);
                spin_unlock_irqrestore(&ioapic_lock, flags);
        }
@@ -1926,6 +1914,15 @@ static void __init setup_ioapic_ids_from_mpc(void)
 static void __init setup_ioapic_ids_from_mpc(void) { }
 #endif
 
+static int no_timer_check __initdata;
+
+static int __init notimercheck(char *s)
+{
+       no_timer_check = 1;
+       return 1;
+}
+__setup("no_timer_check", notimercheck);
+
 /*
  * There is a nasty bug in some older SMP boards, their mptable lies
  * about the timer IRQ. We do the following to work around the situation:
@@ -1934,10 +1931,13 @@ static void __init setup_ioapic_ids_from_mpc(void) { }
  *     - if this function detects that timer IRQs are defunct, then we fall
  *       back to ISA timer IRQs
  */
-static int __init timer_irq_works(void)
+int __init timer_irq_works(void)
 {
        unsigned long t1 = jiffies;
 
+       if (no_timer_check)
+               return 1;
+
        local_irq_enable();
        /* Let ten ticks pass... */
        mdelay((10 * 1000) / HZ);
@@ -2161,9 +2161,15 @@ static inline void unlock_ExtINT_logic(void)
        unsigned char save_control, save_freq_select;
 
        pin  = find_isa_irq_pin(8, mp_INT);
+       if (pin == -1) {
+               WARN_ON_ONCE(1);
+               return;
+       }
        apic = find_isa_irq_apic(8, mp_INT);
-       if (pin == -1)
+       if (apic == -1) {
+               WARN_ON_ONCE(1);
                return;
+       }
 
        entry0 = ioapic_read_entry(apic, pin);
        clear_IO_APIC_pin(apic, pin);
@@ -2208,7 +2214,7 @@ int timer_uses_ioapic_pin_0;
  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
  * fanatically on his truly buggy board.
  */
-static inline void check_timer(void)
+static inline void __init check_timer(void)
 {
        int apic1, pin1, apic2, pin2;
        int vector;
@@ -2856,8 +2862,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
        if (!ioapic && (irq < 16))
                disable_8259A_irq(irq);
 
-       ioapic_write_entry(ioapic, pin, entry);
        spin_lock_irqsave(&ioapic_lock, flags);
+       __ioapic_write_entry(ioapic, pin, entry);
        set_native_irq_info(irq, TARGET_CPUS);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 
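
The io_apic.c changes split ioapic_write_entry() into a lock-taking wrapper plus a lock-free __ioapic_write_entry(), so callers that already hold ioapic_lock (setup_IO_APIC_irqs(), io_apic_set_pci_routing()) can write the route entry and update the affinity info inside a single critical section. The same locked/unlocked helper convention in miniature (table and names are hypothetical):

static DEFINE_SPINLOCK(tbl_lock);
static u32 tbl[16];

/* caller must hold tbl_lock */
static void __tbl_write(int idx, u32 val)
{
        tbl[idx] = val;
}

static void tbl_write(int idx, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tbl_lock, flags);
        __tbl_write(idx, val);
        spin_unlock_irqrestore(&tbl_lock, flags);
}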
index fc79e1e859c498a9d640ba40fcff7494fe6110b6..af1d53344993cac32221d7209bf853bed4f5febc 100644 (file)
@@ -184,7 +184,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
        mutex_lock(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn);
+       free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
        mutex_unlock(&kprobe_mutex);
 }
 
@@ -333,7 +333,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                return 1;
 
 ss_probe:
-#ifndef CONFIG_PREEMPT
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
        if (p->ainsn.boostable == 1 && !p->post_handler){
                /* Boost up -- we can execute copied instructions directly */
                reset_current_kprobe();
index 445211eb2d57cdfcab06bb2a0b1f3b2b6b6ad5da..b410e5fb034f285346ef41c4cc6929db3081790b 100644 (file)
@@ -160,16 +160,14 @@ static int read_default_ldt(void __user * ptr, unsigned long bytecount)
 {
        int err;
        unsigned long size;
-       void *address;
 
        err = 0;
-       address = &default_ldt[0];
        size = 5*sizeof(struct desc_struct);
        if (size > bytecount)
                size = bytecount;
 
        err = size;
-       if (copy_to_user(ptr, address, size))
+       if (clear_user(ptr, size))
                err = -EFAULT;
 
        return err;
index eb57a851789dc9061aeacf0a2a83813a45f93be6..b83672b895278798957b4a9748e96965c3d23e08 100644 (file)
@@ -283,10 +283,9 @@ static int __init mca_init(void)
        bus->f.mca_transform_memory = mca_dummy_transform_memory;
 
        /* get the motherboard device */
-       mca_dev = kmalloc(sizeof(struct mca_device), GFP_KERNEL);
+       mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL);
        if(unlikely(!mca_dev))
                goto out_nomem;
-       memset(mca_dev, 0, sizeof(struct mca_device));
 
        /*
         * We do not expect many MCA interrupts during initialization,
@@ -310,11 +309,9 @@ static int __init mca_init(void)
        mca_dev->slot = MCA_MOTHERBOARD;
        mca_register_device(MCA_PRIMARY_BUS, mca_dev);
 
-       mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+       mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
        if(unlikely(!mca_dev))
                goto out_unlock_nomem;
-       memset(mca_dev, 0, sizeof(struct mca_device));
-
 
        /* Put motherboard into video setup mode, read integrated video
         * POS registers, and turn motherboard setup off.
@@ -349,10 +346,9 @@ static int __init mca_init(void)
        }
        if(which_scsi) {
                /* found a scsi card */
-               mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+               mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
                if(unlikely(!mca_dev))
                        goto out_unlock_nomem;
-               memset(mca_dev, 0, sizeof(struct mca_device));
 
                for(j = 0; j < 8; j++)
                        mca_dev->pos[j] = pos[j];
@@ -378,10 +374,9 @@ static int __init mca_init(void)
                if(!mca_read_and_store_pos(pos))
                        continue;
 
-               mca_dev = kmalloc(sizeof(struct mca_device), GFP_ATOMIC);
+               mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
                if(unlikely(!mca_dev))
                        goto out_unlock_nomem;
-               memset(mca_dev, 0, sizeof(struct mca_device));
 
                for(j=0; j<8; j++)
                        mca_dev->pos[j]=pos[j];
index 23f5984d06540f9c42183b232ad98e488b5218ae..972346604f9d1b7d23841e317ff46c7559f49851 100644 (file)
@@ -703,7 +703,6 @@ static struct sysdev_driver mc_sysdev_driver = {
        .resume = mc_sysdev_resume,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static __cpuinit int
 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -726,7 +725,6 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 static struct notifier_block mc_cpu_notifier = {
        .notifier_call = mc_cpu_callback,
 };
-#endif
 
 static int __init microcode_init (void)
 {
index 470cf97e7cd3104bbfd6eb2de379d0bd0be9c460..d7d9c8b23f72782f7798340678a41536b7dd64e6 100644 (file)
@@ -108,7 +108,8 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
 {
-       const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
+       const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
+               *para = NULL;
        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
        for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 
@@ -118,6 +119,8 @@ int module_finalize(const Elf_Ehdr *hdr,
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
                        locks= s;
+               if (!strcmp(".parainstructions", secstrings + s->sh_name))
+                       para = s;
        }
 
        if (alt) {
@@ -132,6 +135,12 @@ int module_finalize(const Elf_Ehdr *hdr,
                                            lseg, lseg + locks->sh_size,
                                            tseg, tseg + text->sh_size);
        }
+
+       if (para) {
+               void *pseg = (void *)para->sh_addr;
+               apply_paravirt(pseg, pseg + para->sh_size);
+       }
+
        return 0;
 }
 
index 442aaf8c77ebf593fd249f9f4e69db8df3070be9..2ce67228dff8a28e634b714b03e1898aa9fd3b7c 100644 (file)
@@ -249,8 +249,6 @@ static void __init MP_bus_info (struct mpc_config_bus *m)
                mp_current_pci_id++;
        } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
                mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
-       } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
-               mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
        } else {
                printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
        }
index a773f776c9ea3bef1627644e367f224f62105921..1d1a56cae34064755c1e6b78148ba780dba307f2 100644 (file)
@@ -195,7 +195,6 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
 {
        const u32 __user *tmp = (const u32 __user *)buf;
        u32 data[2];
-       size_t rv;
        u32 reg = *ppos;
        int cpu = iminor(file->f_dentry->d_inode);
        int err;
@@ -203,7 +202,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
        if (count % 8)
                return -EINVAL; /* Invalid chunk size */
 
-       for (rv = 0; count; count -= 8) {
+       for (; count; count -= 8) {
                if (copy_from_user(&data, tmp, 8))
                        return -EFAULT;
                err = do_wrmsr(cpu, reg, data[0], data[1]);
@@ -250,7 +249,6 @@ static int msr_device_create(int i)
        return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int msr_class_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
 {
@@ -271,7 +269,6 @@ static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
 {
        .notifier_call = msr_class_cpu_callback,
 };
-#endif
 
 static int __init msr_init(void)
 {
index eaafe233a5da83f4abbb2c17e670b8a46fa4edaa..f5bc7e1be8016b504a65270a0a6f2d6aafe1e0eb 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/percpu.h>
 #include <linux/dmi.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -42,6 +43,8 @@ int nmi_watchdog_enabled;
 static DEFINE_PER_CPU(unsigned long, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned long, evntsel_nmi_owner[3]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
  */
@@ -867,14 +870,16 @@ static unsigned int
 
 void touch_nmi_watchdog (void)
 {
-       int i;
+       if (nmi_watchdog > 0) {
+               unsigned cpu;
 
-       /*
-        * Just reset the alert counters, (other CPUs might be
-        * spinning on locks we hold):
-        */
-       for_each_possible_cpu(i)
-               alert_counter[i] = 0;
+               /*
+                * Just reset the alert counters, (other CPUs might be
+                * spinning on locks we hold):
+                */
+               for_each_present_cpu (cpu)
+                       alert_counter[cpu] = 0;
+       }
 
        /*
         * Tickle the softlockup detector too:
@@ -907,6 +912,16 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
                touched = 1;
        }
 
+       if (cpu_isset(cpu, backtrace_mask)) {
+               static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
+
+               spin_lock(&lock);
+               printk("NMI backtrace for cpu %d\n", cpu);
+               dump_stack();
+               spin_unlock(&lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+
        sum = per_cpu(irq_stat, cpu).apic_timer_irqs;
 
        /* if the apic timer isn't firing, this cpu isn't doing much */
@@ -1033,6 +1048,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+       int i;
+
+       backtrace_mask = cpu_online_map;
+       /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpus_empty(backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
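
__trigger_all_cpu_backtrace() above works by publishing a cpumask of online CPUs and letting each CPU's next NMI watchdog tick print its stack and clear its own bit; the trigger side then simply polls until the mask drains or 10 seconds pass. The poll-with-timeout idiom as a rough sketch (helper name hypothetical, kernel cpumask/mdelay assumed):

static int wait_for_mask_clear(cpumask_t *mask, unsigned int timeout_ms)
{
        unsigned int i;

        for (i = 0; i < timeout_ms; i++) {
                if (cpus_empty(*mask))
                        return 0;       /* every CPU reported in */
                mdelay(1);
        }
        return -1;                      /* some CPUs never cleared their bit */
}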
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
new file mode 100644 (file)
index 0000000..3dceab5
--- /dev/null
+++ b/arch/i386/kernel/paravirt.c
@@ -0,0 +1,569 @@
+/*  Paravirtualization interfaces
+    Copyright (C) 2006 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/bcd.h>
+#include <linux/start_kernel.h>
+
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+#include <asm/fixmap.h>
+#include <asm/apic.h>
+#include <asm/tlbflush.h>
+
+/* nop stub */
+static void native_nop(void)
+{
+}
+
+static void __init default_banner(void)
+{
+       printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+              paravirt_ops.name);
+}
+
+char *memory_setup(void)
+{
+       return paravirt_ops.memory_setup();
+}
+
+/* Simple instruction patching code. */
+#define DEF_NATIVE(name, code)                                 \
+       extern const char start_##name[], end_##name[];         \
+       asm("start_" #name ": " code "; end_" #name ":")
+DEF_NATIVE(cli, "cli");
+DEF_NATIVE(sti, "sti");
+DEF_NATIVE(popf, "push %eax; popf");
+DEF_NATIVE(pushf, "pushf; pop %eax");
+DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
+DEF_NATIVE(iret, "iret");
+DEF_NATIVE(sti_sysexit, "sti; sysexit");
+
+static const struct native_insns
+{
+       const char *start, *end;
+} native_insns[] = {
+       [PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
+       [PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
+       [PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
+       [PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
+       [PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
+       [PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
+       [PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
+};
+
+static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
+{
+       unsigned int insn_len;
+
+       /* Don't touch it if we don't have a replacement */
+       if (type >= ARRAY_SIZE(native_insns) || !native_insns[type].start)
+               return len;
+
+       insn_len = native_insns[type].end - native_insns[type].start;
+
+       /* Similarly if we can't fit replacement. */
+       if (len < insn_len)
+               return len;
+
+       memcpy(insns, native_insns[type].start, insn_len);
+       return insn_len;
+}
+
+static fastcall unsigned long native_get_debugreg(int regno)
+{
+       unsigned long val = 0;  /* Damn you, gcc! */
+
+       switch (regno) {
+       case 0:
+               asm("movl %%db0, %0" :"=r" (val)); break;
+       case 1:
+               asm("movl %%db1, %0" :"=r" (val)); break;
+       case 2:
+               asm("movl %%db2, %0" :"=r" (val)); break;
+       case 3:
+               asm("movl %%db3, %0" :"=r" (val)); break;
+       case 6:
+               asm("movl %%db6, %0" :"=r" (val)); break;
+       case 7:
+               asm("movl %%db7, %0" :"=r" (val)); break;
+       default:
+               BUG();
+       }
+       return val;
+}
+
+static fastcall void native_set_debugreg(int regno, unsigned long value)
+{
+       switch (regno) {
+       case 0:
+               asm("movl %0,%%db0"     : /* no output */ :"r" (value));
+               break;
+       case 1:
+               asm("movl %0,%%db1"     : /* no output */ :"r" (value));
+               break;
+       case 2:
+               asm("movl %0,%%db2"     : /* no output */ :"r" (value));
+               break;
+       case 3:
+               asm("movl %0,%%db3"     : /* no output */ :"r" (value));
+               break;
+       case 6:
+               asm("movl %0,%%db6"     : /* no output */ :"r" (value));
+               break;
+       case 7:
+               asm("movl %0,%%db7"     : /* no output */ :"r" (value));
+               break;
+       default:
+               BUG();
+       }
+}
+
+void init_IRQ(void)
+{
+       paravirt_ops.init_IRQ();
+}
+
+static fastcall void native_clts(void)
+{
+       asm volatile ("clts");
+}
+
+static fastcall unsigned long native_read_cr0(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static fastcall void native_write_cr0(unsigned long val)
+{
+       asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr2(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static fastcall void native_write_cr2(unsigned long val)
+{
+       asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr3(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static fastcall void native_write_cr3(unsigned long val)
+{
+       asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr4(void)
+{
+       unsigned long val;
+       asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+       return val;
+}
+
+static fastcall unsigned long native_read_cr4_safe(void)
+{
+       unsigned long val;
+       /* This could fault if %cr4 does not exist */
+       asm("1: movl %%cr4, %0          \n"
+               "2:                             \n"
+               ".section __ex_table,\"a\"      \n"
+               ".long 1b,2b                    \n"
+               ".previous                      \n"
+               : "=r" (val): "0" (0));
+       return val;
+}
+
+static fastcall void native_write_cr4(unsigned long val)
+{
+       asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static fastcall unsigned long native_save_fl(void)
+{
+       unsigned long f;
+       asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+       return f;
+}
+
+static fastcall void native_restore_fl(unsigned long f)
+{
+       asm volatile("pushl %0 ; popfl": /* no output */
+                            :"g" (f)
+                            :"memory", "cc");
+}
+
+static fastcall void native_irq_disable(void)
+{
+       asm volatile("cli": : :"memory");
+}
+
+static fastcall void native_irq_enable(void)
+{
+       asm volatile("sti": : :"memory");
+}
+
+static fastcall void native_safe_halt(void)
+{
+       asm volatile("sti; hlt": : :"memory");
+}
+
+static fastcall void native_halt(void)
+{
+       asm volatile("hlt": : :"memory");
+}
+
+static fastcall void native_wbinvd(void)
+{
+       asm volatile("wbinvd": : :"memory");
+}
+
+static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
+{
+       unsigned long long val;
+
+       asm volatile("2: rdmsr ; xorl %0,%0\n"
+                    "1:\n\t"
+                    ".section .fixup,\"ax\"\n\t"
+                    "3:  movl %3,%0 ; jmp 1b\n\t"
+                    ".previous\n\t"
+                    ".section __ex_table,\"a\"\n"
+                    "   .align 4\n\t"
+                    "   .long  2b,3b\n\t"
+                    ".previous"
+                    : "=r" (*err), "=A" (val)
+                    : "c" (msr), "i" (-EFAULT));
+
+       return val;
+}
+
+static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
+{
+       int err;
+       asm volatile("2: wrmsr ; xorl %0,%0\n"
+                    "1:\n\t"
+                    ".section .fixup,\"ax\"\n\t"
+                    "3:  movl %4,%0 ; jmp 1b\n\t"
+                    ".previous\n\t"
+                    ".section __ex_table,\"a\"\n"
+                    "   .align 4\n\t"
+                    "   .long  2b,3b\n\t"
+                    ".previous"
+                    : "=a" (err)
+                    : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+                      "i" (-EFAULT));
+       return err;
+}
+
+static fastcall unsigned long long native_read_tsc(void)
+{
+       unsigned long long val;
+       asm volatile("rdtsc" : "=A" (val));
+       return val;
+}
+
+static fastcall unsigned long long native_read_pmc(void)
+{
+       unsigned long long val;
+       asm volatile("rdpmc" : "=A" (val));
+       return val;
+}
+
+static fastcall void native_load_tr_desc(void)
+{
+       asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static fastcall void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+       asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static fastcall void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+       asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static fastcall void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+       asm ("sgdt %0":"=m" (*dtr));
+}
+
+static fastcall void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+       asm ("sidt %0":"=m" (*dtr));
+}
+
+static fastcall unsigned long native_store_tr(void)
+{
+       unsigned long tr;
+       asm ("str %0":"=r" (tr));
+       return tr;
+}
+
+static fastcall void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+       C(0); C(1); C(2);
+#undef C
+}
+
+static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
+{
+       u32 *lp = (u32 *)((char *)dt + entry*8);
+       lp[0] = entry_low;
+       lp[1] = entry_high;
+}
+
+static fastcall void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+       native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+       native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+       native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_load_esp0(struct tss_struct *tss,
+                                     struct thread_struct *thread)
+{
+       tss->esp0 = thread->esp0;
+
+       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+       if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+               tss->ss1 = thread->sysenter_cs;
+               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+       }
+}
+
+static fastcall void native_io_delay(void)
+{
+       asm volatile("outb %al,$0x80");
+}
+
+static fastcall void native_flush_tlb(void)
+{
+       __native_flush_tlb();
+}
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+static fastcall void native_flush_tlb_global(void)
+{
+       __native_flush_tlb_global();
+}
+
+static fastcall void native_flush_tlb_single(u32 addr)
+{
+       __native_flush_tlb_single(addr);
+}
+
+#ifndef CONFIG_X86_PAE
+static fastcall void native_set_pte(pte_t *ptep, pte_t pteval)
+{
+       *ptep = pteval;
+}
+
+static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+       *ptep = pteval;
+}
+
+static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+       *pmdp = pmdval;
+}
+
+#else /* CONFIG_X86_PAE */
+
+static fastcall void native_set_pte(pte_t *ptep, pte_t pte)
+{
+       ptep->pte_high = pte.pte_high;
+       smp_wmb();
+       ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
+{
+       ptep->pte_high = pte.pte_high;
+       smp_wmb();
+       ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+       ptep->pte_low = 0;
+       smp_wmb();
+       ptep->pte_high = pte.pte_high;
+       smp_wmb();
+       ptep->pte_low = pte.pte_low;
+}
+
+static fastcall void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+       set_64bit((unsigned long long *)ptep,pte_val(pteval));
+}
+
+static fastcall void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+       set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
+}
+
+static fastcall void native_set_pud(pud_t *pudp, pud_t pudval)
+{
+       *pudp = pudval;
+}
+
+static fastcall void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+       ptep->pte_low = 0;
+       smp_wmb();
+       ptep->pte_high = 0;
+}
+
+static fastcall void native_pmd_clear(pmd_t *pmd)
+{
+       u32 *tmp = (u32 *)pmd;
+       *tmp = 0;
+       smp_wmb();
+       *(tmp + 1) = 0;
+}
+#endif /* CONFIG_X86_PAE */
+
+/* These are in entry.S */
+extern fastcall void native_iret(void);
+extern fastcall void native_irq_enable_sysexit(void);
+
+static int __init print_banner(void)
+{
+       paravirt_ops.banner();
+       return 0;
+}
+core_initcall(print_banner);
+
+/* We simply declare start_kernel to be the paravirt probe of last resort. */
+paravirt_probe(start_kernel);
+
+struct paravirt_ops paravirt_ops = {
+       .name = "bare hardware",
+       .paravirt_enabled = 0,
+       .kernel_rpl = 0,
+
+       .patch = native_patch,
+       .banner = default_banner,
+       .arch_setup = native_nop,
+       .memory_setup = machine_specific_memory_setup,
+       .get_wallclock = native_get_wallclock,
+       .set_wallclock = native_set_wallclock,
+       .time_init = time_init_hook,
+       .init_IRQ = native_init_IRQ,
+
+       .cpuid = native_cpuid,
+       .get_debugreg = native_get_debugreg,
+       .set_debugreg = native_set_debugreg,
+       .clts = native_clts,
+       .read_cr0 = native_read_cr0,
+       .write_cr0 = native_write_cr0,
+       .read_cr2 = native_read_cr2,
+       .write_cr2 = native_write_cr2,
+       .read_cr3 = native_read_cr3,
+       .write_cr3 = native_write_cr3,
+       .read_cr4 = native_read_cr4,
+       .read_cr4_safe = native_read_cr4_safe,
+       .write_cr4 = native_write_cr4,
+       .save_fl = native_save_fl,
+       .restore_fl = native_restore_fl,
+       .irq_disable = native_irq_disable,
+       .irq_enable = native_irq_enable,
+       .safe_halt = native_safe_halt,
+       .halt = native_halt,
+       .wbinvd = native_wbinvd,
+       .read_msr = native_read_msr,
+       .write_msr = native_write_msr,
+       .read_tsc = native_read_tsc,
+       .read_pmc = native_read_pmc,
+       .load_tr_desc = native_load_tr_desc,
+       .set_ldt = native_set_ldt,
+       .load_gdt = native_load_gdt,
+       .load_idt = native_load_idt,
+       .store_gdt = native_store_gdt,
+       .store_idt = native_store_idt,
+       .store_tr = native_store_tr,
+       .load_tls = native_load_tls,
+       .write_ldt_entry = native_write_ldt_entry,
+       .write_gdt_entry = native_write_gdt_entry,
+       .write_idt_entry = native_write_idt_entry,
+       .load_esp0 = native_load_esp0,
+
+       .set_iopl_mask = native_set_iopl_mask,
+       .io_delay = native_io_delay,
+       .const_udelay = __const_udelay,
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       .apic_write = native_apic_write,
+       .apic_write_atomic = native_apic_write_atomic,
+       .apic_read = native_apic_read,
+#endif
+
+       .flush_tlb_user = native_flush_tlb,
+       .flush_tlb_kernel = native_flush_tlb_global,
+       .flush_tlb_single = native_flush_tlb_single,
+
+       .set_pte = native_set_pte,
+       .set_pte_at = native_set_pte_at,
+       .set_pmd = native_set_pmd,
+       .pte_update = (void *)native_nop,
+       .pte_update_defer = (void *)native_nop,
+#ifdef CONFIG_X86_PAE
+       .set_pte_atomic = native_set_pte_atomic,
+       .set_pte_present = native_set_pte_present,
+       .set_pud = native_set_pud,
+       .pte_clear = native_pte_clear,
+       .pmd_clear = native_pmd_clear,
+#endif
+
+       .irq_enable_sysexit = native_irq_enable_sysexit,
+       .iret = native_iret,
+};
+EXPORT_SYMBOL(paravirt_ops);
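
native_patch() above encodes the boot-time inlining rule for paravirt call sites: when the hand-written native sequence (cli, sti, pushf/popf, iret, sti;sysexit) fits in the space reserved at the call site it is copied over the indirect call, otherwise the call is kept and the original length is returned. The core decision as a standalone sketch (names hypothetical; in the kernel the caller nop-pads whatever space is left over):

#include <string.h>

static unsigned int patch_site(const char *repl, unsigned int repl_len,
                               void *site, unsigned int site_len)
{
        if (repl_len > site_len)
                return site_len;        /* does not fit: keep the indirect call */
        memcpy(site, repl, repl_len);
        return repl_len;                /* bytes actually patched */
}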
index 5c8c6ef1fc5e650c3af994b0c5d569b3e693fbca..41af692c1584908b6864356b757294b64b787de2 100644 (file)
@@ -92,14 +92,12 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
        if (!mem_base)
                goto out;
 
-       dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+       dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
-       memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
-       dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
+       dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;
-       memset(dev->dma_mem->bitmap, 0, bitmap_size);
 
        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
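
Several hunks in this commit (dma_declare_coherent_memory() above, the mca.c changes earlier) replace a kmalloc()+memset() pair with kzalloc(), which returns zeroed memory in one call. The equivalence, for reference:

        /* before */
        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p)
                memset(p, 0, sizeof(*p));

        /* after */
        p = kzalloc(sizeof(*p), GFP_KERNEL);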
index dd53c58f64f1bd944dd65190fc5d8f98bf6c8643..99308510a17c7fdab49f6a90a1f09e9f9917cf37 100644 (file)
@@ -56,6 +56,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
+#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -99,22 +100,18 @@ EXPORT_SYMBOL(enable_hlt);
  */
 void default_idle(void)
 {
-       local_irq_enable();
-
        if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
                current_thread_info()->status &= ~TS_POLLING;
                smp_mb__after_clear_bit();
-               while (!need_resched()) {
-                       local_irq_disable();
-                       if (!need_resched())
-                               safe_halt();
-                       else
-                               local_irq_enable();
-               }
+               local_irq_disable();
+               if (!need_resched())
+                       safe_halt();    /* enables interrupts racelessly */
+               else
+                       local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
-               while (!need_resched())
-                       cpu_relax();
+               /* loop is done by the caller */
+               cpu_relax();
        }
 }
 #ifdef CONFIG_APM_MODULE
@@ -128,14 +125,7 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
-       local_irq_enable();
-
-       asm volatile(
-               "2:"
-               "testl %0, %1;"
-               "rep; nop;"
-               "je 2b;"
-               : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
+       cpu_relax();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -256,8 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 static void mwait_idle(void)
 {
        local_irq_enable();
-       while (!need_resched())
-               mwait_idle_with_hints(0, 0);
+       mwait_idle_with_hints(0, 0);
 }
 
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
@@ -314,8 +303,8 @@ void show_regs(struct pt_regs * regs)
                regs->eax,regs->ebx,regs->ecx,regs->edx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx",
                regs->esi, regs->edi, regs->ebp);
-       printk(" DS: %04x ES: %04x\n",
-               0xffff & regs->xds,0xffff & regs->xes);
+       printk(" DS: %04x ES: %04x GS: %04x\n",
+              0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
 
        cr0 = read_cr0();
        cr2 = read_cr2();
@@ -346,6 +335,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
+       regs.xgs = __KERNEL_PDA;
        regs.orig_eax = -1;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -431,7 +421,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        p->thread.eip = (unsigned long) ret_from_fork;
 
        savesegment(fs,p->thread.fs);
-       savesegment(gs,p->thread.gs);
 
        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -508,7 +497,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
        dump->regs.ds = regs->xds;
        dump->regs.es = regs->xes;
        savesegment(fs,dump->regs.fs);
-       savesegment(gs,dump->regs.gs);
+       dump->regs.gs = regs->xgs;
        dump->regs.orig_eax = regs->orig_eax;
        dump->regs.eip = regs->eip;
        dump->regs.cs = regs->xcs;
@@ -648,22 +637,27 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
        __unlazy_fpu(prev_p);
 
+
+       /* we're going to use this soon, after a few expensive things */
+       if (next_p->fpu_counter > 5)
+               prefetch(&next->i387.fxsave);
+
        /*
         * Reload esp0.
         */
        load_esp0(tss, next);
 
        /*
-        * Save away %fs and %gs. No need to save %es and %ds, as
-        * those are always kernel segments while inside the kernel.
-        * Doing this before setting the new TLS descriptors avoids
-        * the situation where we temporarily have non-reloadable
-        * segments in %fs and %gs.  This could be an issue if the
-        * NMI handler ever used %fs or %gs (it does not today), or
-        * if the kernel is running inside of a hypervisor layer.
+        * Save away %fs. No need to save %gs, as it was saved on the
+        * stack on entry.  No need to save %es and %ds, as those are
+        * always kernel segments while inside the kernel.  Doing this
+        * before setting the new TLS descriptors avoids the situation
+        * where we temporarily have non-reloadable segments in %fs
+        * and %gs.  This could be an issue if the NMI handler ever
+        * used %fs or %gs (it does not today), or if the kernel is
+        * running inside of a hypervisor layer.
         */
        savesegment(fs, prev->fs);
-       savesegment(gs, prev->gs);
 
        /*
         * Load the per-thread Thread-Local Storage descriptor.
@@ -671,22 +665,14 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        load_TLS(next, cpu);
 
        /*
-        * Restore %fs and %gs if needed.
+        * Restore %fs if needed.
         *
-        * Glibc normally makes %fs be zero, and %gs is one of
-        * the TLS segments.
+        * Glibc normally makes %fs be zero.
         */
        if (unlikely(prev->fs | next->fs))
                loadsegment(fs, next->fs);
 
-       if (prev->gs | next->gs)
-               loadsegment(gs, next->gs);
-
-       /*
-        * Restore IOPL if needed.
-        */
-       if (unlikely(prev->iopl != next->iopl))
-               set_iopl_mask(next->iopl);
+       write_pda(pcurrent, next_p);
 
        /*
         * Now maybe handle debug registers and/or IO bitmaps
@@ -697,6 +683,13 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 
        disable_tsc(prev_p, next_p);
 
+       /* If the task has used fpu the last 5 timeslices, just do a full
+        * restore of the math state immediately to avoid the trap; the
+        * chances of needing FPU soon are obviously high now
+        */
+       if (next_p->fpu_counter > 5)
+               math_state_restore();
+
        return prev_p;
 }
 
index 775f50e9395bb048a2e193ef60483af172c7c045..f3f94ac5736a254c4a97eb58aa7335a70328df18 100644 (file)
@@ -94,13 +94,9 @@ static int putreg(struct task_struct *child,
                                return -EIO;
                        child->thread.fs = value;
                        return 0;
-               case GS:
-                       if (value && (value & 3) != 3)
-                               return -EIO;
-                       child->thread.gs = value;
-                       return 0;
                case DS:
                case ES:
+               case GS:
                        if (value && (value & 3) != 3)
                                return -EIO;
                        value &= 0xffff;
@@ -116,8 +112,8 @@ static int putreg(struct task_struct *child,
                        value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
                        break;
        }
-       if (regno > GS*4)
-               regno -= 2*4;
+       if (regno > ES*4)
+               regno -= 1*4;
        put_stack_long(child, regno - sizeof(struct pt_regs), value);
        return 0;
 }
@@ -131,18 +127,16 @@ static unsigned long getreg(struct task_struct *child,
                case FS:
                        retval = child->thread.fs;
                        break;
-               case GS:
-                       retval = child->thread.gs;
-                       break;
                case DS:
                case ES:
+               case GS:
                case SS:
                case CS:
                        retval = 0xffff;
                        /* fall through */
                default:
-                       if (regno > GS*4)
-                               regno -= 2*4;
+                       if (regno > ES*4)
+                               regno -= 1*4;
                        regno = regno - sizeof(struct pt_regs);
                        retval &= get_stack_long(child, regno);
        }
index 9f6ab1789bb05fc81593c47211b16bc45be894e1..a01320a7b63651ba6ec32914c1b5db16e44455d8 100644 (file)
@@ -3,10 +3,23 @@
  */
 #include <linux/pci.h>
 #include <linux/irq.h>
+#include <asm/pci-direct.h>
+#include <asm/genapic.h>
+#include <asm/cpu.h>
 
 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
+static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
+{
+#ifdef CONFIG_X86_64
+       if (genapic !=  &apic_flat)
+               panic("APIC mode must be flat on this system\n");
+#elif defined(CONFIG_X86_GENERICARCH)
+       if (genapic != &apic_default)
+               panic("APIC mode must be default(flat) on this system. Use apic=default\n");
+#endif
+}
 
-static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
+void __init quirk_intel_irqbalance(void)
 {
        u8 config, rev;
        u32 word;
@@ -16,18 +29,18 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
         * based platforms.
         * Disable SW irqbalance/affinity on those platforms.
         */
-       pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+       rev = read_pci_config_byte(0, 0, 0, PCI_CLASS_REVISION);
        if (rev > 0x9)
                return;
 
        printk(KERN_INFO "Intel E7520/7320/7525 detected.");
 
-       /* enable access to config space*/
-       pci_read_config_byte(dev, 0xf4, &config);
-       pci_write_config_byte(dev, 0xf4, config|0x2);
+       /* enable access to config space */
+       config = read_pci_config_byte(0, 0, 0, 0xf4);
+       write_pci_config_byte(0, 0, 0, 0xf4, config|0x2);
 
        /* read xTPR register */
-       raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
+       word = read_pci_config_16(0, 0, 0x40, 0x4c);
 
        if (!(word & (1 << 13))) {
                printk(KERN_INFO "Disabling irq balancing and affinity\n");
@@ -37,14 +50,25 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
                noirqdebug_setup("");
 #ifdef CONFIG_PROC_FS
                no_irq_affinity = 1;
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+               printk(KERN_INFO "Disabling cpu hotplug control\n");
+               enable_cpu_hotplug = 0;
+#endif
+#ifdef CONFIG_X86_64
+               /* force the genapic selection to flat mode so that
+                * interrupts can be redirected to more than one CPU.
+                */
+               genapic_force = &apic_flat;
 #endif
        }
 
-       /* put back the original value for config space*/
+       /* put back the original value for config space */
        if (!(config & 0x2))
-               pci_write_config_byte(dev, 0xf4, config);
+               write_pci_config_byte(0, 0, 0, 0xf4, config);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7320_MCH,  quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7525_MCH,  quirk_intel_irqbalance);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7520_MCH,  quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7320_MCH,  verify_quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7525_MCH,  verify_quirk_intel_irqbalance);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_E7520_MCH,  verify_quirk_intel_irqbalance);
+
 #endif
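
quirk_intel_irqbalance() above is rewritten to use the early read_pci_config_byte()/read_pci_config_16()/write_pci_config_byte() helpers instead of a struct pci_dev, so it can run before the PCI core is up. Those helpers rely on the classic type-1 configuration mechanism: write the bus/device/function/register address to I/O port 0xCF8, then transfer the data through 0xCFC. A hedged sketch of a byte read (helper name hypothetical, kernel outl()/inb() assumed):

static u8 early_pci_read_byte(u8 bus, u8 slot, u8 func, u8 off)
{
        outl(0x80000000 | (bus << 16) | (slot << 11) | (func << 8) | (off & ~3),
             0xCF8);
        return inb(0xCFC + (off & 3));
}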
index 84278e0093a2a9d6d0d9b5593ce432f8e749cb59..3514b4153f7fd7d1aadc31acce100dd86ed8ad95 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/dmi.h>
 #include <linux/ctype.h>
 #include <linux/pm.h>
+#include <linux/reboot.h>
 #include <asm/uaccess.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
index 141041dde74d0c8fe91913b445b2d68626776aed..79df6e612dbd03a20bfcc959dbb1ceba7158c1dd 100644 (file)
@@ -63,9 +63,6 @@
 #include <setup_arch.h>
 #include <bios_ebda.h>
 
-/* Forward Declaration. */
-void __init find_max_pfn(void);
-
 /* This value is set up by the early boot code to point to the value
    immediately after the boot time page tables.  It contains a *physical*
    address, and must not be in the .bss segment! */
@@ -76,11 +73,8 @@ int disable_pse __devinitdata = 0;
 /*
  * Machine setup..
  */
-
-#ifdef CONFIG_EFI
-int efi_enabled = 0;
-EXPORT_SYMBOL(efi_enabled);
-#endif
+extern struct resource code_resource;
+extern struct resource data_resource;
 
 /* cpu data as detected by the assembly code in head.S */
 struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
@@ -99,12 +93,6 @@ unsigned int machine_submodel_id;
 unsigned int BIOS_revision;
 unsigned int mca_pentium_flag;
 
-/* For PCI or other memory-mapped resources */
-unsigned long pci_mem_start = 0x10000000;
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_mem_start);
-#endif
-
 /* Boot loader ID as an integer, for the benefit of proc_dointvec */
 int bootloader_type;
 
@@ -134,7 +122,6 @@ struct ist_info ist_info;
        defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
 EXPORT_SYMBOL(ist_info);
 #endif
-struct e820map e820;
 
 extern void early_cpu_init(void);
 extern int root_mountflags;
@@ -149,516 +136,6 @@ static char command_line[COMMAND_LINE_SIZE];
 
 unsigned char __initdata boot_params[PARAM_SIZE];
 
-static struct resource data_resource = {
-       .name   = "Kernel data",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource code_resource = {
-       .name   = "Kernel code",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource system_rom_resource = {
-       .name   = "System ROM",
-       .start  = 0xf0000,
-       .end    = 0xfffff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource extension_rom_resource = {
-       .name   = "Extension ROM",
-       .start  = 0xe0000,
-       .end    = 0xeffff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource adapter_rom_resources[] = { {
-       .name   = "Adapter ROM",
-       .start  = 0xc8000,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-}, {
-       .name   = "Adapter ROM",
-       .start  = 0,
-       .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-} };
-
-static struct resource video_rom_resource = {
-       .name   = "Video ROM",
-       .start  = 0xc0000,
-       .end    = 0xc7fff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
-};
-
-static struct resource video_ram_resource = {
-       .name   = "Video RAM area",
-       .start  = 0xa0000,
-       .end    = 0xbffff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
-};
-
-static struct resource standard_io_resources[] = { {
-       .name   = "dma1",
-       .start  = 0x0000,
-       .end    = 0x001f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "pic1",
-       .start  = 0x0020,
-       .end    = 0x0021,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "timer0",
-       .start  = 0x0040,
-       .end    = 0x0043,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "timer1",
-       .start  = 0x0050,
-       .end    = 0x0053,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "keyboard",
-       .start  = 0x0060,
-       .end    = 0x006f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "dma page reg",
-       .start  = 0x0080,
-       .end    = 0x008f,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "pic2",
-       .start  = 0x00a0,
-       .end    = 0x00a1,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "dma2",
-       .start  = 0x00c0,
-       .end    = 0x00df,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-}, {
-       .name   = "fpu",
-       .start  = 0x00f0,
-       .end    = 0x00ff,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_IO
-} };
-
-#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
-
-static int __init romchecksum(unsigned char *rom, unsigned long length)
-{
-       unsigned char *p, sum = 0;
-
-       for (p = rom; p < rom + length; p++)
-               sum += *p;
-       return sum == 0;
-}
-
-static void __init probe_roms(void)
-{
-       unsigned long start, length, upper;
-       unsigned char *rom;
-       int           i;
-
-       /* video rom */
-       upper = adapter_rom_resources[0].start;
-       for (start = video_rom_resource.start; start < upper; start += 2048) {
-               rom = isa_bus_to_virt(start);
-               if (!romsignature(rom))
-                       continue;
-
-               video_rom_resource.start = start;
-
-               /* 0 < length <= 0x7f * 512, historically */
-               length = rom[2] * 512;
-
-               /* if checksum okay, trust length byte */
-               if (length && romchecksum(rom, length))
-                       video_rom_resource.end = start + length - 1;
-
-               request_resource(&iomem_resource, &video_rom_resource);
-               break;
-       }
-
-       start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
-       if (start < upper)
-               start = upper;
-
-       /* system rom */
-       request_resource(&iomem_resource, &system_rom_resource);
-       upper = system_rom_resource.start;
-
-       /* check for extension rom (ignore length byte!) */
-       rom = isa_bus_to_virt(extension_rom_resource.start);
-       if (romsignature(rom)) {
-               length = extension_rom_resource.end - extension_rom_resource.start + 1;
-               if (romchecksum(rom, length)) {
-                       request_resource(&iomem_resource, &extension_rom_resource);
-                       upper = extension_rom_resource.start;
-               }
-       }
-
-       /* check for adapter roms on 2k boundaries */
-       for (i = 0; i < ARRAY_SIZE(adapter_rom_resources) && start < upper; start += 2048) {
-               rom = isa_bus_to_virt(start);
-               if (!romsignature(rom))
-                       continue;
-
-               /* 0 < length <= 0x7f * 512, historically */
-               length = rom[2] * 512;
-
-               /* but accept any length that fits if checksum okay */
-               if (!length || start + length > upper || !romchecksum(rom, length))
-                       continue;
-
-               adapter_rom_resources[i].start = start;
-               adapter_rom_resources[i].end = start + length - 1;
-               request_resource(&iomem_resource, &adapter_rom_resources[i]);
-
-               start = adapter_rom_resources[i++].end & ~2047UL;
-       }
-}
-
-static void __init limit_regions(unsigned long long size)
-{
-       unsigned long long current_addr = 0;
-       int i;
-
-       if (efi_enabled) {
-               efi_memory_desc_t *md;
-               void *p;
-
-               for (p = memmap.map, i = 0; p < memmap.map_end;
-                       p += memmap.desc_size, i++) {
-                       md = p;
-                       current_addr = md->phys_addr + (md->num_pages << 12);
-                       if (md->type == EFI_CONVENTIONAL_MEMORY) {
-                               if (current_addr >= size) {
-                                       md->num_pages -=
-                                               (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
-                                       memmap.nr_map = i + 1;
-                                       return;
-                               }
-                       }
-               }
-       }
-       for (i = 0; i < e820.nr_map; i++) {
-               current_addr = e820.map[i].addr + e820.map[i].size;
-               if (current_addr < size)
-                       continue;
-
-               if (e820.map[i].type != E820_RAM)
-                       continue;
-
-               if (e820.map[i].addr >= size) {
-                       /*
-                        * This region starts past the end of the
-                        * requested size, skip it completely.
-                        */
-                       e820.nr_map = i;
-               } else {
-                       e820.nr_map = i + 1;
-                       e820.map[i].size -= current_addr - size;
-               }
-               return;
-       }
-}
-
-void __init add_memory_region(unsigned long long start,
-                             unsigned long long size, int type)
-{
-       int x;
-
-       if (!efi_enabled) {
-                       x = e820.nr_map;
-
-               if (x == E820MAX) {
-                   printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
-                   return;
-               }
-
-               e820.map[x].addr = start;
-               e820.map[x].size = size;
-               e820.map[x].type = type;
-               e820.nr_map++;
-       }
-} /* add_memory_region */
-
-#define E820_DEBUG     1
-
-static void __init print_memory_map(char *who)
-{
-       int i;
-
-       for (i = 0; i < e820.nr_map; i++) {
-               printk(" %s: %016Lx - %016Lx ", who,
-                       e820.map[i].addr,
-                       e820.map[i].addr + e820.map[i].size);
-               switch (e820.map[i].type) {
-               case E820_RAM:  printk("(usable)\n");
-                               break;
-               case E820_RESERVED:
-                               printk("(reserved)\n");
-                               break;
-               case E820_ACPI:
-                               printk("(ACPI data)\n");
-                               break;
-               case E820_NVS:
-                               printk("(ACPI NVS)\n");
-                               break;
-               default:        printk("type %lu\n", e820.map[i].type);
-                               break;
-               }
-       }
-}
-
-/*
- * Sanitize the BIOS e820 map.
- *
- * Some e820 responses include overlapping entries.  The following 
- * replaces the original e820 map with a new one, removing overlaps.
- *
- */
-struct change_member {
-       struct e820entry *pbios; /* pointer to original bios entry */
-       unsigned long long addr; /* address for this change point */
-};
-static struct change_member change_point_list[2*E820MAX] __initdata;
-static struct change_member *change_point[2*E820MAX] __initdata;
-static struct e820entry *overlap_list[E820MAX] __initdata;
-static struct e820entry new_bios[E820MAX] __initdata;
-
-int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
-{
-       struct change_member *change_tmp;
-       unsigned long current_type, last_type;
-       unsigned long long last_addr;
-       int chgidx, still_changing;
-       int overlap_entries;
-       int new_bios_entry;
-       int old_nr, new_nr, chg_nr;
-       int i;
-
-       /*
-               Visually we're performing the following (1,2,3,4 = memory types)...
-
-               Sample memory map (w/overlaps):
-                  ____22__________________
-                  ______________________4_
-                  ____1111________________
-                  _44_____________________
-                  11111111________________
-                  ____________________33__
-                  ___________44___________
-                  __________33333_________
-                  ______________22________
-                  ___________________2222_
-                  _________111111111______
-                  _____________________11_
-                  _________________4______
-
-               Sanitized equivalent (no overlap):
-                  1_______________________
-                  _44_____________________
-                  ___1____________________
-                  ____22__________________
-                  ______11________________
-                  _________1______________
-                  __________3_____________
-                  ___________44___________
-                  _____________33_________
-                  _______________2________
-                  ________________1_______
-                  _________________4______
-                  ___________________2____
-                  ____________________33__
-                  ______________________4_
-       */
-
-       /* if there's only one memory region, don't bother */
-       if (*pnr_map < 2)
-               return -1;
-
-       old_nr = *pnr_map;
-
-       /* bail out if we find any unreasonable addresses in bios map */
-       for (i=0; i<old_nr; i++)
-               if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
-                       return -1;
-
-       /* create pointers for initial change-point information (for sorting) */
-       for (i=0; i < 2*old_nr; i++)
-               change_point[i] = &change_point_list[i];
-
-       /* record all known change-points (starting and ending addresses),
-          omitting those that are for empty memory regions */
-       chgidx = 0;
-       for (i=0; i < old_nr; i++)      {
-               if (biosmap[i].size != 0) {
-                       change_point[chgidx]->addr = biosmap[i].addr;
-                       change_point[chgidx++]->pbios = &biosmap[i];
-                       change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
-                       change_point[chgidx++]->pbios = &biosmap[i];
-               }
-       }
-       chg_nr = chgidx;        /* true number of change-points */
-
-       /* sort change-point list by memory addresses (low -> high) */
-       still_changing = 1;
-       while (still_changing)  {
-               still_changing = 0;
-               for (i=1; i < chg_nr; i++)  {
-                       /* if <current_addr> > <last_addr>, swap */
-                       /* or, if current=<start_addr> & last=<end_addr>, swap */
-                       if ((change_point[i]->addr < change_point[i-1]->addr) ||
-                               ((change_point[i]->addr == change_point[i-1]->addr) &&
-                                (change_point[i]->addr == change_point[i]->pbios->addr) &&
-                                (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
-                          )
-                       {
-                               change_tmp = change_point[i];
-                               change_point[i] = change_point[i-1];
-                               change_point[i-1] = change_tmp;
-                               still_changing=1;
-                       }
-               }
-       }
-
-       /* create a new bios memory map, removing overlaps */
-       overlap_entries=0;       /* number of entries in the overlap table */
-       new_bios_entry=0;        /* index for creating new bios map entries */
-       last_type = 0;           /* start with undefined memory type */
-       last_addr = 0;           /* start with 0 as last starting address */
-       /* loop through change-points, determining effect on the new bios map */
-       for (chgidx=0; chgidx < chg_nr; chgidx++)
-       {
-               /* keep track of all overlapping bios entries */
-               if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
-               {
-                       /* add map entry to overlap list (> 1 entry implies an overlap) */
-                       overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
-               }
-               else
-               {
-                       /* remove entry from list (order independent, so swap with last) */
-                       for (i=0; i<overlap_entries; i++)
-                       {
-                               if (overlap_list[i] == change_point[chgidx]->pbios)
-                                       overlap_list[i] = overlap_list[overlap_entries-1];
-                       }
-                       overlap_entries--;
-               }
-               /* if there are overlapping entries, decide which "type" to use */
-               /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
-               current_type = 0;
-               for (i=0; i<overlap_entries; i++)
-                       if (overlap_list[i]->type > current_type)
-                               current_type = overlap_list[i]->type;
-               /* continue building up new bios map based on this information */
-               if (current_type != last_type)  {
-                       if (last_type != 0)      {
-                               new_bios[new_bios_entry].size =
-                                       change_point[chgidx]->addr - last_addr;
-                               /* move forward only if the new size was non-zero */
-                               if (new_bios[new_bios_entry].size != 0)
-                                       if (++new_bios_entry >= E820MAX)
-                                               break;  /* no more space left for new bios entries */
-                       }
-                       if (current_type != 0)  {
-                               new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
-                               new_bios[new_bios_entry].type = current_type;
-                               last_addr=change_point[chgidx]->addr;
-                       }
-                       last_type = current_type;
-               }
-       }
-       new_nr = new_bios_entry;   /* retain count for new bios entries */
-
-       /* copy new bios mapping into original location */
-       memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
-       *pnr_map = new_nr;
-
-       return 0;
-}
-
-/*
- * Copy the BIOS e820 map into a safe place.
- *
- * Sanity-check it while we're at it..
- *
- * If we're lucky and live on a modern system, the setup code
- * will have given us a memory map that we can use to properly
- * set up memory.  If we aren't, we'll fake a memory map.
- *
- * We check to see that the memory map contains at least 2 elements
- * before we'll use it, because the detection code in setup.S may
- * not be perfect and most every PC known to man has two memory
- * regions: one from 0 to 640k, and one from 1mb up.  (The IBM
- * thinkpad 560x, for example, does not cooperate with the memory
- * detection code.)
- */
-int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
-{
-       /* Only one memory region (or negative)? Ignore it */
-       if (nr_map < 2)
-               return -1;
-
-       do {
-               unsigned long long start = biosmap->addr;
-               unsigned long long size = biosmap->size;
-               unsigned long long end = start + size;
-               unsigned long type = biosmap->type;
-
-               /* Overflow in 64 bits? Ignore the memory map. */
-               if (start > end)
-                       return -1;
-
-               /*
-                * Some BIOSes claim RAM in the 640k - 1M region.
-                * Not right. Fix it up.
-                */
-               if (type == E820_RAM) {
-                       if (start < 0x100000ULL && end > 0xA0000ULL) {
-                               if (start < 0xA0000ULL)
-                                       add_memory_region(start, 0xA0000ULL-start, type);
-                               if (end <= 0x100000ULL)
-                                       continue;
-                               start = 0x100000ULL;
-                               size = end - start;
-                       }
-               }
-               add_memory_region(start, size, type);
-       } while (biosmap++,--nr_map);
-       return 0;
-}
-
 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
 struct edd edd;
 #ifdef CONFIG_EDD_MODULE
@@ -682,7 +159,7 @@ static inline void copy_edd(void)
 }
 #endif
 
-static int __initdata user_defined_memmap = 0;
+int __initdata user_defined_memmap = 0;
 
 /*
  * "mem=nopentium" disables the 4MB page tables.
@@ -719,51 +196,6 @@ static int __init parse_mem(char *arg)
 }
 early_param("mem", parse_mem);
 
-static int __init parse_memmap(char *arg)
-{
-       if (!arg)
-               return -EINVAL;
-
-       if (strcmp(arg, "exactmap") == 0) {
-#ifdef CONFIG_CRASH_DUMP
-               /* If we are doing a crash dump, we
-                * still need to know the real mem
-                * size before original memory map is
-                * reset.
-                */
-               find_max_pfn();
-               saved_max_pfn = max_pfn;
-#endif
-               e820.nr_map = 0;
-               user_defined_memmap = 1;
-       } else {
-               /* If the user specifies memory size, we
-                * limit the BIOS-provided memory map to
-                * that size. exactmap can be used to specify
-                * the exact map. mem=number can be used to
-                * trim the existing memory map.
-                */
-               unsigned long long start_at, mem_size;
-
-               mem_size = memparse(arg, &arg);
-               if (*arg == '@') {
-                       start_at = memparse(arg+1, &arg);
-                       add_memory_region(start_at, mem_size, E820_RAM);
-               } else if (*arg == '#') {
-                       start_at = memparse(arg+1, &arg);
-                       add_memory_region(start_at, mem_size, E820_ACPI);
-               } else if (*arg == '$') {
-                       start_at = memparse(arg+1, &arg);
-                       add_memory_region(start_at, mem_size, E820_RESERVED);
-               } else {
-                       limit_regions(mem_size);
-                       user_defined_memmap = 1;
-               }
-       }
-       return 0;
-}
-early_param("memmap", parse_memmap);
-
 #ifdef CONFIG_PROC_VMCORE
 /* elfcorehdr= specifies the location of elf core header
  * stored by the crashed kernel.
@@ -827,90 +259,6 @@ static int __init parse_reservetop(char *arg)
 }
 early_param("reservetop", parse_reservetop);
 
-/*
- * Callback for efi_memory_walk.
- */
-static int __init
-efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
-{
-       unsigned long *max_pfn = arg, pfn;
-
-       if (start < end) {
-               pfn = PFN_UP(end -1);
-               if (pfn > *max_pfn)
-                       *max_pfn = pfn;
-       }
-       return 0;
-}
-
-static int __init
-efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
-{
-       memory_present(0, PFN_UP(start), PFN_DOWN(end));
-       return 0;
-}
-
- /*
-  * This function checks if the entire range <start,end> is mapped with type.
-  *
-  * Note: this function only works correctly if the e820 table is sorted and
-  * non-overlapping, which is the case.
-  */
-int __init
-e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
-{
-       u64 start = s;
-       u64 end = e;
-       int i;
-       for (i = 0; i < e820.nr_map; i++) {
-               struct e820entry *ei = &e820.map[i];
-               if (type && ei->type != type)
-                       continue;
-               /* is the region (part) in overlap with the current region ?*/
-               if (ei->addr >= end || ei->addr + ei->size <= start)
-                       continue;
-               /* if the region is at the beginning of <start,end> we move
-                * start to the end of the region since it's ok until there
-                */
-               if (ei->addr <= start)
-                       start = ei->addr + ei->size;
-               /* if start is now at or beyond end, we're done, full
-                * coverage */
-               if (start >= end)
-                       return 1; /* we're done */
-       }
-       return 0;
-}
-
-/*
- * Find the highest page frame number we have available
- */
-void __init find_max_pfn(void)
-{
-       int i;
-
-       max_pfn = 0;
-       if (efi_enabled) {
-               efi_memmap_walk(efi_find_max_pfn, &max_pfn);
-               efi_memmap_walk(efi_memory_present_wrapper, NULL);
-               return;
-       }
-
-       for (i = 0; i < e820.nr_map; i++) {
-               unsigned long start, end;
-               /* RAM? */
-               if (e820.map[i].type != E820_RAM)
-                       continue;
-               start = PFN_UP(e820.map[i].addr);
-               end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-               if (start >= end)
-                       continue;
-               if (end > max_pfn)
-                       max_pfn = end;
-               memory_present(0, start, end);
-       }
-}
-
 /*
  * Determine low and high memory ranges:
  */
@@ -970,68 +318,6 @@ unsigned long __init find_max_low_pfn(void)
        return max_low_pfn;
 }
 
-/*
- * Free all available memory for boot time allocation.  Used
- * as a callback function by efi_memory_walk()
- */
-
-static int __init
-free_available_memory(unsigned long start, unsigned long end, void *arg)
-{
-       /* check max_low_pfn */
-       if (start >= (max_low_pfn << PAGE_SHIFT))
-               return 0;
-       if (end >= (max_low_pfn << PAGE_SHIFT))
-               end = max_low_pfn << PAGE_SHIFT;
-       if (start < end)
-               free_bootmem(start, end - start);
-
-       return 0;
-}
-/*
- * Register fully available low RAM pages with the bootmem allocator.
- */
-static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
-{
-       int i;
-
-       if (efi_enabled) {
-               efi_memmap_walk(free_available_memory, NULL);
-               return;
-       }
-       for (i = 0; i < e820.nr_map; i++) {
-               unsigned long curr_pfn, last_pfn, size;
-               /*
-                * Reserve usable low memory
-                */
-               if (e820.map[i].type != E820_RAM)
-                       continue;
-               /*
-                * We are rounding up the start address of usable memory:
-                */
-               curr_pfn = PFN_UP(e820.map[i].addr);
-               if (curr_pfn >= max_low_pfn)
-                       continue;
-               /*
-                * ... and at the end of the usable range downwards:
-                */
-               last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
-
-               if (last_pfn > max_low_pfn)
-                       last_pfn = max_low_pfn;
-
-               /*
-                * .. finally, did all the rounding and playing
-                * around just make the area go away?
-                */
-               if (last_pfn <= curr_pfn)
-                       continue;
-
-               size = last_pfn - curr_pfn;
-               free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
-       }
-}
-
 /*
  * workaround for Dell systems that neglect to reserve EBDA
  */
@@ -1118,8 +404,8 @@ void __init setup_bootmem_allocator(void)
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
-       reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
-                        bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
+       reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
+                        bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
 
        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
@@ -1162,8 +448,7 @@ void __init setup_bootmem_allocator(void)
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE);
-                       initrd_start =
-                               INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+                       initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
@@ -1200,126 +485,6 @@ void __init remapped_pgdat_init(void)
        }
 }
 
-/*
- * Request address space for all standard RAM and ROM resources
- * and also for regions reported as reserved by the e820.
- */
-static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
-{
-       int i;
-
-       probe_roms();
-       for (i = 0; i < e820.nr_map; i++) {
-               struct resource *res;
-#ifndef CONFIG_RESOURCES_64BIT
-               if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
-                       continue;
-#endif
-               res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
-               switch (e820.map[i].type) {
-               case E820_RAM:  res->name = "System RAM"; break;
-               case E820_ACPI: res->name = "ACPI Tables"; break;
-               case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
-               default:        res->name = "reserved";
-               }
-               res->start = e820.map[i].addr;
-               res->end = res->start + e820.map[i].size - 1;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-               if (request_resource(&iomem_resource, res)) {
-                       kfree(res);
-                       continue;
-               }
-               if (e820.map[i].type == E820_RAM) {
-                       /*
-                        *  We don't know which RAM region contains kernel data,
-                        *  so we try it repeatedly and let the resource manager
-                        *  test it.
-                        */
-                       request_resource(res, code_resource);
-                       request_resource(res, data_resource);
-#ifdef CONFIG_KEXEC
-                       request_resource(res, &crashk_res);
-#endif
-               }
-       }
-}
-
-/*
- * Request address space for all standard resources
- *
- * This is called just before pcibios_init(), which is also a
- * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
- */
-static int __init request_standard_resources(void)
-{
-       int i;
-
-       printk("Setting up standard PCI resources\n");
-       if (efi_enabled)
-               efi_initialize_iomem_resources(&code_resource, &data_resource);
-       else
-               legacy_init_iomem_resources(&code_resource, &data_resource);
-
-       /* EFI systems may still have VGA */
-       request_resource(&iomem_resource, &video_ram_resource);
-
-       /* request I/O space for devices used on all i[345]86 PCs */
-       for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-               request_resource(&ioport_resource, &standard_io_resources[i]);
-       return 0;
-}
-
-subsys_initcall(request_standard_resources);
-
-static void __init register_memory(void)
-{
-       unsigned long gapstart, gapsize, round;
-       unsigned long long last;
-       int i;
-
-       /*
-        * Search for the biggest gap in the low 32 bits of the e820
-        * memory space.
-        */
-       last = 0x100000000ull;
-       gapstart = 0x10000000;
-       gapsize = 0x400000;
-       i = e820.nr_map;
-       while (--i >= 0) {
-               unsigned long long start = e820.map[i].addr;
-               unsigned long long end = start + e820.map[i].size;
-
-               /*
-                * Since "last" is at most 4GB, we know we'll
-                * fit in 32 bits if this condition is true
-                */
-               if (last > end) {
-                       unsigned long gap = last - end;
-
-                       if (gap > gapsize) {
-                               gapsize = gap;
-                               gapstart = end;
-                       }
-               }
-               if (start < last)
-                       last = start;
-       }
-
-       /*
-        * See how much we want to round up: start off with
-        * rounding to the next 1MB area.
-        */
-       round = 0x100000;
-       while ((gapsize >> 4) > round)
-               round += round;
-       /* Fun with two's complement */
-       pci_mem_start = (gapstart + round) & -round;
-
-       printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
-               pci_mem_start, gapstart, gapsize);
-}
-
 #ifdef CONFIG_MCA
 static void set_mca_bus(int x)
 {
@@ -1329,6 +494,12 @@ static void set_mca_bus(int x)
 static void set_mca_bus(int x) { }
 #endif
 
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+char * __attribute__((weak)) memory_setup(void)
+{
+       return machine_specific_memory_setup();
+}
+
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -1381,7 +552,7 @@ void __init setup_arch(char **cmdline_p)
                efi_init();
        else {
                printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-               print_memory_map(machine_specific_memory_setup());
+               print_memory_map(memory_setup());
        }
 
        copy_edd();
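
The setup.c changes above route the memory-map query through a weak memory_setup() hook ("Overridden in paravirt.c if CONFIG_PARAVIRT"), so a strong definition elsewhere silently replaces the default. A minimal, self-contained sketch of that weak-symbol override pattern follows; the names are illustrative, not taken from the commit.

#include <stdio.h>

/* Default implementation.  Because it is declared weak, any strong
 * definition of the same symbol in another object file replaces it
 * at link time without further glue. */
char * __attribute__((weak)) example_memory_setup(void)
{
        return "default machine-specific setup";
}

/* A paravirt-style build would simply provide a strong definition:
 *
 *     char *example_memory_setup(void)
 *     {
 *             return "paravirt setup";
 *     }
 */

int main(void)
{
        printf("%s\n", example_memory_setup());
        return 0;
}
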
index 43002cfb40c4e2811bf5006c9126fa2ff1cfdd46..65d7620eaa093756e083ed5d0f9786ccfc1bd917 100644 (file)
@@ -128,7 +128,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *peax
                         X86_EFLAGS_TF | X86_EFLAGS_SF | X86_EFLAGS_ZF | \
                         X86_EFLAGS_AF | X86_EFLAGS_PF | X86_EFLAGS_CF)
 
-       GET_SEG(gs);
+       COPY_SEG(gs);
        GET_SEG(fs);
        COPY_SEG(es);
        COPY_SEG(ds);
@@ -244,9 +244,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
 {
        int tmp, err = 0;
 
-       tmp = 0;
-       savesegment(gs, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
+       err |= __put_user(regs->xgs, (unsigned int __user *)&sc->gs);
        savesegment(fs, tmp);
        err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
 
index 31e5c6573aae450b3a1f94fe7129de94fc4ea26e..5285aff8367fec481d33a7ed94c0278815202286 100644 (file)
@@ -321,7 +321,6 @@ static inline void leave_mm (unsigned long cpu)
 
 fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long cpu;
 
        cpu = get_cpu();
@@ -352,7 +351,6 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
        smp_mb__after_clear_bit();
 out:
        put_cpu_no_resched();
-       set_irq_regs(old_regs);
 }
 
 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
@@ -607,14 +605,11 @@ void smp_send_stop(void)
  */
 fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        ack_APIC_irq();
-       set_irq_regs(old_regs);
 }
 
 fastcall void smp_call_function_interrupt(struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;
@@ -637,7 +632,6 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
                mb();
                atomic_inc(&call_data->finished);
        }
-       set_irq_regs(old_regs);
 }
 
 /*
@@ -699,6 +693,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                put_cpu();
                return -EBUSY;
        }
+
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
        spin_lock_bh(&call_lock);
        __smp_call_function_single(cpu, func, info, nonatomic, wait);
        spin_unlock_bh(&call_lock);
index 4bb8b77cd65b257b4b54a730f5c5e3cf46595f4a..4bf0e3c83b8b3244536eb54a02d4d087c0b15cab 100644 (file)
  *             Dave Jones      :       Report invalid combinations of Athlon CPUs.
 *              Rusty Russell   :       Hacked into shape for new "hotplug" boot process. */
 
+
+/* SMP boot always wants to use real time delay to allow sufficient time for
+ * the APs to come online */
+#define USE_REAL_TIME_DELAY
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -52,6 +57,8 @@
 #include <asm/desc.h>
 #include <asm/arch_hooks.h>
 #include <asm/nmi.h>
+#include <asm/pda.h>
+#include <asm/genapic.h>
 
 #include <mach_apic.h>
 #include <mach_wakecpu.h>
@@ -536,11 +543,11 @@ set_cpu_sibling_map(int cpu)
 static void __devinit start_secondary(void *unused)
 {
        /*
-        * Dont put anything before smp_callin(), SMP
+        * Don't put *anything* before secondary_cpu_init(), SMP
         * booting is so fragile that we want to limit the
         * things done here to the most necessary things.
         */
-       cpu_init();
+       secondary_cpu_init();
        preempt_disable();
        smp_callin();
        while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
@@ -599,13 +606,16 @@ void __devinit initialize_secondary(void)
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
-               :"r" (current->thread.esp),"r" (current->thread.eip));
+               :"m" (current->thread.esp),"m" (current->thread.eip));
 }
 
+/* Static state in head.S used to set up a CPU */
 extern struct {
        void * esp;
        unsigned short ss;
 } stack_start;
+extern struct i386_pda *start_pda;
+extern struct Xgt_desc_struct cpu_gdt_descr;
 
 #ifdef CONFIG_NUMA
 
@@ -936,9 +946,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;
 
-       ++cpucount;
-       alternatives_smp_switch(1);
-
        /*
         * We can't use kernel_thread since we must avoid to
         * reschedule the child.
@@ -946,15 +953,30 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
        idle = alloc_idle_task(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);
+
+       /* Pre-allocate and initialize the CPU's GDT and PDA so it
+          doesn't have to do any memory allocation during the
+          delicate CPU-bringup phase. */
+       if (!init_gdt(cpu, idle)) {
+               printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+               return -1;      /* ? */
+       }
+
        idle->thread.eip = (unsigned long) start_secondary;
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();
 
+       ++cpucount;
+       alternatives_smp_switch(1);
+
        /* So we see what's up   */
        printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
        /* Stack for startup_32 can be just as for start_secondary onwards */
        stack_start.esp = (void *) idle->thread.esp;
 
+       start_pda = cpu_pda(cpu);
+       cpu_gdt_descr = per_cpu(cpu_gdt_descr, cpu);
+
        irq_ctx_init(cpu);
 
        x86_cpu_to_apicid[cpu] = apicid;
@@ -1049,13 +1071,15 @@ void cpu_exit_clear(void)
 
 struct warm_boot_cpu_info {
        struct completion *complete;
+       struct work_struct task;
        int apicid;
        int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-       struct warm_boot_cpu_info *info = p;
+       struct warm_boot_cpu_info *info =
+               container_of(work, struct warm_boot_cpu_info, task);
        do_boot_cpu(info->apicid, info->cpu);
        complete(info->complete);
 }
@@ -1064,7 +1088,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 {
        DECLARE_COMPLETION_ONSTACK(done);
        struct warm_boot_cpu_info info;
-       struct work_struct task;
        int     apicid, ret;
        struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1089,7 +1112,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
        info.complete = &done;
        info.apicid = apicid;
        info.cpu = cpu;
-       INIT_WORK(&task, do_warm_boot_cpu, &info);
+       INIT_WORK(&info.task, do_warm_boot_cpu);
 
        tsc_sync_disabled = 1;
 
@@ -1097,7 +1120,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        KERNEL_PGD_PTRS);
        flush_tlb_all();
-       schedule_work(&task);
+       schedule_work(&info.task);
        wait_for_completion(&done);
 
        tsc_sync_disabled = 0;
@@ -1108,34 +1131,15 @@ exit:
 }
 #endif
 
-static void smp_tune_scheduling (void)
+static void smp_tune_scheduling(void)
 {
        unsigned long cachesize;       /* kB   */
-       unsigned long bandwidth = 350; /* MB/s */
-       /*
-        * Rough estimation for SMP scheduling, this is the number of
-        * cycles it takes for a fully memory-limited process to flush
-        * the SMP-local cache.
-        *
-        * (For a P5 this pretty much means we will choose another idle
-        *  CPU almost always at wakeup time (this is due to the small
-        *  L1 cache), on PIIs it's around 50-100 usecs, depending on
-        *  the cache size)
-        */
 
-       if (!cpu_khz) {
-               /*
-                * this basically disables processor-affinity
-                * scheduling on SMP without a TSC.
-                */
-               return;
-       } else {
+       if (cpu_khz) {
                cachesize = boot_cpu_data.x86_cache_size;
-               if (cachesize == -1) {
-                       cachesize = 16; /* Pentiums, 2x8kB cache */
-                       bandwidth = 100;
-               }
-               max_cache_size = cachesize * 1024;
+
+               if (cachesize > 0)
+                       max_cache_size = cachesize * 1024;
        }
 }
 
@@ -1461,6 +1465,12 @@ int __devinit __cpu_up(unsigned int cpu)
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map))
                cpu_relax();
+
+#ifdef CONFIG_X86_GENERICARCH
+       if (num_online_cpus() > 8 && genapic == &apic_default)
+               panic("Default flat APIC routing can't be used with > 8 cpus\n");
+#endif
+
        return 0;
 }
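
The warm-boot hunks above follow the reworked workqueue API: the work item is embedded in the structure it operates on (struct warm_boot_cpu_info gains a work_struct), and the handler recovers that structure with container_of() instead of receiving a bare data pointer. A small userspace sketch of the same embed-and-container_of idiom, with illustrative names rather than the kernel's types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {                      /* stand-in for struct work_struct */
        void (*func)(struct work_item *work);
};

struct boot_info {                      /* stand-in for warm_boot_cpu_info */
        struct work_item task;          /* work item embedded in its context */
        int cpu;
};

static void do_boot(struct work_item *work)
{
        /* Recover the enclosing structure from the embedded member. */
        struct boot_info *info = container_of(work, struct boot_info, task);
        printf("booting cpu %d\n", info->cpu);
}

int main(void)
{
        struct boot_info info = { .task = { .func = do_boot }, .cpu = 1 };

        /* A real workqueue would invoke the callback; call it directly here. */
        info.task.func(&info.task);
        return 0;
}
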
 
index 713ba39d32c66db0e2ad4ca0580088ffccef785f..7de9117b5a3ada15bf9e401afc9695e849bac8af 100644 (file)
  * Should the kernel map a VDSO page into processes and pass its
  * address down to glibc upon exec()?
  */
+#ifdef CONFIG_PARAVIRT
+unsigned int __read_mostly vdso_enabled = 0;
+#else
 unsigned int __read_mostly vdso_enabled = 1;
+#endif
 
 EXPORT_SYMBOL_GPL(vdso_enabled);
 
@@ -132,7 +136,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
                goto up_fail;
        }
 
-       vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                ret = -ENOMEM;
                goto up_fail;
index 78af572fd17c3e4a903aaf59062744455808e77a..c505b16c099038381efc87791d37e49c0aeb158c 100644 (file)
@@ -56,6 +56,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
+#include <asm/time.h>
 
 #include "mach_time.h"
 
@@ -116,10 +117,7 @@ static int set_rtc_mmss(unsigned long nowtime)
        /* gets recalled with irq locally disabled */
        /* XXX - does irqsave resolve this? -johnstul */
        spin_lock_irqsave(&rtc_lock, flags);
-       if (efi_enabled)
-               retval = efi_set_rtc_mmss(nowtime);
-       else
-               retval = mach_set_rtc_mmss(nowtime);
+       retval = set_wallclock(nowtime);
        spin_unlock_irqrestore(&rtc_lock, flags);
 
        return retval;
@@ -223,10 +221,7 @@ unsigned long get_cmos_time(void)
 
        spin_lock_irqsave(&rtc_lock, flags);
 
-       if (efi_enabled)
-               retval = efi_get_time();
-       else
-               retval = mach_get_cmos_time();
+       retval = get_wallclock();
 
        spin_unlock_irqrestore(&rtc_lock, flags);
 
@@ -370,7 +365,7 @@ static void __init hpet_time_init(void)
                printk("Using HPET for base-timer\n");
        }
 
-       time_init_hook();
+       do_time_init();
 }
 #endif
 
@@ -392,5 +387,5 @@ void __init time_init(void)
 
        do_settimeofday(&ts);
 
-       time_init_hook();
+       do_time_init();
 }
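
The time.c hunks above replace open-coded efi_enabled branches with get_wallclock()/set_wallclock() and do_time_init() hooks, presumably so an alternative backend (EFI, paravirt, plain CMOS) can be selected behind one interface. A rough userspace sketch of that kind of function-pointer indirection; the structure and names are illustrative, not the kernel's actual hooks:

#include <stdio.h>
#include <time.h>

/* Illustrative ops table: each backend fills in its own callbacks. */
struct clock_ops {
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long now);
};

static unsigned long cmos_get_wallclock(void)
{
        return (unsigned long)time(NULL);       /* stand-in for the CMOS path */
}

static int cmos_set_wallclock(unsigned long now)
{
        printf("pretending to set the RTC to %lu\n", now);
        return 0;
}

/* An EFI or paravirt backend would install different pointers at boot. */
static struct clock_ops clock_ops = {
        .get_wallclock = cmos_get_wallclock,
        .set_wallclock = cmos_set_wallclock,
};

int main(void)
{
        unsigned long now = clock_ops.get_wallclock();

        clock_ops.set_wallclock(now);
        return 0;
}
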
index 1a2a979cf6a344f1603b33605a319831ee667a95..1e4702dfcd017e7649961465eaeb734116d9178b 100644 (file)
@@ -132,14 +132,20 @@ int __init hpet_enable(void)
         * the single HPET timer for system time.
         */
 #ifdef CONFIG_HPET_EMULATE_RTC
-       if (!(id & HPET_ID_NUMBER))
+       if (!(id & HPET_ID_NUMBER)) {
+               iounmap(hpet_virt_address);
+               hpet_virt_address = NULL;
                return -1;
+       }
 #endif
 
 
        hpet_period = hpet_readl(HPET_PERIOD);
-       if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD))
+       if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) {
+               iounmap(hpet_virt_address);
+               hpet_virt_address = NULL;
                return -1;
+       }
 
        /*
         * 64 bit math
@@ -156,8 +162,11 @@ int __init hpet_enable(void)
 
        hpet_use_timer = id & HPET_ID_LEGSUP;
 
-       if (hpet_timer_stop_set_go(hpet_tick))
+       if (hpet_timer_stop_set_go(hpet_tick)) {
+               iounmap(hpet_virt_address);
+               hpet_virt_address = NULL;
                return -1;
+       }
 
        use_hpet = 1;
 
index 07d6da36a8253f39bbce04e90ab4edc0ece8aed4..79cf608e14ca16254db381dcba319979047745fc 100644 (file)
@@ -40,14 +40,18 @@ int arch_register_cpu(int num)
         * restrictions and assumptions in the kernel. This basically
         * doesn't add a control file; one cannot attempt to offline
         * the BSP.
+        *
+        * Also, certain PCI quirks require that hotplug control not be
+        * enabled for all CPUs.
         */
-       if (!num)
-               cpu_devices[num].cpu.no_control = 1;
+       if (num && enable_cpu_hotplug)
+               cpu_devices[num].cpu.hotpluggable = 1;
 
        return register_cpu(&cpu_devices[num].cpu, num);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+int enable_cpu_hotplug = 1;
 
 void arch_unregister_cpu(int num) {
        return unregister_cpu(&cpu_devices[num].cpu);
index fe9c5e8e7e6f5066e5f92865f0c6d71ec2887050..68de48e498ca910804e6d71bf2f61bc01caed126 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/kexec.h>
 #include <linux/unwind.h>
 #include <linux/uaccess.h>
+#include <linux/nmi.h>
 
 #ifdef CONFIG_EISA
 #include <linux/ioport.h>
@@ -61,9 +62,6 @@ int panic_on_unrecovered_nmi;
 
 asmlinkage int system_call(void);
 
-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
-               { 0, 0 }, { 0, 0 } };
-
 /* Do we ignore FPU interrupts ? */
 char ignore_fpu_irq = 0;
 
@@ -94,7 +92,7 @@ asmlinkage void alignment_check(void);
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
 
-static int kstack_depth_to_print = 24;
+int kstack_depth_to_print = 24;
 #ifdef CONFIG_STACK_UNWIND
 static int call_trace = 1;
 #else
@@ -163,16 +161,25 @@ dump_trace_unwind(struct unwind_frame_info *info, void *data)
 {
        struct ops_and_data *oad = (struct ops_and_data *)data;
        int n = 0;
+       unsigned long sp = UNW_SP(info);
 
+       if (arch_unw_user_mode(info))
+               return -1;
        while (unwind(info) == 0 && UNW_PC(info)) {
                n++;
                oad->ops->address(oad->data, UNW_PC(info));
                if (arch_unw_user_mode(info))
                        break;
+               if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+                   && sp > UNW_SP(info))
+                       break;
+               sp = UNW_SP(info);
        }
        return n;
 }
 
+#define MSG(msg) ops->warning(data, msg)
+
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack,
                struct stacktrace_ops *ops, void *data)
@@ -191,29 +198,31 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        if (unwind_init_frame_info(&info, task, regs) == 0)
                                unw_ret = dump_trace_unwind(&info, &oad);
                } else if (task == current)
-                       unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+                       unw_ret = unwind_init_running(&info, dump_trace_unwind,
+                                                     &oad);
                else {
                        if (unwind_init_blocked(&info, task) == 0)
                                unw_ret = dump_trace_unwind(&info, &oad);
                }
                if (unw_ret > 0) {
                        if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-                               ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+                               ops->warning_symbol(data,
+                                            "DWARF2 unwinder stuck at %s",
                                             UNW_PC(&info));
                                if (UNW_SP(&info) >= PAGE_OFFSET) {
-                                       ops->warning(data, "Leftover inexact backtrace:\n");
+                                       MSG("Leftover inexact backtrace:");
                                        stack = (void *)UNW_SP(&info);
                                        if (!stack)
                                                return;
                                        ebp = UNW_FP(&info);
                                } else
-                                       ops->warning(data, "Full inexact backtrace again:\n");
+                                       MSG("Full inexact backtrace again:");
                        } else if (call_trace >= 1)
                                return;
                        else
-                               ops->warning(data, "Full inexact backtrace again:\n");
+                               MSG("Full inexact backtrace again:");
                } else
-                       ops->warning(data, "Inexact backtrace:\n");
+                       MSG("Inexact backtrace:");
        }
        if (!stack) {
                unsigned long dummy;
@@ -247,6 +256,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
+               touch_nmi_watchdog();
        }
 }
 EXPORT_SYMBOL(dump_trace);
@@ -379,7 +389,7 @@ void show_registers(struct pt_regs *regs)
         * time of the fault..
         */
        if (in_kernel) {
-               u8 __user *eip;
+               u8 *eip;
                int code_bytes = 64;
                unsigned char c;
 
@@ -388,18 +398,20 @@ void show_registers(struct pt_regs *regs)
 
                printk(KERN_EMERG "Code: ");
 
-               eip = (u8 __user *)regs->eip - 43;
-               if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+               eip = (u8 *)regs->eip - 43;
+               if (eip < (u8 *)PAGE_OFFSET ||
+                       probe_kernel_address(eip, c)) {
                        /* try starting at EIP */
-                       eip = (u8 __user *)regs->eip;
+                       eip = (u8 *)regs->eip;
                        code_bytes = 32;
                }
                for (i = 0; i < code_bytes; i++, eip++) {
-                       if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
+                       if (eip < (u8 *)PAGE_OFFSET ||
+                               probe_kernel_address(eip, c)) {
                                printk(" Bad EIP value.");
                                break;
                        }
-                       if (eip == (u8 __user *)regs->eip)
+                       if (eip == (u8 *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
@@ -415,7 +427,7 @@ static void handle_BUG(struct pt_regs *regs)
 
        if (eip < PAGE_OFFSET)
                return;
-       if (probe_kernel_address((unsigned short __user *)eip, ud2))
+       if (probe_kernel_address((unsigned short *)eip, ud2))
                return;
        if (ud2 != 0x0b0f)
                return;
@@ -428,11 +440,11 @@ static void handle_BUG(struct pt_regs *regs)
                char *file;
                char c;
 
-               if (probe_kernel_address((unsigned short __user *)(eip + 2),
-                                       line))
+               if (probe_kernel_address((unsigned short *)(eip + 2), line))
                        break;
-               if (__get_user(file, (char * __user *)(eip + 4)) ||
-                   (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+               if (probe_kernel_address((char **)(eip + 4), file) ||
+                   (unsigned long)file < PAGE_OFFSET ||
+                       probe_kernel_address(file, c))
                        file = "<bad filename>";
 
                printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
@@ -452,7 +464,7 @@ void die(const char * str, struct pt_regs * regs, long err)
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
-               .lock =                 SPIN_LOCK_UNLOCKED,
+               .lock =                 __SPIN_LOCK_UNLOCKED(die.lock),
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
@@ -707,8 +719,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
 {
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
                "CPU %d.\n", reason, smp_processor_id());
-       printk(KERN_EMERG "You probably have a hardware problem with your RAM "
-                       "chips\n");
+       printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
        if (panic_on_unrecovered_nmi)
                 panic("NMI: Not continuing");
 
@@ -773,7 +784,6 @@ void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
-       printk(KERN_EMERG "console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);
@@ -1088,49 +1098,24 @@ fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
 #endif
 }
 
-fastcall void setup_x86_bogus_stack(unsigned char * stk)
-{
-       unsigned long *switch16_ptr, *switch32_ptr;
-       struct pt_regs *regs;
-       unsigned long stack_top, stack_bot;
-       unsigned short iret_frame16_off;
-       int cpu = smp_processor_id();
-       /* reserve the space on 32bit stack for the magic switch16 pointer */
-       memmove(stk, stk + 8, sizeof(struct pt_regs));
-       switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
-       regs = (struct pt_regs *)stk;
-       /* now the switch32 on 16bit stack */
-       stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-       stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
-       switch32_ptr = (unsigned long *)(stack_top - 8);
-       iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
-       /* copy iret frame on 16bit stack */
-       memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
-       /* fill in the switch pointers */
-       switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
-       switch16_ptr[1] = __ESPFIX_SS;
-       switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
-               8 - CPU_16BIT_STACK_SIZE;
-       switch32_ptr[1] = __KERNEL_DS;
-}
-
-fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
+fastcall unsigned long patch_espfix_desc(unsigned long uesp,
+                                         unsigned long kesp)
 {
-       unsigned long *switch32_ptr;
-       unsigned char *stack16, *stack32;
-       unsigned long stack_top, stack_bot;
-       int len;
        int cpu = smp_processor_id();
-       stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
-       stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
-       switch32_ptr = (unsigned long *)(stack_top - 8);
-       /* copy the data from 16bit stack to 32bit stack */
-       len = CPU_16BIT_STACK_SIZE - 8 - sp;
-       stack16 = (unsigned char *)(stack_bot + sp);
-       stack32 = (unsigned char *)
-               (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
-       memcpy(stack32, stack16, len);
-       return stack32;
+       struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+       struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
+       unsigned long base = (kesp - uesp) & -THREAD_SIZE;
+       unsigned long new_kesp = kesp - base;
+       unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
+       __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
+       /* Set up base for espfix segment */
+       desc &= 0x00f0ff0000000000ULL;
+       desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
+               ((((__u64)base) << 32) & 0xff00000000000000ULL) |
+               ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
+               (lim_pages & 0xffff);
+       *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
+       return new_kesp;
 }
 
 /*
@@ -1143,7 +1128,7 @@ fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
  * Must be called with kernel preemption disabled (in this case,
  * local interrupts are disabled at the call-site in entry.S).
  */
-asmlinkage void math_state_restore(struct pt_regs regs)
+asmlinkage void math_state_restore(void)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;
@@ -1153,6 +1138,7 @@ asmlinkage void math_state_restore(struct pt_regs regs)
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
+       tsk->fpu_counter++;
 }
 
 #ifndef CONFIG_MATH_EMULATION
index fbc95828cd7493082aeeedc0396a762db056097b..1bbe45dca7a0ec626a3b2963d9031e579811d195 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <asm/delay.h>
 #include <asm/tsc.h>
-#include <asm/delay.h>
 #include <asm/io.h>
 
 #include "mach_timer.h"
@@ -217,7 +216,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
        unsigned int cpu;
 
@@ -306,7 +305,7 @@ static int __init cpufreq_tsc(void)
 {
        int ret;
 
-       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
        ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
        if (!ret)
index cbcd61d6120b2fdf13b1f59a46721eb8c3c89b10..be2f96e67f78d80c51ba6c1b9a5fde49563ff415 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
 #include <linux/audit.h>
+#include <linux/stddef.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
 /*
  * 8- and 16-bit register defines..
  */
-#define AL(regs)       (((unsigned char *)&((regs)->eax))[0])
-#define AH(regs)       (((unsigned char *)&((regs)->eax))[1])
-#define IP(regs)       (*(unsigned short *)&((regs)->eip))
-#define SP(regs)       (*(unsigned short *)&((regs)->esp))
+#define AL(regs)       (((unsigned char *)&((regs)->pt.eax))[0])
+#define AH(regs)       (((unsigned char *)&((regs)->pt.eax))[1])
+#define IP(regs)       (*(unsigned short *)&((regs)->pt.eip))
+#define SP(regs)       (*(unsigned short *)&((regs)->pt.esp))
 
 /*
  * virtual flags (16 and 32-bit versions)
 #define SAFE_MASK      (0xDD5)
 #define RETURN_MASK    (0xDFF)
 
-#define VM86_REGS_PART2 orig_eax
-#define VM86_REGS_SIZE1 \
-        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
-#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
+/* convert kernel_vm86_regs to vm86_regs */
+static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
+                                 const struct kernel_vm86_regs *regs)
+{
+       int ret = 0;
+
+       /* kernel_vm86_regs is missing xfs, so copy everything up to
+          (but not including) xgs, and then the rest after xgs. */
+       ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.xgs));
+       ret += copy_to_user(&user->__null_gs, &regs->pt.xgs,
+                           sizeof(struct kernel_vm86_regs) -
+                           offsetof(struct kernel_vm86_regs, pt.xgs));
+
+       return ret;
+}
+
+/* convert vm86_regs to kernel_vm86_regs */
+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
+                                   const struct vm86_regs __user *user,
+                                   unsigned extra)
+{
+       int ret = 0;
+
+       ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.xgs));
+       ret += copy_from_user(&regs->pt.xgs, &user->__null_gs,
+                             sizeof(struct kernel_vm86_regs) -
+                             offsetof(struct kernel_vm86_regs, pt.xgs) +
+                             extra);
+
+       return ret;
+}
 
 struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
 struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
@@ -112,10 +140,8 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
-       set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
-       tmp = copy_to_user(&current->thread.vm86_info->regs,regs, VM86_REGS_SIZE1);
-       tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
-               &regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
+       set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
+       tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
        tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
        if (tmp) {
                printk("vm86: could not access userspace vm86_info\n");
@@ -129,9 +155,11 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
        current->thread.saved_esp0 = 0;
        put_cpu();
 
-       loadsegment(fs, current->thread.saved_fs);
-       loadsegment(gs, current->thread.saved_gs);
        ret = KVM86->regs32;
+
+       loadsegment(fs, current->thread.saved_fs);
+       ret->xgs = current->thread.saved_gs;
+
        return ret;
 }
 
@@ -183,9 +211,9 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
        tsk = current;
        if (tsk->thread.saved_esp0)
                goto out;
-       tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
-       tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
-               (long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
+       tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+                                      offsetof(struct kernel_vm86_struct, vm86plus) -
+                                      sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
@@ -233,9 +261,9 @@ asmlinkage int sys_vm86(struct pt_regs regs)
        if (tsk->thread.saved_esp0)
                goto out;
        v86 = (struct vm86plus_struct __user *)regs.ecx;
-       tmp  = copy_from_user(&info, v86, VM86_REGS_SIZE1);
-       tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
-               (long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
+       tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
+                                      offsetof(struct kernel_vm86_struct, regs32) -
+                                      sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
@@ -252,15 +280,15 @@ out:
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
 {
        struct tss_struct *tss;
-       long eax;
 /*
  * make sure the vm86() system call doesn't try to do anything silly
  */
-       info->regs.__null_ds = 0;
-       info->regs.__null_es = 0;
+       info->regs.pt.xds = 0;
+       info->regs.pt.xes = 0;
+       info->regs.pt.xgs = 0;
 
-/* we are clearing fs,gs later just before "jmp resume_userspace",
- * because starting with Linux 2.1.x they aren't no longer saved/restored
+/* we are clearing fs later just before "jmp resume_userspace",
+ * because it is not saved/restored.
  */
 
 /*
@@ -268,10 +296,10 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
  * has set it up safely, so this makes sure interrupt etc flags are
  * inherited from protected mode.
  */
-       VEFLAGS = info->regs.eflags;
-       info->regs.eflags &= SAFE_MASK;
-       info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
-       info->regs.eflags |= VM_MASK;
+       VEFLAGS = info->regs.pt.eflags;
+       info->regs.pt.eflags &= SAFE_MASK;
+       info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
+       info->regs.pt.eflags |= VM_MASK;
 
        switch (info->cpu_type) {
                case CPU_286:
@@ -294,7 +322,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        info->regs32->eax = 0;
        tsk->thread.saved_esp0 = tsk->thread.esp0;
        savesegment(fs, tsk->thread.saved_fs);
-       savesegment(gs, tsk->thread.saved_gs);
+       tsk->thread.saved_gs = info->regs32->xgs;
 
        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
@@ -306,19 +334,18 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        tsk->thread.screen_bitmap = info->screen_bitmap;
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);
-       __asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
-       __asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));
 
        /*call audit_syscall_exit since we do not exit via the normal paths */
        if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(eax), eax);
+               audit_syscall_exit(AUDITSC_RESULT(0), 0);
 
        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
+               "mov  %2, %%fs\n\t"
                "jmp resume_userspace"
                : /* no outputs */
-               :"r" (&info->regs), "r" (task_thread_info(tsk)));
+               :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
        /* we never return here */
 }
 
@@ -348,12 +375,12 @@ static inline void clear_IF(struct kernel_vm86_regs * regs)
 
 static inline void clear_TF(struct kernel_vm86_regs * regs)
 {
-       regs->eflags &= ~TF_MASK;
+       regs->pt.eflags &= ~TF_MASK;
 }
 
 static inline void clear_AC(struct kernel_vm86_regs * regs)
 {
-       regs->eflags &= ~AC_MASK;
+       regs->pt.eflags &= ~AC_MASK;
 }
 
 /* It is correct to call set_IF(regs) from the set_vflags_*
@@ -370,7 +397,7 @@ static inline void clear_AC(struct kernel_vm86_regs * regs)
 static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
 {
        set_flags(VEFLAGS, eflags, current->thread.v86mask);
-       set_flags(regs->eflags, eflags, SAFE_MASK);
+       set_flags(regs->pt.eflags, eflags, SAFE_MASK);
        if (eflags & IF_MASK)
                set_IF(regs);
        else
@@ -380,7 +407,7 @@ static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
 {
        set_flags(VFLAGS, flags, current->thread.v86mask);
-       set_flags(regs->eflags, flags, SAFE_MASK);
+       set_flags(regs->pt.eflags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
        else
@@ -389,7 +416,7 @@ static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_reg
 
 static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
 {
-       unsigned long flags = regs->eflags & RETURN_MASK;
+       unsigned long flags = regs->pt.eflags & RETURN_MASK;
 
        if (VEFLAGS & VIF_MASK)
                flags |= IF_MASK;
@@ -493,7 +520,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
        unsigned long __user *intr_ptr;
        unsigned long segoffs;
 
-       if (regs->cs == BIOSSEG)
+       if (regs->pt.xcs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &KVM86->int_revectored))
                goto cannot_handle;
@@ -505,9 +532,9 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
-       pushw(ssp, sp, regs->cs, cannot_handle);
+       pushw(ssp, sp, regs->pt.xcs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
-       regs->cs = segoffs >> 16;
+       regs->pt.xcs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
@@ -524,7 +551,7 @@ int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno
        if (VMPI.is_vm86pus) {
                if ( (trapno==3) || (trapno==1) )
                        return_to_32bit(regs, VM86_TRAP + (trapno << 8));
-               do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
+               do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
                return 0;
        }
        if (trapno !=1)
@@ -560,10 +587,10 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
                handle_vm86_trap(regs, 0, 1); \
        return; } while (0)
 
-       orig_flags = *(unsigned short *)&regs->eflags;
+       orig_flags = *(unsigned short *)&regs->pt.eflags;
 
-       csp = (unsigned char __user *) (regs->cs << 4);
-       ssp = (unsigned char __user *) (regs->ss << 4);
+       csp = (unsigned char __user *) (regs->pt.xcs << 4);
+       ssp = (unsigned char __user *) (regs->pt.xss << 4);
        sp = SP(regs);
        ip = IP(regs);
 
@@ -650,7 +677,7 @@ void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
                        SP(regs) += 6;
                }
                IP(regs) = newip;
-               regs->cs = newcs;
+               regs->pt.xcs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
index c6f84a0322ba30438dda746a630ef68e4ace7306..56e6ad5cb04556380ddbe2a0e386c4c34db03761 100644 (file)
@@ -1,13 +1,26 @@
 /* ld script to make i386 Linux kernel
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ *
+ * Don't define absolute symbols until and unless you know that the symbol's
+ * value should remain constant even if the kernel image is relocated at run
+ * time. Absolute symbols are not relocated. If a symbol's value should change
+ * when the kernel is relocated, make the symbol section-relative and put it
+ * inside the section definition.
  */
 
+/* Don't define absolute symbols until and unless you know that the symbol's
+ * value should remain constant even if the kernel image is relocated at run
+ * time. Absolute symbols are not relocated. If a symbol's value should change
+ * when the kernel is relocated, make the symbol section-relative and put it
+ * inside the section definition.
+ */
 #define LOAD_OFFSET __PAGE_OFFSET
 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/cache.h>
+#include <asm/boot.h>
 
 OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
 OUTPUT_ARCH(i386)
@@ -21,34 +34,35 @@ PHDRS {
 }
 SECTIONS
 {
-  . = __KERNEL_START;
+  . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
   phys_startup_32 = startup_32 - LOAD_OFFSET;
   /* read-only */
-  _text = .;                   /* Text and read-only data */
   .text : AT(ADDR(.text) - LOAD_OFFSET) {
+       _text = .;                      /* Text and read-only data */
        *(.text)
        SCHED_TEXT
        LOCK_TEXT
        KPROBES_TEXT
        *(.fixup)
        *(.gnu.warning)
-       } :text = 0x9090
-
-  _etext = .;                  /* End of text section */
+       _etext = .;                     /* End of text section */
+  } :text = 0x9090
 
   . = ALIGN(16);               /* Exception table */
-  __start___ex_table = .;
-  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
-  __stop___ex_table = .;
+  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+       __start___ex_table = .;
+        *(__ex_table)
+       __stop___ex_table = .;
+  }
 
   RODATA
 
   . = ALIGN(4);
-  __tracedata_start = .;
   .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
+       __tracedata_start = .;
        *(.tracedata)
+       __tracedata_end = .;
   }
-  __tracedata_end = .;
 
   /* writeable */
   . = ALIGN(4096);
@@ -57,11 +71,19 @@ SECTIONS
        CONSTRUCTORS
        } :data
 
+  .paravirtprobe : AT(ADDR(.paravirtprobe) - LOAD_OFFSET) {
+       __start_paravirtprobe = .;
+       *(.paravirtprobe)
+       __stop_paravirtprobe = .;
+  }
+
   . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
+  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+       __nosave_begin = .;
+       *(.data.nosave)
+       . = ALIGN(4096);
+       __nosave_end = .;
+  }
 
   . = ALIGN(4096);
   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
@@ -75,17 +97,10 @@ SECTIONS
 
   /* rarely changed data like cpu maps */
   . = ALIGN(32);
-  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
-  _edata = .;                  /* End of data section */
-
-#ifdef CONFIG_STACK_UNWIND
-  . = ALIGN(4);
-  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
-       __start_unwind = .;
-       *(.eh_frame)
-       __end_unwind = .;
+  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+       *(.data.read_mostly)
+       _edata = .;             /* End of data section */
   }
-#endif
 
   . = ALIGN(THREAD_SIZE);      /* init_task */
   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
@@ -94,88 +109,102 @@ SECTIONS
 
   /* might get freed after init */
   . = ALIGN(4096);
-  __smp_alt_begin = .;
-  __smp_alt_instructions = .;
   .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+       __smp_alt_begin = .;
+       __smp_alt_instructions = .;
        *(.smp_altinstructions)
+       __smp_alt_instructions_end = .;
   }
-  __smp_alt_instructions_end = .;
   . = ALIGN(4);
-  __smp_locks = .;
   .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+       __smp_locks = .;
        *(.smp_locks)
+       __smp_locks_end = .;
   }
-  __smp_locks_end = .;
   .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
        *(.smp_altinstr_replacement)
+       __smp_alt_end = .;
   }
+  /* will be freed after init
+   * The following ALIGN() is required to make sure no other data falls on
+   * the same page that __smp_alt_end points to, as that page might be freed
+   * after boot. Always make sure an ALIGN() directive is present after the
+   * section that contains __smp_alt_end.
+   */
   . = ALIGN(4096);
-  __smp_alt_end = .;
 
   /* will be freed after init */
   . = ALIGN(4096);             /* Init code and data */
-  __init_begin = .;
   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+       __init_begin = .;
        _sinittext = .;
        *(.init.text)
        _einittext = .;
   }
   .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
   . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
+  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+       __setup_start = .;
+       *(.init.setup)
+       __setup_end = .;
+   }
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+       __initcall_start = .;
        INITCALLS
+       __initcall_end = .;
   }
-  __initcall_end = .;
-  __con_initcall_start = .;
   .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+       __con_initcall_start = .;
        *(.con_initcall.init)
+       __con_initcall_end = .;
   }
-  __con_initcall_end = .;
   SECURITY_INIT
   . = ALIGN(4);
-  __alt_instructions = .;
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+       __alt_instructions = .;
        *(.altinstructions)
+       __alt_instructions_end = .;
   }
-  __alt_instructions_end = .; 
   .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
        *(.altinstr_replacement)
   }
+  . = ALIGN(4);
+  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
+       __start_parainstructions = .;
+       *(.parainstructions)
+       __stop_parainstructions = .;
+  }
   /* .exit.text is discarded at runtime, not at link time, to deal with references
      from .altinstructions and .eh_frame */
   .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
   .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
   . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
-  __initramfs_end = .;
+  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+       __initramfs_start = .;
+       *(.init.ramfs)
+       __initramfs_end = .;
+  }
   . = ALIGN(L1_CACHE_BYTES);
-  __per_cpu_start = .;
-  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
-  __per_cpu_end = .;
+  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
+       __per_cpu_start = .;
+       *(.data.percpu)
+       __per_cpu_end = .;
+  }
   . = ALIGN(4096);
-  __init_end = .;
   /* freed after init ends here */
        
-  __bss_start = .;             /* BSS */
-  .bss.page_aligned : AT(ADDR(.bss.page_aligned) - LOAD_OFFSET) {
-       *(.bss.page_aligned)
-  }
   .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+       __init_end = .;
+       __bss_start = .;                /* BSS */
+       *(.bss.page_aligned)
        *(.bss)
+       . = ALIGN(4);
+       __bss_stop = .;
+       _end = . ;
+       /* This is where the kernel creates the early boot page tables */
+       . = ALIGN(4096);
+       pg0 = . ;
   }
-  . = ALIGN(4);
-  __bss_stop = .; 
-
-  _end = . ;
-
-  /* This is where the kernel creates the early boot page tables */
-  . = ALIGN(4096);
-  pg0 = .;
 
   /* Sections to be discarded */
   /DISCARD/ : {
index 94b1fd9cbe3cdaef7aaed0b5dcb84bfcff12d7a4..a7b3999bb37a8e4ab63b8f189de86e2619ee89fb 100644 (file)
@@ -45,7 +45,9 @@ static int __init parse_apic(char *arg)
                        return 0;
                }
        }
-       return -ENOENT;
+
+       /* Parsed again by __setup for debug/verbose */
+       return 0;
 }
 early_param("apic", parse_apic);
 
index f50c6c6ad680e8eb7c2092757d18291fa3e68ab6..943a9473b138c25752e4011f517b6126baeeb2ac 100644 (file)
@@ -776,7 +776,7 @@ voyager_cat_init(void)
                for(asic=0; asic < (*modpp)->num_asics; asic++) {
                        int j;
                        voyager_asic_t *asicp = *asicpp 
-                               = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
+                               = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL); /*&voyager_asic_storage[asic_count++];*/
                        voyager_sp_table_t *sp_table;
                        voyager_at_t *asic_table;
                        voyager_jtt_t *jtag_table;
@@ -785,7 +785,6 @@ voyager_cat_init(void)
                                printk("**WARNING** kmalloc failure in cat_init\n");
                                continue;
                        }
-                       memset(asicp, 0, sizeof(voyager_asic_t));
                        asicpp = &(asicp->next);
                        asicp->asic_location = asic;
                        sp_table = (voyager_sp_table_t *)(eprom_buf + sp_offset);
@@ -851,8 +850,7 @@ voyager_cat_init(void)
 #endif
 
                {
-                       struct resource *res = kmalloc(sizeof(struct resource),GFP_KERNEL);
-                       memset(res, 0, sizeof(struct resource));
+                       struct resource *res = kzalloc(sizeof(struct resource),GFP_KERNEL);
                        res->name = kmalloc(128, GFP_KERNEL);
                        sprintf((char *)res->name, "Voyager %s Quad CPI", cat_module_name(i));
                        res->start = qic_addr;
index f3fea2ad50fea81c5a4a3decfcdfc52d041b5f61..55428e656a3f9a90bddcb50770e92e7434fab16c 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
+#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,6 +423,7 @@ find_smp_config(void)
             VOYAGER_SUS_IN_CONTROL_PORT);
 
        current_thread_info()->cpu = boot_cpu_id;
+       write_pda(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -458,7 +460,7 @@ start_secondary(void *unused)
        /* external functions not defined in the headers */
        extern void calibrate_delay(void);
 
-       cpu_init();
+       secondary_cpu_init();
 
        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);
@@ -578,6 +580,15 @@ do_boot_cpu(__u8 cpu)
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *) idle->thread.esp;
 
+       /* Pre-allocate and initialize the CPU's GDT and PDA so it
+          doesn't have to do any memory allocation during the
+          delicate CPU-bringup phase. */
+       if (!init_gdt(cpu, idle)) {
+               printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
+               cpucount--;
+               return;
+       }
+
        irq_ctx_init(cpu);
 
        /* Note: Don't modify initial ss override */
@@ -1963,4 +1974,5 @@ void __init
 smp_setup_processor_id(void)
 {
        current_thread_info()->cpu = hard_smp_processor_id();
+       write_pda(cpu_number, hard_smp_processor_id());
 }
index d62b20a3e660f5341c2bccf77c0a91885addc5e5..65120f52385332c67aa1b5275f407bc377a79c17 100644 (file)
@@ -57,6 +57,7 @@
 #define TAG_Special    Const(2)        /* De-normal, + or - infinity,
                                           or Not a Number */
 #define TAG_Empty      Const(3)        /* empty */
+#define TAG_Error      Const(0x80)     /* probably need to abort */
 
 #define LOADED_DATA    Const(10101)    /* Special st() number to identify
                                           loaded data (not on stack). */
index d93f16ef828f5986f7930b4aabbdd55ca92fd9cb..ddf8fa3bbd01d7c15ef03abf6c7c55c5356ebd57 100644 (file)
@@ -742,7 +742,8 @@ int save_i387_soft(void *s387, struct _fpstate __user * buf)
   S387->fcs &= ~0xf8000000;
   S387->fos |= 0xffff0000;
 #endif /* PECULIAR_486 */
-  __copy_to_user(d, &S387->cwd, 7*4);
+  if (__copy_to_user(d, &S387->cwd, 7*4))
+    return -1;
   RE_ENTRANT_CHECK_ON;
 
   d += 7*4;
index bf26341c8bdeaa01e771a73130065786e769ed71..a3ae28c49dddad063de9c177f7d0f13480df3da5 100644 (file)
@@ -68,6 +68,7 @@
 
 #define FPU_access_ok(x,y,z)   if ( !access_ok(x,y,z) ) \
                                math_abort(FPU_info,SIGSEGV)
+#define FPU_abort              math_abort(FPU_info, SIGSEGV)
 
 #undef FPU_IGNORE_CODE_SEGV
 #ifdef FPU_IGNORE_CODE_SEGV
index 85314be2fef8d9fc89995ba628f214fc2f153d9a..eebd6fb1c8a8a1933b0ffd1c3563aeac0b31a731 100644 (file)
@@ -227,6 +227,8 @@ int FPU_load_store(u_char type, fpu_addr_modes addr_modes,
     case 027:      /* fild m64int */
       clear_C1();
       loaded_tag = FPU_load_int64((long long __user *)data_address);
+      if (loaded_tag == TAG_Error)
+       return 0;
       FPU_settag0(loaded_tag);
       break;
     case 030:     /* fstenv  m14/28byte */
index f06ed41d191d53f5496c295bfaf1ca3b07179b69..e976caef64982849f3caa918ec5181bb3958b237 100644 (file)
@@ -244,7 +244,8 @@ int FPU_load_int64(long long __user *_s)
 
   RE_ENTRANT_CHECK_OFF;
   FPU_access_ok(VERIFY_READ, _s, 8);
-  copy_from_user(&s,_s,8);
+  if (copy_from_user(&s,_s,8))
+    FPU_abort;
   RE_ENTRANT_CHECK_ON;
 
   if (s == 0)
@@ -907,7 +908,8 @@ int FPU_store_int64(FPU_REG *st0_ptr, u_char st0_tag, long long __user *d)
 
   RE_ENTRANT_CHECK_OFF;
   FPU_access_ok(VERIFY_WRITE,d,8);
-  copy_to_user(d, &tll, 8);
+  if (copy_to_user(d, &tll, 8))
+    FPU_abort;
   RE_ENTRANT_CHECK_ON;
 
   return 1;
@@ -1336,7 +1338,8 @@ u_char __user *fstenv(fpu_addr_modes addr_modes, u_char __user *d)
       I387.soft.fcs &= ~0xf8000000;
       I387.soft.fos |= 0xffff0000;
 #endif /* PECULIAR_486 */
-      __copy_to_user(d, &control_word, 7*4);
+      if (__copy_to_user(d, &control_word, 7*4))
+       FPU_abort;
       RE_ENTRANT_CHECK_ON;
       d += 0x1c;
     }
@@ -1359,9 +1362,11 @@ void fsave(fpu_addr_modes addr_modes, u_char __user *data_address)
   FPU_access_ok(VERIFY_WRITE,d,80);
 
   /* Copy all registers in stack order. */
-  __copy_to_user(d, register_base+offset, other);
+  if (__copy_to_user(d, register_base+offset, other))
+    FPU_abort;
   if ( offset )
-    __copy_to_user(d+other, register_base, offset);
+    if (__copy_to_user(d+other, register_base, offset))
+      FPU_abort;
   RE_ENTRANT_CHECK_ON;
 
   finit();
index 4de11f508c3a0ffb161aa95e8cd910471323aae4..4de95a17a7d4de6e7cefc8604da03f3dda0fa56f 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #undef CONFIG_X86_PAE
+#undef CONFIG_PARAVIRT
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
index ddbdb0336f28f1c6d4d303d11055a929b7daf286..103b76e56a9484ea579f0a14f4626859a2e4bf8b 100644 (file)
@@ -168,7 +168,7 @@ static void __init allocate_pgdat(int nid)
        if (nid && node_has_online_mem(nid))
                NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
        else {
-               NODE_DATA(nid) = (pg_data_t *)(__va(min_low_pfn << PAGE_SHIFT));
+               NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(min_low_pfn));
                min_low_pfn += PFN_UP(sizeof(pg_data_t));
        }
 }
index 2581575786c135a0a0201cf63caf754a8654a755..aaaa4d225f7e25ad86c66511fcdf06d598ce9a09 100644 (file)
@@ -22,9 +22,9 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/desc.h>
 #include <asm/kdebug.h>
 #include <asm/segment.h>
@@ -167,7 +167,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
 { 
        unsigned long limit;
-       unsigned long instr = get_segment_eip (regs, &limit);
+       unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
        int scan_more = 1;
        int prefetch = 0; 
        int i;
@@ -177,9 +177,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
                unsigned char instr_hi;
                unsigned char instr_lo;
 
-               if (instr > limit)
+               if (instr > (unsigned char *)limit)
                        break;
-               if (__get_user(opcode, (unsigned char __user *) instr))
+               if (probe_kernel_address(instr, opcode))
                        break; 
 
                instr_hi = opcode & 0xf0; 
@@ -204,9 +204,9 @@ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
-                       if (instr > limit)
+                       if (instr > (unsigned char *)limit)
                                break;
-                       if (__get_user(opcode, (unsigned char __user *) instr))
+                       if (probe_kernel_address(instr, opcode))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
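
The two hunks above replace __get_user() on a kernel-space instruction pointer with probe_kernel_address() from <linux/uaccess.h>, which can read a possibly faulting kernel location without pretending the pointer is a __user one. A minimal sketch of that pattern, with an illustrative helper name that is not part of the patch:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Safely fetch one opcode byte from a possibly invalid kernel address.
 * probe_kernel_address() returns 0 on success and a non-zero value if
 * the access would fault, so the caller can simply stop scanning. */
static int example_read_opcode(const unsigned char *instr, unsigned char *opcode)
{
	if (probe_kernel_address(instr, *opcode))
		return -EFAULT;
	return 0;
}
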
index f9f647cdbc7ba3550e83d82c0c9e6996b3ae8878..e0fa6cb655a82574a50c3e8fc9334c2f71b9c156 100644 (file)
@@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
        unsigned long vaddr;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       inc_preempt_count();
+       pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
@@ -50,26 +50,22 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-       if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
-               dec_preempt_count();
-               preempt_check_resched();
-               return;
-       }
-
-       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-               BUG();
-#endif
        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.  Keeping stale mappings around is also a
         * bad idea, in case the page changes cacheability attributes or becomes
         * a protected page in a hypervisor.
         */
-       kpte_clear_flush(kmap_pte-idx, vaddr);
+       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+               kpte_clear_flush(kmap_pte-idx, vaddr);
+       else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+               BUG_ON(vaddr < PAGE_OFFSET);
+               BUG_ON(vaddr >= (unsigned long)high_memory);
+#endif
+       }
 
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
 }
 
 /* This is the same as kmap_atomic() but can map memory that doesn't
@@ -80,7 +76,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
        enum fixed_addresses idx;
        unsigned long vaddr;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
index 1719a8141f81aee9f480087be3e54a68eef4adef..34728e4afe4806ad88982922ebbf9654023e4b64 100644 (file)
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+                               struct vm_area_struct *vma,
+                               unsigned long addr, pgoff_t idx)
+{
+       unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+                               svma->vm_start;
+       unsigned long sbase = saddr & PUD_MASK;
+       unsigned long s_end = sbase + PUD_SIZE;
+
+       /*
+        * match the virtual addresses, permission and the alignment of the
+        * page table page.
+        */
+       if (pmd_index(addr) != pmd_index(saddr) ||
+           vma->vm_flags != svma->vm_flags ||
+           sbase < svma->vm_start || svma->vm_end < s_end)
+               return 0;
+
+       return saddr;
+}
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+       unsigned long base = addr & PUD_MASK;
+       unsigned long end = base + PUD_SIZE;
+
+       /*
+        * check on proper vm_flags and page table alignment
+        */
+       if (vma->vm_flags & VM_MAYSHARE &&
+           vma->vm_start <= base && end <= vma->vm_end)
+               return 1;
+       return 0;
+}
+
+/*
+ * search for a shareable pmd page for hugetlb.
+ */
+static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+       struct vm_area_struct *vma = find_vma(mm, addr);
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+                       vma->vm_pgoff;
+       struct prio_tree_iter iter;
+       struct vm_area_struct *svma;
+       unsigned long saddr;
+       pte_t *spte = NULL;
+
+       if (!vma_shareable(vma, addr))
+               return;
+
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
+               if (svma == vma)
+                       continue;
+
+               saddr = page_table_shareable(svma, vma, addr, idx);
+               if (saddr) {
+                       spte = huge_pte_offset(svma->vm_mm, saddr);
+                       if (spte) {
+                               get_page(virt_to_page(spte));
+                               break;
+                       }
+               }
+       }
+
+       if (!spte)
+               goto out;
+
+       spin_lock(&mm->page_table_lock);
+       if (pud_none(*pud))
+               pud_populate(mm, pud, (unsigned long) spte & PAGE_MASK);
+       else
+               put_page(virt_to_page(spte));
+       spin_unlock(&mm->page_table_lock);
+out:
+       spin_unlock(&mapping->i_mmap_lock);
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
+ * shared (indicated by page_count > 1), unmapping is achieved by clearing the
+ * pud and decrementing the refcount. If the count == 1, the pte page is not
+ * shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ *         0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       pgd_t *pgd = pgd_offset(mm, *addr);
+       pud_t *pud = pud_offset(pgd, *addr);
+
+       BUG_ON(page_count(virt_to_page(ptep)) == 0);
+       if (page_count(virt_to_page(ptep)) == 1)
+               return 0;
+
+       pud_clear(pud);
+       put_page(virt_to_page(ptep));
+       *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+       return 1;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
@@ -25,8 +132,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 
        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
-       if (pud)
+       if (pud) {
+               if (pud_none(*pud))
+                       huge_pmd_share(mm, addr, pud);
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
+       }
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
 
        return pte;
index 167416155ee41606007452bc6d14f2b845757326..84697dfc7348b93c6be087584b8653a0e6fd427b 100644 (file)
@@ -192,8 +192,6 @@ static inline int page_kills_ppro(unsigned long pagenr)
        return 0;
 }
 
-extern int is_available_memory(efi_memory_desc_t *);
-
 int page_is_ram(unsigned long pagenr)
 {
        int i;
@@ -699,8 +697,8 @@ int remove_memory(u64 start, u64 size)
 #endif
 #endif
 
-kmem_cache_t *pgd_cache;
-kmem_cache_t *pmd_cache;
+struct kmem_cache *pgd_cache;
+struct kmem_cache *pmd_cache;
 
 void __init pgtable_cache_init(void)
 {
index 8564b6ae17e330aa7ca48489a3f71bbf458415ba..ad91528bdc14f0cd341a6e3be8d04f29bd9b4e52 100644 (file)
@@ -67,11 +67,17 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
        return base;
 } 
 
-static void flush_kernel_map(void *dummy) 
+static void flush_kernel_map(void *arg)
 { 
-       /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
-       if (boot_cpu_data.x86_model >= 4) 
+       unsigned long adr = (unsigned long)arg;
+
+       if (adr && cpu_has_clflush) {
+               int i;
+               for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+                       asm volatile("clflush (%0)" :: "r" (adr + i));
+       } else if (boot_cpu_data.x86_model >= 4)
                wbinvd();
+
        /* Flush all to work around Errata in early athlons regarding 
         * large page flushing. 
         */
@@ -173,9 +179,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
        return 0;
 } 
 
-static inline void flush_map(void)
+static inline void flush_map(void *adr)
 {
-       on_each_cpu(flush_kernel_map, NULL, 1, 1);
+       on_each_cpu(flush_kernel_map, adr, 1, 1);
 }
 
 /*
@@ -217,9 +223,13 @@ void global_flush_tlb(void)
        spin_lock_irq(&cpa_lock);
        list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
-       flush_map();
-       list_for_each_entry_safe(pg, next, &l, lru)
+       if (!cpu_has_clflush)
+               flush_map(0);
+       list_for_each_entry_safe(pg, next, &l, lru) {
+               if (cpu_has_clflush)
+                       flush_map(page_address(pg));
                __free_page(pg);
+       }
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
index 10126e3f81745f5591eab2e66dc4426394c3c2cb..f349eaf450b0db542c5c3e56c3b598da94562a66 100644 (file)
@@ -95,8 +95,11 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
-       /* <pfn,flags> stored as-is, to permit clearing entries */
-       set_pte(pte, pfn_pte(pfn, flags));
+       if (pgprot_val(flags))
+               /* <pfn,flags> stored as-is, to permit clearing entries */
+               set_pte(pte, pfn_pte(pfn, flags));
+       else
+               pte_clear(&init_mm, vaddr, pte);
 
        /*
         * It's enough to flush this one mapping.
@@ -193,7 +196,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        return pte;
 }
 
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
 {
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
 }
@@ -233,7 +236,7 @@ static inline void pgd_list_del(pgd_t *pgd)
                set_page_private(next, (unsigned long)pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags;
 
@@ -253,7 +256,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
 
index 713d6c866cae0b9f8ecc56c82fc04cebde642d80..42df4b6606dfdbc6f21d2cce0cbb6e8ae3e3ea42 100644 (file)
@@ -45,6 +45,13 @@ void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
        outl(val, 0xcfc);
 }
 
+void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
+{
+       PDprintk("%x writing to %x: %x\n", slot, offset, val);
+       outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
+       outb(val, 0xcfc);
+}
+
 int early_pci_allowed(void)
 {
        return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) ==
index e65551cd82162522ed0dfd919cb6e653f6e1b7b5..f2cb942f828197ae61b36c2314cc8b2fcee75446 100644 (file)
@@ -764,7 +764,7 @@ static void __init pirq_find_router(struct irq_router *r)
        DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
            rt->rtr_vendor, rt->rtr_device);
 
-       pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
+       pirq_router_dev = pci_get_bus_and_slot(rt->rtr_bus, rt->rtr_devfn);
        if (!pirq_router_dev) {
                DBG(KERN_DEBUG "PCI: Interrupt router not found at "
                        "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
@@ -784,6 +784,8 @@ static void __init pirq_find_router(struct irq_router *r)
                pirq_router_dev->vendor,
                pirq_router_dev->device,
                pci_name(pirq_router_dev));
+
+       /* The device remains referenced for the kernel lifetime */
 }
 
 static struct irq_info *pirq_get_info(struct pci_dev *dev)
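
The hunk above switches pirq_find_router() from pci_find_slot() to pci_get_bus_and_slot(), which returns a reference-counted device. A minimal sketch of that lookup pattern, with an illustrative helper that is not part of the patch:

#include <linux/pci.h>

/* Look up a device by bus/devfn and read its vendor ID.  Unlike the old
 * pci_find_slot(), pci_get_bus_and_slot() returns the device with its
 * reference count raised, so the reference must be dropped again with
 * pci_dev_put() -- unless, as in the IRQ-router code above, the device
 * is deliberately kept for the lifetime of the kernel. */
static u16 example_read_vendor(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *dev;
	u16 vendor = 0xffff;

	dev = pci_get_bus_and_slot(bus, devfn);
	if (!dev)
		return vendor;

	pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
	pci_dev_put(dev);		/* drop the reference taken by the lookup */
	return vendor;
}
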
index ed1512a175ab08e200751ed5078f2373a9b5760f..5f5193401beadea0302538b348dacd1a76b261c7 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include "pci.h"
 #include "pci-functions.h"
 
@@ -314,6 +315,10 @@ static struct pci_raw_ops * __devinit pci_find_bios(void)
        for (check = (union bios32 *) __va(0xe0000);
             check <= (union bios32 *) __va(0xffff0);
             ++check) {
+               long sig;
+               if (probe_kernel_address(&check->fields.signature, sig))
+                       continue;
+
                if (check->fields.signature != BIOS32_SIGNATURE)
                        continue;
                length = check->fields.length * 16;
@@ -331,11 +336,13 @@ static struct pci_raw_ops * __devinit pci_find_bios(void)
                }
                DBG("PCI: BIOS32 Service Directory structure at 0x%p\n", check);
                if (check->fields.entry >= 0x100000) {
-                       printk("PCI: BIOS32 entry (0x%p) in high memory, cannot use.\n", check);
+                       printk("PCI: BIOS32 entry (0x%p) in high memory, "
+                                       "cannot use.\n", check);
                        return NULL;
                } else {
                        unsigned long bios32_entry = check->fields.entry;
-                       DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n", bios32_entry);
+                       DBG("PCI: BIOS32 Service Directory entry at 0x%lx\n",
+                                       bios32_entry);
                        bios32_indirect.address = bios32_entry + PAGE_OFFSET;
                        if (check_pcibios())
                                return &pci_bios_access;
index 8cfa4e8a719d6ea2dd9cf140862cb2c6a64b669a..2de7bbf03cd7f0d35a93aa5e7b778127699367ef 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_PM)               += cpu.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
+obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
index 5a1abeff033ba7ed33ecb97151aba3186aa62446..2c15500f8713e001bd8c71fbc59136525a408fb9 100644 (file)
@@ -26,8 +26,8 @@ void __save_processor_state(struct saved_context *ctxt)
        /*
         * descriptor tables
         */
-       store_gdt(&ctxt->gdt_limit);
-       store_idt(&ctxt->idt_limit);
+       store_gdt(&ctxt->gdt);
+       store_idt(&ctxt->idt);
        store_tr(ctxt->tr);
 
        /*
@@ -99,8 +99,8 @@ void __restore_processor_state(struct saved_context *ctxt)
         * now restore the descriptor tables to their proper values
         * ltr is done in fix_processor_context().
         */
-       load_gdt(&ctxt->gdt_limit);
-       load_idt(&ctxt->idt_limit);
+       load_gdt(&ctxt->gdt);
+       load_idt(&ctxt->idt);
 
        /*
         * segment registers
diff --git a/arch/i386/power/suspend.c b/arch/i386/power/suspend.c
new file mode 100644 (file)
index 0000000..db5e98d
--- /dev/null
+++ b/arch/i386/power/suspend.c
@@ -0,0 +1,158 @@
+/*
+ * Suspend support specific for i386 - temporary page tables
+ *
+ * Distribute under GPLv2
+ *
+ * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ */
+
+#include <linux/suspend.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+/* Defined in arch/i386/power/swsusp.S */
+extern int restore_image(void);
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
+
+/* The following three functions are based on the analogous code in
+ * arch/i386/mm/init.c
+ */
+
+/*
+ * Create a middle page table on a resume-safe page and put a pointer to it in
+ * the given global directory entry.  This only returns the gd entry
+ * in non-PAE compilation mode, since the middle layer is folded.
+ */
+static pmd_t *resume_one_md_table_init(pgd_t *pgd)
+{
+       pud_t *pud;
+       pmd_t *pmd_table;
+
+#ifdef CONFIG_X86_PAE
+       pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
+       if (!pmd_table)
+               return NULL;
+
+       set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
+       pud = pud_offset(pgd, 0);
+
+       BUG_ON(pmd_table != pmd_offset(pud, 0));
+#else
+       pud = pud_offset(pgd, 0);
+       pmd_table = pmd_offset(pud, 0);
+#endif
+
+       return pmd_table;
+}
+
+/*
+ * Create a page table on a resume-safe page and place a pointer to it in
+ * a middle page directory entry.
+ */
+static pte_t *resume_one_page_table_init(pmd_t *pmd)
+{
+       if (pmd_none(*pmd)) {
+               pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
+               if (!page_table)
+                       return NULL;
+
+               set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
+
+               BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+
+               return page_table;
+       }
+
+       return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET.  The page tables are allocated out of resume-safe pages.
+ */
+static int resume_physical_mapping_init(pgd_t *pgd_base)
+{
+       unsigned long pfn;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       int pgd_idx, pmd_idx;
+
+       pgd_idx = pgd_index(PAGE_OFFSET);
+       pgd = pgd_base + pgd_idx;
+       pfn = 0;
+
+       for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+               pmd = resume_one_md_table_init(pgd);
+               if (!pmd)
+                       return -ENOMEM;
+
+               if (pfn >= max_low_pfn)
+                       continue;
+
+               for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+                       if (pfn >= max_low_pfn)
+                               break;
+
+                       /* Map with big pages if possible, otherwise create
+                        * normal page tables.
+                        * NOTE: We can mark everything as executable here
+                        */
+                       if (cpu_has_pse) {
+                               set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
+                               pfn += PTRS_PER_PTE;
+                       } else {
+                               pte_t *max_pte;
+
+                               pte = resume_one_page_table_init(pmd);
+                               if (!pte)
+                                       return -ENOMEM;
+
+                               max_pte = pte + PTRS_PER_PTE;
+                               for (; pte < max_pte; pte++, pfn++) {
+                                       if (pfn >= max_low_pfn)
+                                               break;
+
+                                       set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+                               }
+                       }
+               }
+       }
+       return 0;
+}
+
+static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
+{
+#ifdef CONFIG_X86_PAE
+       int i;
+
+       /* Init entries of the first-level page table to the zero page */
+       for (i = 0; i < PTRS_PER_PGD; i++)
+               set_pgd(pg_dir + i,
+                       __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
+#endif
+}
+
+int swsusp_arch_resume(void)
+{
+       int error;
+
+       resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+       if (!resume_pg_dir)
+               return -ENOMEM;
+
+       resume_init_first_level_page_table(resume_pg_dir);
+       error = resume_physical_mapping_init(resume_pg_dir);
+       if (error)
+               return error;
+
+       /* We have got enough memory and from now on we cannot recover */
+       restore_image();
+       return 0;
+}
index 8a2b50a0aaad25ded242877a5d0548d51f5e6543..53662e05b393e2c294734c71577705a5beb550c0 100644 (file)
@@ -28,8 +28,9 @@ ENTRY(swsusp_arch_suspend)
        call swsusp_save
        ret
 
-ENTRY(swsusp_arch_resume)
-       movl    $swsusp_pg_dir-__PAGE_OFFSET, %ecx
+ENTRY(restore_image)
+       movl    resume_pg_dir, %ecx
+       subl    $__PAGE_OFFSET, %ecx
        movl    %ecx, %cr3
 
        movl    restore_pblist, %edx
@@ -51,6 +52,10 @@ copy_loop:
        .p2align 4,,7
 
 done:
+       /* go back to the original page tables */
+       movl    $swapper_pg_dir, %ecx
+       subl    $__PAGE_OFFSET, %ecx
+       movl    %ecx, %cr3
        /* Flush TLB, including "global" things (vmalloc) */
        movl    mmu_cr4_features, %eax
        movl    %eax, %edx
index caab986af70c0ef7efa7a881a7b73c43c2254261..b62f0c4d2c7cb6909b03f8d9d3143bd7bec32a32 100644 (file)
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
        printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
        info->flags = sstate->flags;
        info->xmit_fifo_size = sstate->xmit_fifo_size;
        info->line = line;
-       INIT_WORK(&info->work, do_softint, info);
+       INIT_WORK(&info->work, do_softint);
        info->state = sstate;
        if (sstate->info) {
                kfree(info);
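
The simserial hunks above, like the ia64_mca and do_fork_idle hunks further down, follow the workqueue API change: handlers now take a struct work_struct * instead of a void *, INIT_WORK()/DECLARE_WORK() lose their data argument, and per-object context is recovered via container_of(). A minimal sketch of the converted pattern, using illustrative names that are not part of these patches:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct example_port {
	struct work_struct work;	/* embedded in the object that owns it */
	int line;
};

/* The handler now receives the work item itself; the owning object is
 * recovered with container_of() instead of arriving as a void *data. */
static void example_softint(struct work_struct *work)
{
	struct example_port *port = container_of(work, struct example_port, work);

	printk(KERN_DEBUG "example: softint on line %d\n", port->line);
}

static void example_port_init(struct example_port *port, int line)
{
	port->line = line;
	INIT_WORK(&port->work, example_softint);	/* no data argument any more */
}

/* From interrupt or timer context the work is still queued with
 * schedule_work(&port->work); keventd then calls example_softint(). */
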
index daa6b91bc921907435c47151303cc8ef81d6ff59..578737ec7629374253ea7b53029b8a3fe7d6848c 100644 (file)
@@ -91,7 +91,7 @@ ia64_elf32_init (struct pt_regs *regs)
         * it with privilege level 3 because the IVE uses non-privileged accesses to these
         * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
@@ -117,7 +117,7 @@ ia64_elf32_init (struct pt_regs *regs)
         * code is locked in specific gate page, which is pointed by pretcode
         * when setup_frame_ia32
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
@@ -142,7 +142,7 @@ ia64_elf32_init (struct pt_regs *regs)
         * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
         * until a task modifies them via modify_ldt().
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
@@ -214,7 +214,7 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
                bprm->loader += stack_base;
        bprm->exec += stack_base;
 
-       mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!mpnt)
                return -ENOMEM;
 
index c187743965a02029df7090843ae30e2059afe4d6..6af400a12ca1335ca324a0e6c388ad7ec50c8819 100644 (file)
@@ -249,7 +249,7 @@ ia32_init (void)
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
        {
-               extern kmem_cache_t *partial_page_cachep;
+               extern struct kmem_cache *partial_page_cachep;
 
                partial_page_cachep = kmem_cache_create("partial_page_cache",
                                                        sizeof(struct partial_page), 0, 0,
index 703a67c934f84c6619bfa7693c247eb9fca6e143..cfa0bc0026b5dd42b283242dfb13effc0e71ca7f 100644 (file)
@@ -330,8 +330,6 @@ struct old_linux32_dirent {
 void ia64_elf32_init(struct pt_regs *regs);
 #define ELF_PLAT_INIT(_r, load_addr)   ia64_elf32_init(_r)
 
-#define elf_addr_t     u32
-
 /* This macro yields a bitmask that programs can use to figure out
    what instruction set this CPU supports.  */
 #define ELF_HWCAP      0
index 9d6a3f210148f3e1b9a775b8615a88841803e479..a4a6e1463af88ede9e77eabb7bada918b26eb446 100644 (file)
@@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 }
 
 /* SLAB cache for partial_page structures */
-kmem_cache_t *partial_page_cachep;
+struct kmem_cache *partial_page_cachep;
 
 /*
  * init partial_page_list.
index 51217d63285ea35122d9373647e2dd40c5cd0346..4d592ee9300b2739d6c82f569f0b1a02254ee09e 100644 (file)
@@ -481,7 +481,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
        mutex_lock(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn);
+       free_insn_slot(p->ainsn.insn, 0);
        mutex_unlock(&kprobe_mutex);
 }
 /*
index 7cfa63a98cb36adcd48ddb18d6022f94c6cf063b..6bedd97570ca97dceddbd7ab46270160e5a1f771 100644 (file)
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
        on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
        on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
        monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
index 0b546e2b36ac61c113b73ada6e98775a30294e25..c4c10a0b99d94d1957e4457c6b65de6d6ecdf024 100644 (file)
@@ -952,7 +952,6 @@ remove_palinfo_proc_entries(unsigned int hcpu)
        }
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int palinfo_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
@@ -974,7 +973,6 @@ static struct notifier_block palinfo_cpu_notifier =
        .notifier_call = palinfo_cpu_callback,
        .priority = 0,
 };
-#endif
 
 static int __init
 palinfo_init(void)
index 3aaede0d6981783037b1eaec5d0a0b7e364d57ef..e2321536ee4c067249bc9169524e1da0933f1cc9 100644 (file)
@@ -2302,7 +2302,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned lon
        DPRINT(("smpl_buf @%p\n", smpl_buf));
 
        /* allocate vma */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                DPRINT(("Cannot allocate vma\n"));
                goto error_kmem;
index e63b8ca5344a202d85b8cb1d25a4edb08c486894..fd607ca51a8df0f184f9c4327742b7defc1925db 100644 (file)
@@ -575,7 +575,6 @@ static struct file_operations salinfo_data_fops = {
        .write   = salinfo_log_write,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
@@ -620,7 +619,6 @@ static struct notifier_block salinfo_cpu_notifier =
        .notifier_call = salinfo_cpu_callback,
        .priority = 0,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init
 salinfo_init(void)
index f7d7f5668144fcec8e76bcadb66c5df27aad17d9..b21ddecea943a63f51130b20fd8372322cec29e2 100644 (file)
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }
 
 struct create_idle {
+       struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-       struct create_idle *c_idle = _c_idle;
+       struct create_idle *c_idle =
+               container_of(work, struct create_idle, work);
 
        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
        int timeout;
        struct create_idle c_idle = {
+               .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                .cpu    = cpu,
                .done   = COMPLETION_INITIALIZER(c_idle.done),
        };
-       DECLARE_WORK(work, do_fork_idle, &c_idle);
 
        c_idle.idle = get_idle_for_cpu(cpu);
        if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
         * We can't use kernel_thread since we must avoid to reschedule the child.
         */
        if (!keventd_up() || current_is_keventd())
-               work.func(work.data);
+               c_idle.work.func(&c_idle.work);
        else {
-               schedule_work(&work);
+               schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }
 
index 5629b45e89c6bc50892c4a8e69b66d0f0be67d86..687500ddb4b872a59676396dd04cbed4c7a214ef 100644 (file)
@@ -31,11 +31,11 @@ int arch_register_cpu(int num)
 {
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
        /*
-        * If CPEI cannot be re-targetted, and this is
-        * CPEI target, then dont create the control file
+        * If CPEI can be re-targetted or if this is not
+        * CPEI target, then it is hotpluggable
         */
-       if (!can_cpei_retarget() && is_cpu_cpei_target(num))
-               sysfs_cpus[num].cpu.no_control = 1;
+       if (can_cpei_retarget() || !is_cpu_cpei_target(num))
+               sysfs_cpus[num].cpu.hotpluggable = 1;
        map_cpu_to_node(num, node_cpuid[num].nid);
 #endif
 
index f3a9585e98a8337a8cbd8b06c6916d5a8b3a8612..0c7e94edc20e9eaf9b30217246bbe5de3bafd191 100644 (file)
@@ -64,6 +64,11 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
        return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
index ff87a5cba399f873348858fa7775a608ab2c3fb7..56dc2024220e424d2dd6e1d5de57c4c65e17a0e0 100644 (file)
@@ -156,7 +156,7 @@ ia64_init_addr_space (void)
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
@@ -175,7 +175,7 @@ ia64_init_addr_space (void)
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        memset(vma, 0, sizeof(*vma));
                        vma->vm_mm = current->mm;
index f4edfbf27134e5e0c19432f750173ae1932d34f8..eb92cef9cd0de15190d303b0d076c77c480e2944 100644 (file)
@@ -564,8 +564,8 @@ pcibios_enable_device (struct pci_dev *dev, int mask)
 void
 pcibios_disable_device (struct pci_dev *dev)
 {
-       if (dev->is_enabled)
-               acpi_pci_irq_disable(dev);
+       BUG_ON(atomic_read(&dev->enable_cnt));
+       acpi_pci_irq_disable(dev);
 }
 
 void
index 0e7778be33ccc93503e5b28a4f902f01e13674b4..936205f7aba079284ad4ad82318850f13659b1b0 100644 (file)
@@ -196,9 +196,7 @@ static unsigned long __init setup_memory(void)
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE);
-                       initrd_start = INITRD_START ?
-                               INITRD_START + PAGE_OFFSET : 0;
-
+                       initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start + INITRD_SIZE;
                        printk("initrd:start[%08lx],size[%08lx]\n",
                                initrd_start, INITRD_SIZE);
index b60cea4aebaa6d9e663cae8eef42084d4b30fd26..092ea86bb0794a473780c2b474197f2bcf4ef1ee 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/unistd.h>
 #include <linux/stddef.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
index abb34ccd59862453a6c20265a8268fc58789d1ce..c7efdb0aefc5998768b0984137b035ce3f1cd7c3 100644 (file)
@@ -105,9 +105,7 @@ unsigned long __init setup_memory(void)
                if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
                        reserve_bootmem_node(NODE_DATA(0), INITRD_START,
                                INITRD_SIZE);
-                       initrd_start = INITRD_START ?
-                               INITRD_START + PAGE_OFFSET : 0;
-
+                       initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start + INITRD_SIZE;
                        printk("initrd:start[%08lx],size[%08lx]\n",
                                initrd_start, INITRD_SIZE);
index de1304c91112e1a42bd56ebe4de4bf40ef528584..fa015d80161701ccf3e8a390f8e373e072ba08f8 100644 (file)
@@ -52,10 +52,9 @@ void *amiga_chip_alloc(unsigned long size, const char *name)
 #ifdef DEBUG
     printk("amiga_chip_alloc: allocate %ld bytes\n", size);
 #endif
-    res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+    res = kzalloc(sizeof(struct resource), GFP_KERNEL);
     if (!res)
        return NULL;
-    memset(res, 0, sizeof(struct resource));
     res->name = name;
 
     if (allocate_resource(&chipram_res, res, size, 0, UINT_MAX, PAGE_SIZE, NULL, NULL) < 0) {
index 6ca57b6564da1ab138f318cd8247f885acd3180d..bee2b1443e368763d9473bc86894b3ed0b60a5b2 100644 (file)
@@ -375,10 +375,9 @@ struct pci_bus_info * __init init_hades_pci(void)
         * Allocate memory for bus info structure.
         */
 
-       bus = kmalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
+       bus = kzalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
        if (!bus)
                return NULL;
-       memset(bus, 0, sizeof(struct pci_bus_info));
 
        /*
         * Claim resources. The m68k has no separate I/O space, both
index 911f2ce3f53e11ec6d1e0deeba875735d5c2a13b..2adbeb16e1b8aba1bc98e148121d9dc0d9b207c2 100644 (file)
@@ -99,7 +99,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        down_read(&mm->mmap_sem);
index 58afa8be604ea1f1f6ab308e3634ea258f63c4bc..2b2a10da64a4b3af8e8daebaadc876314bc8fcd9 100644 (file)
@@ -60,6 +60,7 @@ SECTIONS {
 #endif
 
        .text : {
+               _text = .;
                _stext = . ;
                *(.text)
                SCHED_TEXT
index 24781f009337ae69a4b7e33e10b1697daf2edd50..e5668af19789ccae8e2843befd28f2749341d3ed 100644 (file)
@@ -3,7 +3,7 @@
 /*
  *     timers.c -- generic ColdFire hardware timer support.
  *
- *     Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ *     Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
  */
 
 /***************************************************************************/
@@ -44,6 +44,14 @@ unsigned int mcf_timerlevel = 5;
 extern void mcf_settimericr(int timer, int level);
 extern int mcf_timerirqpending(int timer);
 
+#if defined(CONFIG_M532x)
+#define        __raw_readtrr   __raw_readl
+#define        __raw_writetrr  __raw_writel
+#else
+#define        __raw_readtrr   __raw_readw
+#define        __raw_writetrr  __raw_writew
+#endif
+
 /***************************************************************************/
 
 void coldfire_tick(void)
@@ -57,7 +65,7 @@ void coldfire_tick(void)
 void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
 {
        __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
-       __raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+       __raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
        __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
                MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
 
@@ -76,7 +84,7 @@ unsigned long coldfire_timer_offset(void)
        unsigned long trr, tcn, offset;
 
        tcn = __raw_readw(TA(MCFTIMER_TCN));
-       trr = __raw_readw(TA(MCFTIMER_TRR));
+       trr = __raw_readtrr(TA(MCFTIMER_TRR));
        offset = (tcn * (1000000 / HZ)) / trr;
 
        /* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@ void coldfire_profile_init(void)
        /* Set up TIMER 2 as high speed profile clock */
        __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
 
-       __raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+       __raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
        __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
                MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
 
index c5482e3622eb158b76804c8d5e5a45a47df16b68..1b36f62617645358a2c8b0aadd20d7c77bbc1f6c 100644 (file)
@@ -114,7 +114,7 @@ void BSP_gettod (int *yearp, int *monp, int *dayp,
 {
 }
 
-int BSP_hwclk(int op, struct hwclk_time *t)
+int BSP_hwclk(int op, struct rtc_time *t)
 {
   if (!op) {
     /* read */
index 27f83e64296887127d2fff9fe526f543c113fcda..d8af858fe3f5eb2b319bfc2101e152e33b14b6c1 100644 (file)
@@ -16,6 +16,7 @@ config MIPS_MTX1
        bool "4G Systems MTX-1 board"
        select DMA_NONCOHERENT
        select HW_HAS_PCI
+       select RESOURCES_64BIT if PCI
        select SOC_AU1500
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -32,6 +33,7 @@ config MIPS_PB1000
        select SOC_AU1000
        select DMA_NONCOHERENT
        select HW_HAS_PCI
+       select RESOURCES_64BIT if PCI
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -41,6 +43,7 @@ config MIPS_PB1100
        select SOC_AU1100
        select DMA_NONCOHERENT
        select HW_HAS_PCI
+       select RESOURCES_64BIT if PCI
        select SWAP_IO_SPACE
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -50,6 +53,7 @@ config MIPS_PB1500
        select SOC_AU1500
        select DMA_NONCOHERENT
        select HW_HAS_PCI
+       select RESOURCES_64BIT if PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -59,6 +63,7 @@ config MIPS_PB1550
        select DMA_NONCOHERENT
        select HW_HAS_PCI
        select MIPS_DISABLE_OBSOLETE_IDE
+       select RESOURCES_64BIT if PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -67,6 +72,7 @@ config MIPS_PB1200
        select SOC_AU1200
        select DMA_NONCOHERENT
        select MIPS_DISABLE_OBSOLETE_IDE
+       select RESOURCES_64BIT if PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -75,6 +81,7 @@ config MIPS_DB1000
        select SOC_AU1000
        select DMA_NONCOHERENT
        select HW_HAS_PCI
+       select RESOURCES_64BIT if PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -91,6 +98,7 @@ config MIPS_DB1500
        select DMA_NONCOHERENT
        select HW_HAS_PCI
        select MIPS_DISABLE_OBSOLETE_IDE
+       select RESOURCES_64BIT if PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -101,6 +109,7 @@ config MIPS_DB1550
        select HW_HAS_PCI
        select DMA_NONCOHERENT
        select MIPS_DISABLE_OBSOLETE_IDE
+       select RESOURCES_64BIT if PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -233,6 +242,7 @@ config LASAT
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config MIPS_ATLAS
        bool "MIPS Atlas board"
@@ -256,6 +266,7 @@ config MIPS_ATLAS
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_MULTITHREADING if EXPERIMENTAL
+       select GENERIC_HARDIRQS_NO__DO_IRQ
        help
          This enables support for the MIPS Technologies Atlas evaluation
          board.
@@ -410,6 +421,7 @@ config MOMENCO_OCELOT_C
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
        help
          The Ocelot is a MIPS-based Single Board Computer (SBC) made by
          Momentum Computer <http://www.momenco.com/>.
@@ -560,6 +572,7 @@ config SGI_IP27
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_NUMA
        select SYS_SUPPORTS_SMP
+       select GENERIC_HARDIRQS_NO__DO_IRQ
        help
          This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
          workstations.  To compile a Linux kernel that runs on these, say Y
@@ -826,6 +839,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
        bool
        default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+       bool
+       default n
+
 #
 # Select some configuration options automatically based on user selections.
 #
@@ -987,6 +1004,7 @@ config SOC_PNX8550
        select HW_HAS_PCI
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_32BIT_KERNEL
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config SWAP_IO_SPACE
        bool
@@ -1268,6 +1286,7 @@ config CPU_RM9000
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select WEAK_ORDERING
 
 config CPU_SB1
        bool "SB1"
@@ -1276,6 +1295,7 @@ config CPU_SB1
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_64BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select WEAK_ORDERING
 
 endchoice
 
@@ -1336,6 +1356,8 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
        bool
 
+config WEAK_ORDERING
+       bool
 endmenu
 
 #
@@ -1940,6 +1962,11 @@ config COMPAT
        depends on MIPS32_COMPAT
        default y
 
+config SYSVIPC_COMPAT
+       bool
+       depends on COMPAT && SYSVIPC
+       default y
+
 config MIPS32_O32
        bool "Kernel support for o32 binaries"
        depends on MIPS32_COMPAT
index c8430c07355ed5c26da325ac7e0d79f1dc251e21..6d55e8aab66830fa8dd5c388724dfe91baac735a 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 
index 269b22b34313212191a70fba6154477f79318cb8..4c7cb4048d358126b84b81d509fe7a8d9ec94903 100644 (file)
@@ -67,7 +67,6 @@ static struct irq_chip ioasic_irq_type = {
        .mask = mask_ioasic_irq,
        .mask_ack = ack_ioasic_irq,
        .unmask = unmask_ioasic_irq,
-       .end = end_ioasic_irq,
 };
 
 
@@ -106,8 +105,7 @@ void __init init_ioasic_irqs(int base)
                set_irq_chip_and_handler(i, &ioasic_irq_type,
                                         handle_level_irq);
        for (; i < base + IO_IRQ_LINES; i++)
-               set_irq_chip_and_handler(i, &ioasic_dma_irq_type,
-                                        handle_level_irq);
+               set_irq_chip(i, &ioasic_dma_irq_type);
 
        ioasic_irq_base = base;
 }
index f19b4617a0a6c4a67f9439a86956aa14ebe91678..d3b8002bf1e77db470427692de4a1e0da8bfe40e 100644 (file)
 #include <linux/types.h>
 
 #include <asm/inst.h>
+#include <asm/irq_regs.h>
 #include <asm/mipsregs.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
index 5a9be4c93584e68a7c70e95775e5ef01aa23c97a..916e46b8ccd8489cb27b74bcfc31da537d6b102f 100644 (file)
@@ -57,19 +57,12 @@ static void ack_kn02_irq(unsigned int irq)
        iob();
 }
 
-static void end_kn02_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               unmask_kn02_irq(irq);
-}
-
 static struct irq_chip kn02_irq_type = {
        .typename = "KN02-CSR",
        .ack = ack_kn02_irq,
        .mask = mask_kn02_irq,
        .mask_ack = ack_kn02_irq,
        .unmask = unmask_kn02_irq,
-       .end = end_kn02_irq,
 };
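
This and the following PIC conversions all delete the board-specific .end callback: with the generic handle_level_irq() flow the core masks and acks the line before running the handler and unmasks it afterwards, so the old "unmask at end unless disabled or in progress" helpers become redundant. A sketch of what the resulting registration looks like — my_irq_chip, my_pic_init and the mask/unmask helpers are placeholders, not code from the patch:

    #include <linux/init.h>
    #include <linux/irq.h>  /* struct irq_chip, handle_level_irq(), set_irq_chip_and_handler() */

    /* Placeholder hardware accessors; a real driver pokes its PIC registers here. */
    static void my_mask_irq(unsigned int irq) { }
    static void my_unmask_irq(unsigned int irq) { }

    static struct irq_chip my_irq_chip = {
            .typename = "MY-PIC",
            .ack      = my_mask_irq,    /* level-triggered: ack is just mask */
            .mask     = my_mask_irq,
            .mask_ack = my_mask_irq,
            .unmask   = my_unmask_irq,
            /* no .end: handle_level_irq() unmasks when the handler returns */
    };

    static void __init my_pic_init(unsigned int base, unsigned int lines)
    {
            unsigned int i;

            for (i = base; i < base + lines; i++)
                    set_irq_chip_and_handler(i, &my_irq_chip, handle_level_irq);
    }
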
 
 
index 59b98299c89694ac3a04fa6fbb23edab0f76066c..8d880f0b06ec1f1a0241ab5bfdd1d1fbfa70bb10 100644 (file)
@@ -56,19 +56,12 @@ static void emma2rh_irq_disable(unsigned int irq)
        ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
 }
 
-static void emma2rh_irq_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               ll_emma2rh_irq_enable(irq - emma2rh_irq_base);
-}
-
 struct irq_chip emma2rh_irq_controller = {
        .typename = "emma2rh_irq",
        .ack = emma2rh_irq_disable,
        .mask = emma2rh_irq_disable,
        .mask_ack = emma2rh_irq_disable,
        .unmask = emma2rh_irq_enable,
-       .end = emma2rh_irq_end,
 };
 
 void emma2rh_irq_init(u32 irq_base)
index 3ac4e405ecdce6253065e5d6f8e464672da516bf..2116d9be5fa9185e20cd1c531f9f62bd28014732 100644 (file)
@@ -48,19 +48,12 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
        ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
 }
 
-static void emma2rh_sw_irq_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               ll_emma2rh_sw_irq_enable(irq - emma2rh_sw_irq_base);
-}
-
 struct irq_chip emma2rh_sw_irq_controller = {
        .typename = "emma2rh_sw_irq",
        .ack = emma2rh_sw_irq_disable,
        .mask = emma2rh_sw_irq_disable,
        .mask_ack = emma2rh_sw_irq_disable,
        .unmask = emma2rh_sw_irq_enable,
-       .end = emma2rh_sw_irq_end,
 };
 
 void emma2rh_sw_irq_init(u32 irq_base)
index 5c4f50cdf1576e59ff4efa4bc907bdc87d4c65f8..f8d417b5c2bbd0f8bb85662db5139e1fefddeeb3 100644 (file)
@@ -39,19 +39,12 @@ void disable_r4030_irq(unsigned int irq)
        spin_unlock_irqrestore(&r4030_lock, flags);
 }
 
-static void end_r4030_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_r4030_irq(irq);
-}
-
 static struct irq_chip r4030_irq_type = {
        .typename = "R4030",
        .ack = disable_r4030_irq,
        .mask = disable_r4030_irq,
        .mask_ack = disable_r4030_irq,
        .unmask = enable_r4030_irq,
-       .end = end_r4030_irq,
 };
 
 void __init init_r4030_ints(void)
index 4a9f1ecefaf2225f3a60a2f402cfc1328d6fb93a..9b34238d41c09d7a91887469fc3ff02eef223704 100644 (file)
@@ -90,7 +90,6 @@ struct elf_prpsinfo32
        char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
-#define elf_addr_t     u32
 #define elf_caddr_t    u32
 #define init_elf_binfmt init_elfn32_binfmt
 
index e318137798958bc03bcf3069b92829d816923cff..993f7ec70f3544cbef175b600fdd71624e1099d8 100644 (file)
@@ -92,7 +92,6 @@ struct elf_prpsinfo32
        char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
-#define elf_addr_t     u32
 #define elf_caddr_t    u32
 #define init_elf_binfmt init_elf32_binfmt
 
index 2526c0ca4d817a5b89a04bda1da164bbc07eb252..b59a676c6d0e392cfc19632449d3ffa6cc6b9314 100644 (file)
@@ -19,9 +19,6 @@
 #include <asm/i8259.h>
 #include <asm/io.h>
 
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
 /*
  * This is the 'legacy' 8259A Programmable Interrupt Controller,
  * present in the majority of PC/AT boxes.
@@ -31,23 +28,16 @@ void disable_8259A_irq(unsigned int irq);
  * moves to arch independent land
  */
 
+static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-           irq_desc[irq].action)
-               enable_8259A_irq(irq);
-}
-
+/* some platforms call this... */
 void mask_and_ack_8259A(unsigned int);
 
-static struct irq_chip i8259A_irq_type = {
-       .typename = "XT-PIC",
-       .enable = enable_8259A_irq,
-       .disable = disable_8259A_irq,
-       .ack = mask_and_ack_8259A,
-       .end = end_8259A_irq,
+static struct irq_chip i8259A_chip = {
+       .name           = "XT-PIC",
+       .mask           = disable_8259A_irq,
+       .unmask         = enable_8259A_irq,
+       .mask_ack       = mask_and_ack_8259A,
 };
 
 /*
@@ -59,8 +49,8 @@ static struct irq_chip i8259A_irq_type = {
  */
 static unsigned int cached_irq_mask = 0xffff;
 
-#define cached_21      (cached_irq_mask)
-#define cached_A1      (cached_irq_mask >> 8)
+#define cached_master_mask     (cached_irq_mask)
+#define cached_slave_mask      (cached_irq_mask >> 8)
 
 void disable_8259A_irq(unsigned int irq)
 {
@@ -70,9 +60,9 @@ void disable_8259A_irq(unsigned int irq)
        spin_lock_irqsave(&i8259A_lock, flags);
        cached_irq_mask |= mask;
        if (irq & 8)
-               outb(cached_A1,0xA1);
+               outb(cached_slave_mask, PIC_SLAVE_IMR);
        else
-               outb(cached_21,0x21);
+               outb(cached_master_mask, PIC_MASTER_IMR);
        spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -84,9 +74,9 @@ void enable_8259A_irq(unsigned int irq)
        spin_lock_irqsave(&i8259A_lock, flags);
        cached_irq_mask &= mask;
        if (irq & 8)
-               outb(cached_A1,0xA1);
+               outb(cached_slave_mask, PIC_SLAVE_IMR);
        else
-               outb(cached_21,0x21);
+               outb(cached_master_mask, PIC_MASTER_IMR);
        spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -98,9 +88,9 @@ int i8259A_irq_pending(unsigned int irq)
 
        spin_lock_irqsave(&i8259A_lock, flags);
        if (irq < 8)
-               ret = inb(0x20) & mask;
+               ret = inb(PIC_MASTER_CMD) & mask;
        else
-               ret = inb(0xA0) & (mask >> 8);
+               ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
        spin_unlock_irqrestore(&i8259A_lock, flags);
 
        return ret;
@@ -109,7 +99,7 @@ int i8259A_irq_pending(unsigned int irq)
 void make_8259A_irq(unsigned int irq)
 {
        disable_irq_nosync(irq);
-       set_irq_chip(irq, &i8259A_irq_type);
+       set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
        enable_irq(irq);
 }
 
@@ -125,14 +115,14 @@ static inline int i8259A_irq_real(unsigned int irq)
        int irqmask = 1 << irq;
 
        if (irq < 8) {
-               outb(0x0B,0x20);                /* ISR register */
-               value = inb(0x20) & irqmask;
-               outb(0x0A,0x20);                /* back to the IRR register */
+               outb(0x0B,PIC_MASTER_CMD);      /* ISR register */
+               value = inb(PIC_MASTER_CMD) & irqmask;
+               outb(0x0A,PIC_MASTER_CMD);      /* back to the IRR register */
                return value;
        }
-       outb(0x0B,0xA0);                /* ISR register */
-       value = inb(0xA0) & (irqmask >> 8);
-       outb(0x0A,0xA0);                /* back to the IRR register */
+       outb(0x0B,PIC_SLAVE_CMD);       /* ISR register */
+       value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+       outb(0x0A,PIC_SLAVE_CMD);       /* back to the IRR register */
        return value;
 }
 
@@ -149,17 +139,19 @@ void mask_and_ack_8259A(unsigned int irq)
 
        spin_lock_irqsave(&i8259A_lock, flags);
        /*
-        * Lightweight spurious IRQ detection. We do not want to overdo
-        * spurious IRQ handling - it's usually a sign of hardware problems, so
-        * we only do the checks we can do without slowing down good hardware
-        * nnecesserily.
+        * Lightweight spurious IRQ detection. We do not want
+        * to overdo spurious IRQ handling - it's usually a sign
+        * of hardware problems, so we only do the checks we can
+        * do without slowing down good hardware unnecessarily.
         *
-        * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
-        * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
-        * Thus we can check spurious 8259A IRQs without doing the quite slow
-        * i8259A_irq_real() call for every IRQ.  This does not cover 100% of
-        * spurious interrupts, but should be enough to warn the user that
-        * there is something bad going on ...
+        * Note that IRQ7 and IRQ15 (the two spurious IRQs
+        * usually resulting from the 8259A-1|2 PICs) occur
+        * even if the IRQ is masked in the 8259A. Thus we
+        * can check spurious 8259A IRQs without doing the
+        * quite slow i8259A_irq_real() call for every IRQ.
+        * This does not cover 100% of spurious interrupts,
+        * but should be enough to warn the user that there
+        * is something bad going on ...
         */
        if (cached_irq_mask & irqmask)
                goto spurious_8259A_irq;
@@ -167,14 +159,14 @@ void mask_and_ack_8259A(unsigned int irq)
 
 handle_real_irq:
        if (irq & 8) {
-               inb(0xA1);              /* DUMMY - (do we need this?) */
-               outb(cached_A1,0xA1);
-               outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
-               outb(0x62,0x20);        /* 'Specific EOI' to master-IRQ2 */
+               inb(PIC_SLAVE_IMR);     /* DUMMY - (do we need this?) */
+               outb(cached_slave_mask, PIC_SLAVE_IMR);
+               outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+               outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
        } else {
-               inb(0x21);              /* DUMMY - (do we need this?) */
-               outb(cached_21,0x21);
-               outb(0x60+irq,0x20);    /* 'Specific EOI' to master */
+               inb(PIC_MASTER_IMR);    /* DUMMY - (do we need this?) */
+               outb(cached_master_mask, PIC_MASTER_IMR);
+               outb(0x60+irq,PIC_MASTER_CMD);  /* 'Specific EOI to master */
        }
 #ifdef CONFIG_MIPS_MT_SMTC
         if (irq_hwmask[irq] & ST0_IM)
@@ -195,7 +187,7 @@ spurious_8259A_irq:
                goto handle_real_irq;
 
        {
-               static int spurious_irq_mask = 0;
+               static int spurious_irq_mask;
                /*
                 * At this point we can be sure the IRQ is spurious,
                 * lets ACK and report it. [once per IRQ]
@@ -216,13 +208,25 @@ spurious_8259A_irq:
 
 static int i8259A_resume(struct sys_device *dev)
 {
-       init_8259A(0);
+       init_8259A(i8259A_auto_eoi);
+       return 0;
+}
+
+static int i8259A_shutdown(struct sys_device *dev)
+{
+       /* Put the i8259A into a quiescent state that
+        * the kernel initialization code can get it
+        * out of.
+        */
+       outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
+       outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-1 */
        return 0;
 }
 
 static struct sysdev_class i8259_sysdev_class = {
        set_kset_name("i8259"),
        .resume = i8259A_resume,
+       .shutdown = i8259A_shutdown,
 };
 
 static struct sys_device device_i8259A = {
@@ -244,41 +248,41 @@ void __init init_8259A(int auto_eoi)
 {
        unsigned long flags;
 
+       i8259A_auto_eoi = auto_eoi;
+
        spin_lock_irqsave(&i8259A_lock, flags);
 
-       outb(0xff, 0x21);       /* mask all of 8259A-1 */
-       outb(0xff, 0xA1);       /* mask all of 8259A-2 */
+       outb(0xff, PIC_MASTER_IMR);     /* mask all of 8259A-1 */
+       outb(0xff, PIC_SLAVE_IMR);      /* mask all of 8259A-2 */
 
        /*
         * outb_p - this has to work on a wide range of PC hardware.
         */
-       outb_p(0x11, 0x20);     /* ICW1: select 8259A-1 init */
-       outb_p(0x00, 0x21);     /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
-       outb_p(0x04, 0x21);     /* 8259A-1 (the master) has a slave on IR2 */
-       if (auto_eoi)
-               outb_p(0x03, 0x21);     /* master does Auto EOI */
-       else
-               outb_p(0x01, 0x21);     /* master expects normal EOI */
-
-       outb_p(0x11, 0xA0);     /* ICW1: select 8259A-2 init */
-       outb_p(0x08, 0xA1);     /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
-       outb_p(0x02, 0xA1);     /* 8259A-2 is a slave on master's IR2 */
-       outb_p(0x01, 0xA1);     /* (slave's support for AEOI in flat mode
-                                   is to be investigated) */
-
+       outb_p(0x11, PIC_MASTER_CMD);   /* ICW1: select 8259A-1 init */
+       outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);    /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+       outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);   /* 8259A-1 (the master) has a slave on IR2 */
+       if (auto_eoi)   /* master does Auto EOI */
+               outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+       else            /* master expects normal EOI */
+               outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+       outb_p(0x11, PIC_SLAVE_CMD);    /* ICW1: select 8259A-2 init */
+       outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);     /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+       outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);  /* 8259A-2 is a slave on master's IR2 */
+       outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
        if (auto_eoi)
                /*
-                * in AEOI mode we just have to mask the interrupt
+                * In AEOI mode we just have to mask the interrupt
                 * when acking.
                 */
-               i8259A_irq_type.ack = disable_8259A_irq;
+               i8259A_chip.mask_ack = disable_8259A_irq;
        else
-               i8259A_irq_type.ack = mask_and_ack_8259A;
+               i8259A_chip.mask_ack = mask_and_ack_8259A;
 
        udelay(100);            /* wait for 8259A to initialize */
 
-       outb(cached_21, 0x21);  /* restore master IRQ mask */
-       outb(cached_A1, 0xA1);  /* restore slave IRQ mask */
+       outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+       outb(cached_slave_mask, PIC_SLAVE_IMR);   /* restore slave IRQ mask */
 
        spin_unlock_irqrestore(&i8259A_lock, flags);
 }
@@ -291,11 +295,17 @@ static struct irqaction irq2 = {
 };
 
 static struct resource pic1_io_resource = {
-       .name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
+       .name = "pic1",
+       .start = PIC_MASTER_CMD,
+       .end = PIC_MASTER_IMR,
+       .flags = IORESOURCE_BUSY
 };
 
 static struct resource pic2_io_resource = {
-       .name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
+       .name = "pic2",
+       .start = PIC_SLAVE_CMD,
+       .end = PIC_SLAVE_IMR,
+       .flags = IORESOURCE_BUSY
 };
 
 /*
@@ -313,7 +323,7 @@ void __init init_i8259_irqs (void)
        init_8259A(0);
 
        for (i = 0; i < 16; i++)
-               set_irq_chip(i, &i8259A_irq_type);
+               set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
 
-       setup_irq(2, &irq2);
+       setup_irq(PIC_CASCADE_IR, &irq2);
 }
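
For readers following the register writes above, the symbolic port names appear to map onto the old magic numbers as follows; these values are inferred from the one-to-one substitutions in this file (the defining header is not part of the diff shown here):

    /* Inferred from the substitutions above, not quoted from <asm/i8259.h>: */
    #define PIC_MASTER_CMD  0x20    /* was outb(..., 0x20) / inb(0x20) */
    #define PIC_MASTER_IMR  0x21    /* was outb(..., 0x21) */
    #define PIC_SLAVE_CMD   0xa0    /* was outb(..., 0xA0) */
    #define PIC_SLAVE_IMR   0xa1    /* was outb(..., 0xA1) */
    #define PIC_CASCADE_IR  2       /* 0x60 + PIC_CASCADE_IR == 0x62, the old specific-EOI-to-IR2 value */
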
index ab12c8f0151852e74a66f781fe875ffc362b2368..1bbefbf433736dd113dbbacab8a09d0bdd351ad1 100644 (file)
@@ -52,10 +52,6 @@ static struct linux_binfmt irix_format = {
        irix_core_dump, PAGE_SIZE
 };
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 #ifdef DEBUG
 /* Debugging routines. */
 static char *get_elf_p_type(Elf32_Word p_type)
@@ -1013,7 +1009,7 @@ static int notesize(struct memelfnote *en)
        int sz;
 
        sz = sizeof(struct elf_note);
-       sz += roundup(strlen(en->name), 4);
+       sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);
 
        return sz;
@@ -1032,7 +1028,7 @@ static int writenote(struct memelfnote *men, struct file *file)
 {
        struct elf_note en;
 
-       en.n_namesz = strlen(men->name);
+       en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;
 
index 6cfb31cafde2806d387f7736fa6eeb636d84dd85..efbd219845b573fbe9d3097f1b69383a7dc8ac13 100644 (file)
@@ -66,15 +66,6 @@ static inline void unmask_mv64340_irq(unsigned int irq)
        }
 }
 
-/*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               unmask_mv64340_irq(irq);
-}
-
 /*
  * Interrupt handler for interrupts coming from the Marvell chip.
  * It could be built in ethernet ports etc...
@@ -106,7 +97,6 @@ struct irq_chip mv64340_irq_type = {
        .mask = mask_mv64340_irq,
        .mask_ack = mask_mv64340_irq,
        .unmask = unmask_mv64340_irq,
-       .end = end_mv64340_irq,
 };
 
 void __init mv64340_irq_init(unsigned int base)
index ddcc2a5f8a066d6cf55525e302597f8500d16871..123324ba8c14ca05c18562bd9cd7c5c25a794c08 100644 (file)
@@ -29,19 +29,12 @@ static inline void mask_rm7k_irq(unsigned int irq)
        clear_c0_intcontrol(0x100 << (irq - irq_base));
 }
 
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               unmask_rm7k_irq(irq);
-}
-
 static struct irq_chip rm7k_irq_controller = {
        .typename = "RM7000",
        .ack = mask_rm7k_irq,
        .mask = mask_rm7k_irq,
        .mask_ack = mask_rm7k_irq,
        .unmask = unmask_rm7k_irq,
-       .end = rm7k_cpu_irq_end,
 };
 
 void __init rm7k_cpu_irq_init(int base)
index ba6440c88abd3a7b3e3e400cf50bb95c7856818c..0e6f4c5349d2e86b0bd5cf2ecb3cfd6c2102ddd4 100644 (file)
@@ -80,19 +80,12 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
        on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
 }
 
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               unmask_rm9k_irq(irq);
-}
-
 static struct irq_chip rm9k_irq_controller = {
        .typename = "RM9000",
        .ack = mask_rm9k_irq,
        .mask = mask_rm9k_irq,
        .mask_ack = mask_rm9k_irq,
        .unmask = unmask_rm9k_irq,
-       .end = rm9k_cpu_irq_end,
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
@@ -103,7 +96,6 @@ static struct irq_chip rm9k_perfcounter_irq = {
        .mask = mask_rm9k_irq,
        .mask_ack = mask_rm9k_irq,
        .unmask = unmask_rm9k_irq,
-       .end = rm9k_cpu_irq_end,
 };
 
 unsigned int rm9000_perfcount_irq;
index b339798b3172d5e23b89428d40302ab78ba85959..2fe4c868a8016d85aae641a7d7c2888ce1279c14 100644 (file)
@@ -117,7 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
index be5ac23d3812caefbe18948da35df0a06e502551..fcc86b96ccf6097d5490dca91002b806936a9b89 100644 (file)
@@ -50,12 +50,6 @@ static inline void mask_mips_irq(unsigned int irq)
        irq_disable_hazard();
 }
 
-static void mips_cpu_irq_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               unmask_mips_irq(irq);
-}
-
 static struct irq_chip mips_cpu_irq_controller = {
        .typename       = "MIPS",
        .ack            = mask_mips_irq,
@@ -63,7 +57,6 @@ static struct irq_chip mips_cpu_irq_controller = {
        .mask_ack       = mask_mips_irq,
        .unmask         = unmask_mips_irq,
        .eoi            = unmask_mips_irq,
-       .end            = mips_cpu_irq_end,
 };
 
 /*
@@ -96,8 +89,6 @@ static void mips_mt_cpu_irq_ack(unsigned int irq)
        mask_mips_mt_irq(irq);
 }
 
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
 static struct irq_chip mips_mt_cpu_irq_controller = {
        .typename       = "MIPS",
        .startup        = mips_mt_cpu_irq_startup,
@@ -106,7 +97,6 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
        .mask_ack       = mips_mt_cpu_irq_ack,
        .unmask         = unmask_mips_mt_irq,
        .eoi            = unmask_mips_mt_irq,
-       .end            = mips_mt_cpu_irq_end,
 };
 
 void __init mips_cpu_irq_init(int irq_base)
index f06a144c788118b935434321c6972facdfb06d3b..2c82412b9efe9ba885419cd80609e754b5d99400 100644 (file)
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
        if (!channel_open) {
                if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
                        return;
                }
 
-               INIT_WORK(&work, sp_work, NULL);
+               INIT_WORK(&work, sp_work);
                queue_work(workqueue, &work);
        } else
                queue_work(workqueue, &work);
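
The rtlx change above tracks the kernel's reworked workqueue API: INIT_WORK() now takes only the work item and the handler, and the handler receives the struct work_struct itself rather than a separate data pointer. A minimal sketch of the new calling convention — my_work, my_work_fn and my_kick are illustrative names only:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static struct work_struct my_work;

    /* New-style handler: gets the work item, not a void *data argument.
     * Per-object data is reached by embedding the work_struct and using
     * container_of() on it. */
    static void my_work_fn(struct work_struct *work)
    {
            pr_debug("my_work_fn ran\n");
    }

    static void my_kick(void)
    {
            INIT_WORK(&my_work, my_work_fn);    /* two arguments now, no 'data' */
            schedule_work(&my_work);            /* run on the shared kernel workqueue */
    }
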
index 7a3ebbeba1f3aa036fd583e7472aa838595b10aa..b061c9aa6302f4b348b3f689d19f2f39e32df621 100644 (file)
@@ -382,531 +382,6 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
        return ret;
 }
 
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
-       key_t             key;
-        __compat_uid_t  uid;
-        __compat_gid_t  gid;
-        __compat_uid_t  cuid;
-        __compat_gid_t  cgid;
-        compat_mode_t  mode;
-        unsigned short  seq;
-};
-
-struct ipc64_perm32 {
-       key_t key;
-       __compat_uid_t uid;
-       __compat_gid_t gid;
-       __compat_uid_t cuid;
-       __compat_gid_t cgid;
-       compat_mode_t   mode;
-       unsigned short  seq;
-       unsigned short __pad1;
-       unsigned int __unused1;
-       unsigned int __unused2;
-};
-
-struct semid_ds32 {
-        struct ipc_perm32 sem_perm;               /* permissions .. see ipc.h */
-        compat_time_t   sem_otime;              /* last semop time */
-        compat_time_t   sem_ctime;              /* last change time */
-        u32 sem_base;              /* ptr to first semaphore in array */
-        u32 sem_pending;          /* pending operations to be processed */
-        u32 sem_pending_last;    /* last pending operation */
-        u32 undo;                  /* undo requests on this array */
-        unsigned short  sem_nsems;              /* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
-       struct ipc64_perm32     sem_perm;
-       compat_time_t   sem_otime;
-       compat_time_t   sem_ctime;
-       unsigned int            sem_nsems;
-       unsigned int            __unused1;
-       unsigned int            __unused2;
-};
-
-struct msqid_ds32
-{
-        struct ipc_perm32 msg_perm;
-        u32 msg_first;
-        u32 msg_last;
-        compat_time_t   msg_stime;
-        compat_time_t   msg_rtime;
-        compat_time_t   msg_ctime;
-        u32 wwait;
-        u32 rwait;
-        unsigned short msg_cbytes;
-        unsigned short msg_qnum;
-        unsigned short msg_qbytes;
-        compat_ipc_pid_t msg_lspid;
-        compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
-       struct ipc64_perm32 msg_perm;
-       compat_time_t msg_stime;
-       unsigned int __unused1;
-       compat_time_t msg_rtime;
-       unsigned int __unused2;
-       compat_time_t msg_ctime;
-       unsigned int __unused3;
-       unsigned int msg_cbytes;
-       unsigned int msg_qnum;
-       unsigned int msg_qbytes;
-       compat_pid_t msg_lspid;
-       compat_pid_t msg_lrpid;
-       unsigned int __unused4;
-       unsigned int __unused5;
-};
-
-struct shmid_ds32 {
-        struct ipc_perm32       shm_perm;
-        int                     shm_segsz;
-        compat_time_t          shm_atime;
-        compat_time_t          shm_dtime;
-        compat_time_t          shm_ctime;
-        compat_ipc_pid_t    shm_cpid;
-        compat_ipc_pid_t    shm_lpid;
-        unsigned short          shm_nattch;
-};
-
-struct shmid64_ds32 {
-       struct ipc64_perm32     shm_perm;
-       compat_size_t           shm_segsz;
-       compat_time_t           shm_atime;
-       compat_time_t           shm_dtime;
-       compat_time_t shm_ctime;
-       compat_pid_t shm_cpid;
-       compat_pid_t shm_lpid;
-       unsigned int shm_nattch;
-       unsigned int __unused1;
-       unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
-       u32 msgp;
-       s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
-       union semun fourth;
-       u32 pad;
-       int err, err2;
-       struct semid64_ds s;
-       mm_segment_t old_fs;
-
-       if (!uptr)
-               return -EINVAL;
-       err = -EFAULT;
-       if (get_user (pad, (u32 __user *)uptr))
-               return err;
-       if ((third & ~IPC_64) == SETVAL)
-               fourth.val = (int)pad;
-       else
-               fourth.__pad = (void __user *)A(pad);
-       switch (third & ~IPC_64) {
-       case IPC_INFO:
-       case IPC_RMID:
-       case IPC_SET:
-       case SEM_INFO:
-       case GETVAL:
-       case GETPID:
-       case GETNCNT:
-       case GETZCNT:
-       case GETALL:
-       case SETVAL:
-       case SETALL:
-               err = sys_semctl (first, second, third, fourth);
-               break;
-
-       case IPC_STAT:
-       case SEM_STAT:
-               fourth.__pad = (struct semid64_ds __user *)&s;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = sys_semctl(first, second, third | IPC_64, fourth);
-               set_fs(old_fs);
-
-               if (third & IPC_64) {
-                       struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
-                       if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
-                       err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
-                       err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
-                       err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
-                       err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
-                       err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
-                       err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
-                       err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
-                       err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
-                       err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
-               } else {
-                       struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
-                       if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
-                       err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
-                       err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
-                       err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
-                       err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
-                       err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
-                       err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
-                       err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
-                       err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
-                       err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
-               }
-               if (err2)
-                       err = -EFAULT;
-               break;
-
-       default:
-               err = - EINVAL;
-               break;
-       }
-
-       return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
-       struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
-       struct msgbuf *p;
-       mm_segment_t old_fs;
-       int err;
-
-       if (second < 0)
-               return -EINVAL;
-       p = kmalloc (second + sizeof (struct msgbuf)
-                                   + 4, GFP_USER);
-       if (!p)
-               return -ENOMEM;
-       err = get_user (p->mtype, &up->mtype);
-       if (err)
-               goto out;
-       err |= __copy_from_user (p->mtext, &up->mtext, second);
-       if (err)
-               goto out;
-       old_fs = get_fs ();
-       set_fs (KERNEL_DS);
-       err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
-       set_fs (old_fs);
-out:
-       kfree (p);
-
-       return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
-                int version, void __user *uptr)
-{
-       struct msgbuf32 __user *up;
-       struct msgbuf *p;
-       mm_segment_t old_fs;
-       int err;
-
-       if (!version) {
-               struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
-               struct ipc_kludge32 ipck;
-
-               err = -EINVAL;
-               if (!uptr)
-                       goto out;
-               err = -EFAULT;
-               if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
-                       goto out;
-               uptr = (void __user *)AA(ipck.msgp);
-               msgtyp = ipck.msgtyp;
-       }
-
-       if (second < 0)
-               return -EINVAL;
-       err = -ENOMEM;
-       p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
-       if (!p)
-               goto out;
-       old_fs = get_fs ();
-       set_fs (KERNEL_DS);
-       err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
-       set_fs (old_fs);
-       if (err < 0)
-               goto free_then_out;
-       up = (struct msgbuf32 __user *)uptr;
-       if (put_user (p->mtype, &up->mtype) ||
-           __copy_to_user (&up->mtext, p->mtext, err))
-               err = -EFAULT;
-free_then_out:
-       kfree (p);
-out:
-       return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
-       int err = -EINVAL, err2;
-       struct msqid64_ds m;
-       struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
-       struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
-       mm_segment_t old_fs;
-
-       switch (second & ~IPC_64) {
-       case IPC_INFO:
-       case IPC_RMID:
-       case MSG_INFO:
-               err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
-               break;
-
-       case IPC_SET:
-               if (second & IPC_64) {
-                       if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
-                       err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
-                       err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
-                       err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
-               } else {
-                       if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
-                       err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
-                       err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
-                       err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
-               }
-               if (err)
-                       break;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-               set_fs(old_fs);
-               break;
-
-       case IPC_STAT:
-       case MSG_STAT:
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-               set_fs(old_fs);
-               if (second & IPC_64) {
-                       if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
-                       err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
-                       err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
-                       err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
-                       err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
-                       err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
-                       err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
-                       err2 |= __put_user(m.msg_stime, &up64->msg_stime);
-                       err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
-                       err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
-                       err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
-                       err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
-                       err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
-                       err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
-                       err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
-                       if (err2)
-                               err = -EFAULT;
-               } else {
-                       if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
-                       err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
-                       err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
-                       err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
-                       err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
-                       err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
-                       err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
-                       err2 |= __put_user(m.msg_stime, &up32->msg_stime);
-                       err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
-                       err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
-                       err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
-                       err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
-                       err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
-                       err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
-                       err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
-                       if (err2)
-                               err = -EFAULT;
-               }
-               break;
-       }
-
-       return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
-{
-       unsigned long raddr;
-       u32 __user *uaddr = (u32 __user *)A((u32)third);
-       int err = -EINVAL;
-
-       if (version == 1)
-               return err;
-       err = do_shmat (first, uptr, second, &raddr);
-       if (err)
-               return err;
-       err = put_user (raddr, uaddr);
-       return err;
-}
-
-struct shm_info32 {
-       int used_ids;
-       u32 shm_tot, shm_rss, shm_swp;
-       u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
-       struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
-       struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
-       struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
-       int err = -EFAULT, err2;
-       struct shmid64_ds s64;
-       mm_segment_t old_fs;
-       struct shm_info si;
-       struct shmid_ds s;
-
-       switch (second & ~IPC_64) {
-       case IPC_INFO:
-               second = IPC_INFO; /* So that we don't have to translate it */
-       case IPC_RMID:
-       case SHM_LOCK:
-       case SHM_UNLOCK:
-               err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
-               break;
-       case IPC_SET:
-               if (second & IPC_64) {
-                       err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
-                       err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
-                       err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
-               } else {
-                       err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
-                       err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
-                       err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
-               }
-               if (err)
-                       break;
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
-               set_fs(old_fs);
-               break;
-
-       case IPC_STAT:
-       case SHM_STAT:
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
-               set_fs(old_fs);
-               if (err < 0)
-                       break;
-               if (second & IPC_64) {
-                       if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
-                       err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
-                       err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
-                       err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
-                       err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
-                       err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
-                       err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
-                       err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
-                       err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
-                       err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
-                       err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
-                       err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
-                       err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
-                       err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
-               } else {
-                       if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-                               err = -EFAULT;
-                               break;
-                       }
-                       err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
-                       err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
-                       err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
-                       err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
-                       err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
-                       err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
-                       err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
-                       err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
-                       err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
-                       err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
-                       err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
-                       err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
-                       err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
-                       err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
-               }
-               if (err2)
-                       err = -EFAULT;
-               break;
-
-       case SHM_INFO:
-               old_fs = get_fs();
-               set_fs(KERNEL_DS);
-               err = sys_shmctl(first, second, (void __user *)&si);
-               set_fs(old_fs);
-               if (err < 0)
-                       break;
-               err2 = put_user(si.used_ids, &uip->used_ids);
-               err2 |= __put_user(si.shm_tot, &uip->shm_tot);
-               err2 |= __put_user(si.shm_rss, &uip->shm_rss);
-               err2 |= __put_user(si.shm_swp, &uip->shm_swp);
-               err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
-               err2 |= __put_user (si.swap_successes, &uip->swap_successes);
-               if (err2)
-                       err = -EFAULT;
-               break;
-
-       default:
-               err = -EINVAL;
-               break;
-       }
-
-       return err;
-}
-
-static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
-                            const struct compat_timespec __user *timeout32)
-{
-       struct compat_timespec t32;
-       struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
-
-       if (copy_from_user(&t32, timeout32, sizeof(t32)))
-               return -EFAULT;
-
-       if (put_user(t32.tv_sec, &t64->tv_sec) ||
-           put_user(t32.tv_nsec, &t64->tv_nsec))
-               return -EFAULT;
-
-       return sys_semtimedop(semid, tsems, nsems, t64);
-}
-
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
@@ -918,48 +393,43 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
        switch (call) {
        case SEMOP:
                /* struct sembuf is the same on 32 and 64bit :)) */
-               err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-                                     NULL);
+               err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
                break;
        case SEMTIMEDOP:
-               err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second,
-                                     (const struct compat_timespec __user *)AA(fifth));
+               err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
+                                           compat_ptr(fifth));
                break;
        case SEMGET:
-               err = sys_semget (first, second, third);
+               err = sys_semget(first, second, third);
                break;
        case SEMCTL:
-               err = do_sys32_semctl (first, second, third,
-                                      (void __user *)AA(ptr));
+               err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
                break;
-
        case MSGSND:
-               err = do_sys32_msgsnd (first, second, third,
-                                      (void __user *)AA(ptr));
+               err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
                break;
        case MSGRCV:
-               err = do_sys32_msgrcv (first, second, fifth, third,
-                                      version, (void __user *)AA(ptr));
+               err = compat_sys_msgrcv(first, second, fifth, third,
+                                       version, compat_ptr(ptr));
                break;
        case MSGGET:
-               err = sys_msgget ((key_t) first, second);
+               err = sys_msgget((key_t) first, second);
                break;
        case MSGCTL:
-               err = do_sys32_msgctl (first, second, (void __user *)AA(ptr));
+               err = compat_sys_msgctl(first, second, compat_ptr(ptr));
                break;
-
        case SHMAT:
-               err = do_sys32_shmat (first, second, third,
-                                     version, (void __user *)AA(ptr));
+               err = compat_sys_shmat(first, second, third, version,
+                                      compat_ptr(ptr));
                break;
        case SHMDT:
-               err = sys_shmdt ((char __user *)A(ptr));
+               err = sys_shmdt(compat_ptr(ptr));
                break;
        case SHMGET:
-               err = sys_shmget (first, (unsigned)second, third);
+               err = sys_shmget(first, (unsigned)second, third);
                break;
        case SHMCTL:
-               err = do_sys32_shmctl (first, second, (void __user *)AA(ptr));
+               err = compat_sys_shmctl(first, second, compat_ptr(ptr));
                break;
        default:
                err = -EINVAL;
@@ -969,18 +439,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
        return err;
 }
 
-asmlinkage long sys32_shmat(int shmid, char __user *shmaddr,
-                         int shmflg, int32_t __user *addr)
+#ifdef CONFIG_MIPS32_N32
+asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, union semun arg)
 {
-       unsigned long raddr;
-       int err;
-
-       err = do_shmat(shmid, shmaddr, shmflg, &raddr);
-       if (err)
-               return err;
-
-       return put_user(raddr, addr);
+       /* compat_sys_semctl expects a pointer to union semun */
+       u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
+       if (put_user(ptr_to_compat(arg.__pad), uptr))
+               return -EFAULT;
+       return compat_sys_semctl(semid, semnum, cmd, uptr);
 }
+#endif
 
 struct sysctl_args32
 {
index 5b18f265d75b8449f8e7a474d07bdc0d0529ebcd..34567d81f94024105bb5ccee141f3d2344a41edc 100644 (file)
@@ -149,8 +149,8 @@ EXPORT(sysn32_call_table)
        PTR     sys_mincore
        PTR     sys_madvise
        PTR     sys_shmget
-       PTR     sys32_shmat
-       PTR     sys_shmctl                      /* 6030 */
+       PTR     sys_shmat
+       PTR     compat_sys_shmctl                       /* 6030 */
        PTR     sys_dup
        PTR     sys_dup2
        PTR     sys_pause
@@ -184,12 +184,12 @@ EXPORT(sysn32_call_table)
        PTR     sys32_newuname
        PTR     sys_semget
        PTR     sys_semop
-       PTR     sys_semctl
+       PTR     sysn32_semctl
        PTR     sys_shmdt                       /* 6065 */
        PTR     sys_msgget
-       PTR     sys_msgsnd
-       PTR     sys_msgrcv
-       PTR     sys_msgctl
+       PTR     compat_sys_msgsnd
+       PTR     compat_sys_msgrcv
+       PTR     compat_sys_msgctl
        PTR     compat_sys_fcntl                /* 6070 */
        PTR     sys_flock
        PTR     sys_fsync
@@ -335,7 +335,7 @@ EXPORT(sysn32_call_table)
        PTR     compat_sys_fcntl64
        PTR     sys_set_tid_address
        PTR     sys_restart_syscall
-       PTR     sys_semtimedop                  /* 6215 */
+       PTR     compat_sys_semtimedop                   /* 6215 */
        PTR     sys_fadvise64_64
        PTR     compat_sys_statfs64
        PTR     compat_sys_fstatfs64
index 49db516789e07bb6a82d758a94648647c15ec69c..f2a8701e414d72ff9c320e33df6b40100f0adb84 100644 (file)
@@ -172,7 +172,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 
        spin_lock(&smp_call_lock);
        call_data = &data;
-       mb();
+       smp_mb();
 
        /* Send a message to all other CPUs and wait for them to respond */
        for_each_online_cpu(i)
@@ -204,7 +204,7 @@ void smp_call_function_interrupt(void)
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
-       mb();
+       smp_mb();
        atomic_inc(&call_data->started);
 
        /*
@@ -215,7 +215,7 @@ void smp_call_function_interrupt(void)
        irq_exit();
 
        if (wait) {
-               mb();
+               smp_mb();
                atomic_inc(&call_data->finished);
        }
 }
index 4a84a7beac531fb50c2e94e5b176b672bc440f89..2affa5ff171c763e76067f96b2c7925a82b4e47d 100644 (file)
@@ -44,19 +44,12 @@ void enable_lasat_irq(unsigned int irq_nr)
        *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
 }
 
-static void end_lasat_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_lasat_irq(irq);
-}
-
 static struct irq_chip lasat_irq_type = {
        .typename = "Lasat",
        .ack = disable_lasat_irq,
        .mask = disable_lasat_irq,
        .mask_ack = disable_lasat_irq,
        .unmask = enable_lasat_irq,
-       .end = end_lasat_irq,
 };
 
 static inline int ls1bit32(unsigned int x)
index ad285786e74b341ea9a9e30192e9dbf625f1a96b..dcd4d2ed2ac45737e148cfd3a5d74460f5ff1cac 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y  += csum_partial.o memset.o watch.o
+lib-y  += memset.o watch.o
 
 obj-$(CONFIG_CPU_MIPS32)       += dump_tlb.o
 obj-$(CONFIG_CPU_MIPS64)       += dump_tlb.o
diff --git a/arch/mips/lib-32/csum_partial.S b/arch/mips/lib-32/csum_partial.S
deleted file mode 100644 (file)
index ea257db..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ralf Baechle
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg)                  \
-       addu    sum, reg;               \
-       sltu    v1, sum, reg;           \
-       addu    sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
-       lw      t0, (offset + 0x00)(src); \
-       lw      t1, (offset + 0x04)(src); \
-       lw      t2, (offset + 0x08)(src); \
-       lw      t3, (offset + 0x0c)(src); \
-       ADDC(sum, t0);                    \
-       ADDC(sum, t1);                    \
-       ADDC(sum, t2);                    \
-       ADDC(sum, t3);                    \
-       lw      t0, (offset + 0x10)(src); \
-       lw      t1, (offset + 0x14)(src); \
-       lw      t2, (offset + 0x18)(src); \
-       lw      t3, (offset + 0x1c)(src); \
-       ADDC(sum, t0);                    \
-       ADDC(sum, t1);                    \
-       ADDC(sum, t2);                    \
-       ADDC(sum, t3);                    \
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define dest a1
-#define sum v0
-
-       .text
-       .set    noreorder
-
-/* unknown src alignment and < 8 bytes to go  */
-small_csumcpy:
-       move    a1, t2
-
-       andi    t0, a1, 4
-       beqz    t0, 1f
-        andi   t0, a1, 2
-
-       /* Still a full word to go  */
-       ulw     t1, (src)
-       addiu   src, 4
-       ADDC(sum, t1)
-
-1:     move    t1, zero
-       beqz    t0, 1f
-        andi   t0, a1, 1
-
-       /* Still a halfword to go  */
-       ulhu    t1, (src)
-       addiu   src, 2
-
-1:     beqz    t0, 1f
-        sll    t1, t1, 16
-
-       lbu     t2, (src)
-        nop
-
-#ifdef __MIPSEB__
-       sll     t2, t2, 8
-#endif
-       or      t1, t2
-
-1:     ADDC(sum, t1)
-
-       /* fold checksum */
-       sll     v1, sum, 16
-       addu    sum, v1
-       sltu    v1, sum, v1
-       srl     sum, sum, 16
-       addu    sum, v1
-
-       /* odd buffer alignment? */
-       beqz    t7, 1f
-        nop
-       sll     v1, sum, 8
-       srl     sum, sum, 8
-       or      sum, v1
-       andi    sum, 0xffff
-1:
-       .set    reorder
-       /* Add the passed partial csum.  */
-       ADDC(sum, a2)
-       jr      ra
-       .set    noreorder
-
-/* ------------------------------------------------------------------------- */
-
-       .align  5
-LEAF(csum_partial)
-       move    sum, zero
-       move    t7, zero
-
-       sltiu   t8, a1, 0x8
-       bnez    t8, small_csumcpy               /* < 8 bytes to copy */
-        move   t2, a1
-
-       beqz    a1, out
-        andi   t7, src, 0x1                    /* odd buffer? */
-
-hword_align:
-       beqz    t7, word_align
-        andi   t8, src, 0x2
-
-       lbu     t0, (src)
-       subu    a1, a1, 0x1
-#ifdef __MIPSEL__
-       sll     t0, t0, 8
-#endif
-       ADDC(sum, t0)
-       addu    src, src, 0x1
-       andi    t8, src, 0x2
-
-word_align:
-       beqz    t8, dword_align
-        sltiu  t8, a1, 56
-
-       lhu     t0, (src)
-       subu    a1, a1, 0x2
-       ADDC(sum, t0)
-       sltiu   t8, a1, 56
-       addu    src, src, 0x2
-
-dword_align:
-       bnez    t8, do_end_words
-        move   t8, a1
-
-       andi    t8, src, 0x4
-       beqz    t8, qword_align
-        andi   t8, src, 0x8
-
-       lw      t0, 0x00(src)
-       subu    a1, a1, 0x4
-       ADDC(sum, t0)
-       addu    src, src, 0x4
-       andi    t8, src, 0x8
-
-qword_align:
-       beqz    t8, oword_align
-        andi   t8, src, 0x10
-
-       lw      t0, 0x00(src)
-       lw      t1, 0x04(src)
-       subu    a1, a1, 0x8
-       ADDC(sum, t0)
-       ADDC(sum, t1)
-       addu    src, src, 0x8
-       andi    t8, src, 0x10
-
-oword_align:
-       beqz    t8, begin_movement
-        srl    t8, a1, 0x7
-
-       lw      t3, 0x08(src)
-       lw      t4, 0x0c(src)
-       lw      t0, 0x00(src)
-       lw      t1, 0x04(src)
-       ADDC(sum, t3)
-       ADDC(sum, t4)
-       ADDC(sum, t0)
-       ADDC(sum, t1)
-       subu    a1, a1, 0x10
-       addu    src, src, 0x10
-       srl     t8, a1, 0x7
-
-begin_movement:
-       beqz    t8, 1f
-        andi   t2, a1, 0x40
-
-move_128bytes:
-       CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-       CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
-       CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
-       CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
-       subu    t8, t8, 0x01
-       bnez    t8, move_128bytes
-        addu   src, src, 0x80
-
-1:
-       beqz    t2, 1f
-        andi   t2, a1, 0x20
-
-move_64bytes:
-       CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-       CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
-       addu    src, src, 0x40
-
-1:
-       beqz    t2, do_end_words
-        andi   t8, a1, 0x1c
-
-move_32bytes:
-       CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
-       andi    t8, a1, 0x1c
-       addu    src, src, 0x20
-
-do_end_words:
-       beqz    t8, maybe_end_cruft
-        srl    t8, t8, 0x2
-
-end_words:
-       lw      t0, (src)
-       subu    t8, t8, 0x1
-       ADDC(sum, t0)
-       bnez    t8, end_words
-        addu   src, src, 0x4
-
-maybe_end_cruft:
-       andi    t2, a1, 0x3
-
-small_memcpy:
- j small_csumcpy; move a1, t2
-       beqz    t2, out
-        move   a1, t2
-
-end_bytes:
-       lb      t0, (src)
-       subu    a1, a1, 0x1
-       bnez    a2, end_bytes
-        addu   src, src, 0x1
-
-out:
-       jr      ra
-        move   v0, sum
-       END(csum_partial)
index ad285786e74b341ea9a9e30192e9dbf625f1a96b..dcd4d2ed2ac45737e148cfd3a5d74460f5ff1cac 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y  += csum_partial.o memset.o watch.o
+lib-y  += memset.o watch.o
 
 obj-$(CONFIG_CPU_MIPS32)       += dump_tlb.o
 obj-$(CONFIG_CPU_MIPS64)       += dump_tlb.o
diff --git a/arch/mips/lib-64/csum_partial.S b/arch/mips/lib-64/csum_partial.S
deleted file mode 100644 (file)
index 25aba66..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Quick'n'dirty IP checksum ...
- *
- * Copyright (C) 1998, 1999 Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-#define ADDC(sum,reg)                                          \
-       addu    sum, reg;                                       \
-       sltu    v1, sum, reg;                                   \
-       addu    sum, v1
-
-#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3)                \
-       lw      t0, (offset + 0x00)(src);                       \
-       lw      t1, (offset + 0x04)(src);                       \
-       lw      t2, (offset + 0x08)(src);                       \
-       lw      t3, (offset + 0x0c)(src);                       \
-       ADDC(sum, t0);                                          \
-       ADDC(sum, t1);                                          \
-       ADDC(sum, t2);                                          \
-       ADDC(sum, t3);                                          \
-       lw      t0, (offset + 0x10)(src);                       \
-       lw      t1, (offset + 0x14)(src);                       \
-       lw      t2, (offset + 0x18)(src);                       \
-       lw      t3, (offset + 0x1c)(src);                       \
-       ADDC(sum, t0);                                          \
-       ADDC(sum, t1);                                          \
-       ADDC(sum, t2);                                          \
-       ADDC(sum, t3);                                          \
-
-/*
- * a0: source address
- * a1: length of the area to checksum
- * a2: partial checksum
- */
-
-#define src a0
-#define sum v0
-
-       .text
-       .set    noreorder
-
-/* unknown src alignment and < 8 bytes to go  */
-small_csumcpy:
-       move    a1, ta2
-
-       andi    ta0, a1, 4
-       beqz    ta0, 1f
-        andi   ta0, a1, 2
-
-       /* Still a full word to go  */
-       ulw     ta1, (src)
-       daddiu  src, 4
-       ADDC(sum, ta1)
-
-1:     move    ta1, zero
-       beqz    ta0, 1f
-        andi   ta0, a1, 1
-
-       /* Still a halfword to go  */
-       ulhu    ta1, (src)
-       daddiu  src, 2
-
-1:     beqz    ta0, 1f
-        sll    ta1, ta1, 16
-
-       lbu     ta2, (src)
-        nop
-
-#ifdef __MIPSEB__
-       sll     ta2, ta2, 8
-#endif
-       or      ta1, ta2
-
-1:     ADDC(sum, ta1)
-
-       /* fold checksum */
-       sll     v1, sum, 16
-       addu    sum, v1
-       sltu    v1, sum, v1
-       srl     sum, sum, 16
-       addu    sum, v1
-
-       /* odd buffer alignment? */
-       beqz    t3, 1f
-        nop
-       sll     v1, sum, 8
-       srl     sum, sum, 8
-       or      sum, v1
-       andi    sum, 0xffff
-1:
-       .set    reorder
-       /* Add the passed partial csum.  */
-       ADDC(sum, a2)
-       jr      ra
-       .set    noreorder
-
-/* ------------------------------------------------------------------------- */
-
-       .align  5
-LEAF(csum_partial)
-       move    sum, zero
-       move    t3, zero
-
-       sltiu   t8, a1, 0x8
-       bnez    t8, small_csumcpy               /* < 8 bytes to copy */
-        move   ta2, a1
-
-       beqz    a1, out
-        andi   t3, src, 0x1                    /* odd buffer? */
-
-hword_align:
-       beqz    t3, word_align
-        andi   t8, src, 0x2
-
-       lbu     ta0, (src)
-       dsubu   a1, a1, 0x1
-#ifdef __MIPSEL__
-       sll     ta0, ta0, 8
-#endif
-       ADDC(sum, ta0)
-       daddu   src, src, 0x1
-       andi    t8, src, 0x2
-
-word_align:
-       beqz    t8, dword_align
-        sltiu  t8, a1, 56
-
-       lhu     ta0, (src)
-       dsubu   a1, a1, 0x2
-       ADDC(sum, ta0)
-       sltiu   t8, a1, 56
-       daddu   src, src, 0x2
-
-dword_align:
-       bnez    t8, do_end_words
-        move   t8, a1
-
-       andi    t8, src, 0x4
-       beqz    t8, qword_align
-        andi   t8, src, 0x8
-
-       lw      ta0, 0x00(src)
-       dsubu   a1, a1, 0x4
-       ADDC(sum, ta0)
-       daddu   src, src, 0x4
-       andi    t8, src, 0x8
-
-qword_align:
-       beqz    t8, oword_align
-        andi   t8, src, 0x10
-
-       lw      ta0, 0x00(src)
-       lw      ta1, 0x04(src)
-       dsubu   a1, a1, 0x8
-       ADDC(sum, ta0)
-       ADDC(sum, ta1)
-       daddu   src, src, 0x8
-       andi    t8, src, 0x10
-
-oword_align:
-       beqz    t8, begin_movement
-        dsrl   t8, a1, 0x7
-
-       lw      ta3, 0x08(src)
-       lw      t0, 0x0c(src)
-       lw      ta0, 0x00(src)
-       lw      ta1, 0x04(src)
-       ADDC(sum, ta3)
-       ADDC(sum, t0)
-       ADDC(sum, ta0)
-       ADDC(sum, ta1)
-       dsubu   a1, a1, 0x10
-       daddu   src, src, 0x10
-       dsrl    t8, a1, 0x7
-
-begin_movement:
-       beqz    t8, 1f
-        andi   ta2, a1, 0x40
-
-move_128bytes:
-       CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-       CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
-       CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
-       CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
-       dsubu   t8, t8, 0x01
-       bnez    t8, move_128bytes
-        daddu  src, src, 0x80
-
-1:
-       beqz    ta2, 1f
-        andi   ta2, a1, 0x20
-
-move_64bytes:
-       CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-       CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
-       daddu   src, src, 0x40
-
-1:
-       beqz    ta2, do_end_words
-        andi   t8, a1, 0x1c
-
-move_32bytes:
-       CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
-       andi    t8, a1, 0x1c
-       daddu   src, src, 0x20
-
-do_end_words:
-       beqz    t8, maybe_end_cruft
-        dsrl   t8, t8, 0x2
-
-end_words:
-       lw      ta0, (src)
-       dsubu   t8, t8, 0x1
-       ADDC(sum, ta0)
-       bnez    t8, end_words
-        daddu  src, src, 0x4
-
-maybe_end_cruft:
-       andi    ta2, a1, 0x3
-
-small_memcpy:
- j small_csumcpy; move a1, ta2         /* XXX ??? */
-       beqz    t2, out
-        move   a1, ta2
-
-end_bytes:
-       lb      ta0, (src)
-       dsubu   a1, a1, 0x1
-       bnez    a2, end_bytes
-        daddu  src, src, 0x1
-
-out:
-       jr      ra
-        move   v0, sum
-       END(csum_partial)
index b225543f5302dc2ed7ad3ea4cc28daa84d40c3e0..888b61ea12feb4d58354ff62a28e87d2d5914c56 100644 (file)
@@ -2,8 +2,8 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y  += csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
-          strnlen_user.o uncached.o
+lib-y  += csum_partial.o csum_partial_copy.o memcpy.o promlib.o \
+          strlen_user.o strncpy_user.o strnlen_user.o uncached.o
 
 obj-y  += iomap.o
 
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644 (file)
index 0000000..15611d9
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_64BIT
+#define T0     ta0
+#define T1     ta1
+#define T2     ta2
+#define T3     ta3
+#define T4     t0
+#define T7     t3
+#else
+#define T0     t0
+#define T1     t1
+#define T2     t2
+#define T3     t3
+#define T4     t4
+#define T7     t7
+#endif
+
+#define ADDC(sum,reg)                                          \
+       addu    sum, reg;                                       \
+       sltu    v1, sum, reg;                                   \
+       addu    sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)    \
+       lw      _t0, (offset + 0x00)(src);                      \
+       lw      _t1, (offset + 0x04)(src);                      \
+       lw      _t2, (offset + 0x08)(src);                      \
+       lw      _t3, (offset + 0x0c)(src);                      \
+       ADDC(sum, _t0);                                         \
+       ADDC(sum, _t1);                                         \
+       ADDC(sum, _t2);                                         \
+       ADDC(sum, _t3);                                         \
+       lw      _t0, (offset + 0x10)(src);                      \
+       lw      _t1, (offset + 0x14)(src);                      \
+       lw      _t2, (offset + 0x18)(src);                      \
+       lw      _t3, (offset + 0x1c)(src);                      \
+       ADDC(sum, _t0);                                         \
+       ADDC(sum, _t1);                                         \
+       ADDC(sum, _t2);                                         \
+       ADDC(sum, _t3);                                         \
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+       .text
+       .set    noreorder
+
+/* unknown src alignment and < 8 bytes to go  */
+small_csumcpy:
+       move    a1, T2
+
+       andi    T0, a1, 4
+       beqz    T0, 1f
+        andi   T0, a1, 2
+
+       /* Still a full word to go  */
+       ulw     T1, (src)
+       PTR_ADDIU       src, 4
+       ADDC(sum, T1)
+
+1:     move    T1, zero
+       beqz    T0, 1f
+        andi   T0, a1, 1
+
+       /* Still a halfword to go  */
+       ulhu    T1, (src)
+       PTR_ADDIU       src, 2
+
+1:     beqz    T0, 1f
+        sll    T1, T1, 16
+
+       lbu     T2, (src)
+        nop
+
+#ifdef __MIPSEB__
+       sll     T2, T2, 8
+#endif
+       or      T1, T2
+
+1:     ADDC(sum, T1)
+
+       /* fold checksum */
+       sll     v1, sum, 16
+       addu    sum, v1
+       sltu    v1, sum, v1
+       srl     sum, sum, 16
+       addu    sum, v1
+
+       /* odd buffer alignment? */
+       beqz    T7, 1f
+        nop
+       sll     v1, sum, 8
+       srl     sum, sum, 8
+       or      sum, v1
+       andi    sum, 0xffff
+1:
+       .set    reorder
+       /* Add the passed partial csum.  */
+       ADDC(sum, a2)
+       jr      ra
+       .set    noreorder
+
+/* ------------------------------------------------------------------------- */
+
+       .align  5
+LEAF(csum_partial)
+       move    sum, zero
+       move    T7, zero
+
+       sltiu   t8, a1, 0x8
+       bnez    t8, small_csumcpy               /* < 8 bytes to copy */
+        move   T2, a1
+
+       beqz    a1, out
+        andi   T7, src, 0x1                    /* odd buffer? */
+
+hword_align:
+       beqz    T7, word_align
+        andi   t8, src, 0x2
+
+       lbu     T0, (src)
+       LONG_SUBU       a1, a1, 0x1
+#ifdef __MIPSEL__
+       sll     T0, T0, 8
+#endif
+       ADDC(sum, T0)
+       PTR_ADDU        src, src, 0x1
+       andi    t8, src, 0x2
+
+word_align:
+       beqz    t8, dword_align
+        sltiu  t8, a1, 56
+
+       lhu     T0, (src)
+       LONG_SUBU       a1, a1, 0x2
+       ADDC(sum, T0)
+       sltiu   t8, a1, 56
+       PTR_ADDU        src, src, 0x2
+
+dword_align:
+       bnez    t8, do_end_words
+        move   t8, a1
+
+       andi    t8, src, 0x4
+       beqz    t8, qword_align
+        andi   t8, src, 0x8
+
+       lw      T0, 0x00(src)
+       LONG_SUBU       a1, a1, 0x4
+       ADDC(sum, T0)
+       PTR_ADDU        src, src, 0x4
+       andi    t8, src, 0x8
+
+qword_align:
+       beqz    t8, oword_align
+        andi   t8, src, 0x10
+
+       lw      T0, 0x00(src)
+       lw      T1, 0x04(src)
+       LONG_SUBU       a1, a1, 0x8
+       ADDC(sum, T0)
+       ADDC(sum, T1)
+       PTR_ADDU        src, src, 0x8
+       andi    t8, src, 0x10
+
+oword_align:
+       beqz    t8, begin_movement
+        LONG_SRL       t8, a1, 0x7
+
+       lw      T3, 0x08(src)
+       lw      T4, 0x0c(src)
+       lw      T0, 0x00(src)
+       lw      T1, 0x04(src)
+       ADDC(sum, T3)
+       ADDC(sum, T4)
+       ADDC(sum, T0)
+       ADDC(sum, T1)
+       LONG_SUBU       a1, a1, 0x10
+       PTR_ADDU        src, src, 0x10
+       LONG_SRL        t8, a1, 0x7
+
+begin_movement:
+       beqz    t8, 1f
+        andi   T2, a1, 0x40
+
+move_128bytes:
+       CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+       CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+       CSUM_BIGCHUNK(src, 0x40, sum, T0, T1, T3, T4)
+       CSUM_BIGCHUNK(src, 0x60, sum, T0, T1, T3, T4)
+       LONG_SUBU       t8, t8, 0x01
+       bnez    t8, move_128bytes
+        PTR_ADDU       src, src, 0x80
+
+1:
+       beqz    T2, 1f
+        andi   T2, a1, 0x20
+
+move_64bytes:
+       CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+       CSUM_BIGCHUNK(src, 0x20, sum, T0, T1, T3, T4)
+       PTR_ADDU        src, src, 0x40
+
+1:
+       beqz    T2, do_end_words
+        andi   t8, a1, 0x1c
+
+move_32bytes:
+       CSUM_BIGCHUNK(src, 0x00, sum, T0, T1, T3, T4)
+       andi    t8, a1, 0x1c
+       PTR_ADDU        src, src, 0x20
+
+do_end_words:
+       beqz    t8, maybe_end_cruft
+        LONG_SRL       t8, t8, 0x2
+
+end_words:
+       lw      T0, (src)
+       LONG_SUBU       t8, t8, 0x1
+       ADDC(sum, T0)
+       bnez    t8, end_words
+        PTR_ADDU       src, src, 0x4
+
+maybe_end_cruft:
+       andi    T2, a1, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a1, T2          /* XXX ??? */
+       beqz    t2, out
+        move   a1, T2
+
+end_bytes:
+       lb      T0, (src)
+       LONG_SUBU       a1, a1, 0x1
+       bnez    a2, end_bytes
+        PTR_ADDU       src, src, 0x1
+
+out:
+       jr      ra
+        move   v0, sum
+       END(csum_partial)
index 7fa5fd16e46bfe846b5c47063d7d85e5dab54f57..5697c6e250a37d0b69baf49cb1a792b95081a318 100644 (file)
@@ -190,14 +190,14 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
index 8da19fd22ac6f0bbbf5b64d3408e611b5f215f99..f088344db4659e2b4e6229464e00d0ad6068a904 100644 (file)
@@ -197,14 +197,14 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
index ec54ed0d26ff23fbf257e6e6193cf9a8eeb06815..b42b6f7456e6c552721f2b44762c74c518d68d28 100644 (file)
@@ -363,14 +363,15 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction direction)
 {
        if (direction == DMA_NONE)
                return;
index 2eeffe5c2a3a40315c69cd0a6defc907cb4eb292..8cecef0957c359ff55827b85b91b5b74e74a4648 100644 (file)
@@ -299,14 +299,15 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction direction)
 {
        if (direction == DMA_NONE)
                return;
index 99ebf3ccc222b78ef8743b9f0bb5b6cba15d894c..675502ada5a27d5f958e6b58bea8e4219ff77ff1 100644 (file)
@@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
        unsigned long vaddr;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       inc_preempt_count();
+       pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
@@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
        if (vaddr < FIXADDR_START) { // FIXME
-               dec_preempt_count();
-               preempt_check_resched();
+               pagefault_enable();
                return;
        }
 
@@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
        local_flush_tlb_one(vaddr);
 #endif
 
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
 }
 
 #ifndef CONFIG_LIMITED_DMA
@@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
        enum fixed_addresses idx;
        unsigned long vaddr;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
index e5a4a0a8a7f05821c6b03a11a2531fcb02663f6f..bb11fef08472b1b0b89a5d4ddb980ced32a3ac5a 100644 (file)
@@ -65,15 +65,6 @@ static inline void unmask_cpci_irq(unsigned int irq)
        value = OCELOT_FPGA_READ(INTMASK);
 }
 
-/*
- * End IRQ processing
- */
-static void end_cpci_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               unmask_cpci_irq(irq);
-}
-
 /*
  * Interrupt handler for interrupts coming from the FPGA chip.
  * It could be built in ethernet ports etc...
@@ -98,7 +89,6 @@ struct irq_chip cpci_irq_type = {
        .mask = mask_cpci_irq,
        .mask_ack = mask_cpci_irq,
        .unmask = unmask_cpci_irq,
-       .end = end_cpci_irq,
 };
 
 void cpci_irq_init(void)
index 0029f0008deac076d76a5c9dcbf27704671b4aab..a7a80c0da569653bb68169652e0c10a892b1d088 100644 (file)
@@ -59,15 +59,6 @@ static inline void unmask_uart_irq(unsigned int irq)
        value = OCELOT_FPGA_READ(UART_INTMASK);
 }
 
-/*
- * End IRQ processing
- */
-static void end_uart_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               unmask_uart_irq(irq);
-}
-
 /*
  * Interrupt handler for interrupts coming from the FPGA chip.
  */
@@ -91,7 +82,6 @@ struct irq_chip uart_irq_type = {
        .mask = mask_uart_irq,
        .mask_ack = mask_uart_irq,
        .unmask = unmask_uart_irq,
-       .end = end_uart_irq,
 };
 
 void uart_irq_init(void)
index 0dc23930edbdf4fcdf0bd6ad81210534d1151e8c..2c36c108c4d62c7e0bff9029159e396a9089afa5 100644 (file)
@@ -158,20 +158,12 @@ int pnx8550_set_gic_priority(int irq, int priority)
        return prev_priority;
 }
 
-static void end_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
-               unmask_irq(irq);
-       }
-}
-
 static struct irq_chip level_irq_type = {
        .typename =     "PNX Level IRQ",
        .ack =          mask_irq,
        .mask =         mask_irq,
        .mask_ack =     mask_irq,
        .unmask =       unmask_irq,
-       .end =          end_irq,
 };
 
 static struct irqaction gic_action = {
index c7b138053159f5d9f82ada27ac860f7404734445..c44f8be0644f43bf58d38cb744d2509724dac2ee 100644 (file)
@@ -51,19 +51,12 @@ static void disable_local0_irq(unsigned int irq)
        sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0));
 }
 
-static void end_local0_irq (unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_local0_irq(irq);
-}
-
 static struct irq_chip ip22_local0_irq_type = {
        .typename       = "IP22 local 0",
        .ack            = disable_local0_irq,
        .mask           = disable_local0_irq,
        .mask_ack       = disable_local0_irq,
        .unmask         = enable_local0_irq,
-       .end            = end_local0_irq,
 };
 
 static void enable_local1_irq(unsigned int irq)
@@ -79,19 +72,12 @@ void disable_local1_irq(unsigned int irq)
        sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1));
 }
 
-static void end_local1_irq (unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_local1_irq(irq);
-}
-
 static struct irq_chip ip22_local1_irq_type = {
        .typename       = "IP22 local 1",
        .ack            = disable_local1_irq,
        .mask           = disable_local1_irq,
        .mask_ack       = disable_local1_irq,
        .unmask         = enable_local1_irq,
-       .end            = end_local1_irq,
 };
 
 static void enable_local2_irq(unsigned int irq)
@@ -107,19 +93,12 @@ void disable_local2_irq(unsigned int irq)
                sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
 }
 
-static void end_local2_irq (unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_local2_irq(irq);
-}
-
 static struct irq_chip ip22_local2_irq_type = {
        .typename       = "IP22 local 2",
        .ack            = disable_local2_irq,
        .mask           = disable_local2_irq,
        .mask_ack       = disable_local2_irq,
        .unmask         = enable_local2_irq,
-       .end            = end_local2_irq,
 };
 
 static void enable_local3_irq(unsigned int irq)
@@ -135,19 +114,12 @@ void disable_local3_irq(unsigned int irq)
                sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
 }
 
-static void end_local3_irq (unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_local3_irq(irq);
-}
-
 static struct irq_chip ip22_local3_irq_type = {
        .typename       = "IP22 local 3",
        .ack            = disable_local3_irq,
        .mask           = disable_local3_irq,
        .mask_ack       = disable_local3_irq,
        .unmask         = enable_local3_irq,
-       .end            = end_local3_irq,
 };
 
 static void indy_local0_irqdispatch(void)
index 5f8835b4e84ad0fafaedcca7c1e3251062fa1a61..319f8803ef6f13b5dcf8536f469c2fd9784c4cbf 100644 (file)
@@ -332,13 +332,6 @@ static inline void disable_bridge_irq(unsigned int irq)
        intr_disconnect_level(cpu, swlevel);
 }
 
-static void end_bridge_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-           irq_desc[irq].action)
-               enable_bridge_irq(irq);
-}
-
 static struct irq_chip bridge_irq_type = {
        .typename       = "bridge",
        .startup        = startup_bridge_irq,
@@ -347,7 +340,6 @@ static struct irq_chip bridge_irq_type = {
        .mask           = disable_bridge_irq,
        .mask_ack       = disable_bridge_irq,
        .unmask         = enable_bridge_irq,
-       .end            = end_bridge_irq,
 };
 
 void __devinit register_bridge_irq(unsigned int irq)
index 7d361726bbfb33ab0d2a8c15ba2b96a57e0337e0..c20e9899b34b49fba2d95cdfa5688ea242287080 100644 (file)
@@ -180,10 +180,6 @@ static void disable_rt_irq(unsigned int irq)
 {
 }
 
-static void end_rt_irq(unsigned int irq)
-{
-}
-
 static struct irq_chip rt_irq_type = {
        .typename       = "SN HUB RT timer",
        .ack            = disable_rt_irq,
@@ -191,7 +187,6 @@ static struct irq_chip rt_irq_type = {
        .mask_ack       = disable_rt_irq,
        .unmask         = enable_rt_irq,
        .eoi            = enable_rt_irq,
-       .end            = end_rt_irq,
 };
 
 static struct irqaction rt_irqaction = {
index ac342f5643c9d3481d1cd921355b4959114f34e9..defa1f1452adb6d3c6a93134ddfafad908761218 100644 (file)
@@ -43,7 +43,7 @@
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 #include <asm/sibyte/sb1250_regs.h>
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 #include <asm/sibyte/sb1250_genbus.h>
 #include <asm/sibyte/board.h>
@@ -53,7 +53,7 @@ extern void bcm1480_setup(void);
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
 extern void sb1250_setup(void);
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 
 extern int xicor_probe(void);
@@ -90,7 +90,7 @@ void __init plat_timer_setup(struct irqaction *irq)
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
        sb1250_time_init();
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 }
 
@@ -111,7 +111,7 @@ void __init plat_mem_setup(void)
 #elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
        sb1250_setup();
 #else
-#error invalid SiByte board configuation
+#error invalid SiByte board configuration
 #endif
 
        panic_timeout = 5;  /* For debug.  */
index 21873de49aa80425645ac9963cfd6dff1c0ff1fd..ed4a19adf36156dfff654ec8ec830ea10ad92120 100644 (file)
 #define TX4927_IRQ_CP0_INIT     ( 1 << 10 )
 #define TX4927_IRQ_CP0_ENABLE   ( 1 << 13 )
 #define TX4927_IRQ_CP0_DISABLE  ( 1 << 14 )
-#define TX4927_IRQ_CP0_ENDIRQ   ( 1 << 16 )
 
 #define TX4927_IRQ_PIC_INIT     ( 1 << 20 )
 #define TX4927_IRQ_PIC_ENABLE   ( 1 << 23 )
 #define TX4927_IRQ_PIC_DISABLE  ( 1 << 24 )
-#define TX4927_IRQ_PIC_ENDIRQ   ( 1 << 26 )
 
 #define TX4927_IRQ_ALL         0xffffffff
 #endif
@@ -82,12 +80,10 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE
                                          | TX4927_IRQ_WARN | TX4927_IRQ_EROR
 //                                       | TX4927_IRQ_CP0_INIT
 //                                       | TX4927_IRQ_CP0_ENABLE
-//                                       | TX4927_IRQ_CP0_DISABLE
 //                                       | TX4927_IRQ_CP0_ENDIRQ
 //                                       | TX4927_IRQ_PIC_INIT
 //                                       | TX4927_IRQ_PIC_ENABLE
 //                                       | TX4927_IRQ_PIC_DISABLE
-//                                       | TX4927_IRQ_PIC_ENDIRQ
 //                                       | TX4927_IRQ_INIT
 //                                       | TX4927_IRQ_NEST1
 //                                       | TX4927_IRQ_NEST2
@@ -114,11 +110,9 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE
 
 static void tx4927_irq_cp0_enable(unsigned int irq);
 static void tx4927_irq_cp0_disable(unsigned int irq);
-static void tx4927_irq_cp0_end(unsigned int irq);
 
 static void tx4927_irq_pic_enable(unsigned int irq);
 static void tx4927_irq_pic_disable(unsigned int irq);
-static void tx4927_irq_pic_end(unsigned int irq);
 
 /*
  * Kernel structs for all pic's
@@ -131,7 +125,6 @@ static struct irq_chip tx4927_irq_cp0_type = {
        .mask           = tx4927_irq_cp0_disable,
        .mask_ack       = tx4927_irq_cp0_disable,
        .unmask         = tx4927_irq_cp0_enable,
-       .end            = tx4927_irq_cp0_end,
 };
 
 #define TX4927_PIC_NAME "TX4927-PIC"
@@ -141,7 +134,6 @@ static struct irq_chip tx4927_irq_pic_type = {
        .mask           = tx4927_irq_pic_disable,
        .mask_ack       = tx4927_irq_pic_disable,
        .unmask         = tx4927_irq_pic_enable,
-       .end            = tx4927_irq_pic_end,
 };
 
 #define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL }
@@ -214,15 +206,6 @@ static void tx4927_irq_cp0_disable(unsigned int irq)
        tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0);
 }
 
-static void tx4927_irq_cp0_end(unsigned int irq)
-{
-       TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENDIRQ, "irq=%d \n", irq);
-
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-               tx4927_irq_cp0_enable(irq);
-       }
-}
-
 /*
  * Functions for pic
  */
@@ -376,15 +359,6 @@ static void tx4927_irq_pic_disable(unsigned int irq)
                              tx4927_irq_pic_mask(irq), 0);
 }
 
-static void tx4927_irq_pic_end(unsigned int irq)
-{
-       TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENDIRQ, "irq=%d\n", irq);
-
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-               tx4927_irq_pic_enable(irq);
-       }
-}
-
 /*
  * Main init functions
  */
index 34cdb2a240e991869fd0294fbb839be1ea5b240d..5a5ea6c0b9f6d2b49531c187945414c357aeba48 100644 (file)
@@ -153,7 +153,6 @@ JP7 is not bus master -- do NOT use -- only 4 pci bus master's allowed -- SouthB
 #define TOSHIBA_RBTX4927_IRQ_IOC_INIT      ( 1 << 10 )
 #define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE    ( 1 << 13 )
 #define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE   ( 1 << 14 )
-#define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ    ( 1 << 16 )
 
 #define TOSHIBA_RBTX4927_IRQ_ISA_INIT      ( 1 << 20 )
 #define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE    ( 1 << 23 )
@@ -172,7 +171,6 @@ static const u32 toshiba_rbtx4927_irq_debug_flag =
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_INIT
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE
-//                                                 | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_INIT
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE
 //                                                 | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE
@@ -223,7 +221,6 @@ extern void mask_and_ack_8259A(unsigned int irq);
 
 static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq);
 static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq);
 
 #ifdef CONFIG_TOSHIBA_FPCIB0
 static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq);
@@ -239,7 +236,6 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
        .mask = toshiba_rbtx4927_irq_ioc_disable,
        .mask_ack = toshiba_rbtx4927_irq_ioc_disable,
        .unmask = toshiba_rbtx4927_irq_ioc_enable,
-       .end = toshiba_rbtx4927_irq_ioc_end,
 };
 #define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000
 #define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006
@@ -388,23 +384,6 @@ static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
        TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
 }
 
-static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq)
-{
-       TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ,
-                                    "irq=%d\n", irq);
-
-       if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
-           || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
-               TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
-                                            "bad irq=%d\n", irq);
-               panic("\n");
-       }
-
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-               toshiba_rbtx4927_irq_ioc_enable(irq);
-       }
-}
-
 
 /**********************************************************************************/
 /* Functions for isa                                                              */
index 42e127683ae961f0b18c17ef940b1e31e964c47f..a347b424d91c76d9157c0f17a576576bc5242768 100644 (file)
 
 static void tx4938_irq_cp0_enable(unsigned int irq);
 static void tx4938_irq_cp0_disable(unsigned int irq);
-static void tx4938_irq_cp0_end(unsigned int irq);
 
 static void tx4938_irq_pic_enable(unsigned int irq);
 static void tx4938_irq_pic_disable(unsigned int irq);
-static void tx4938_irq_pic_end(unsigned int irq);
 
 /**********************************************************************************/
 /* Kernel structs for all pic's                                                   */
@@ -56,7 +54,6 @@ static struct irq_chip tx4938_irq_cp0_type = {
        .mask = tx4938_irq_cp0_disable,
        .mask_ack = tx4938_irq_cp0_disable,
        .unmask = tx4938_irq_cp0_enable,
-       .end = tx4938_irq_cp0_end,
 };
 
 #define TX4938_PIC_NAME "TX4938-PIC"
@@ -66,7 +63,6 @@ static struct irq_chip tx4938_irq_pic_type = {
        .mask = tx4938_irq_pic_disable,
        .mask_ack = tx4938_irq_pic_disable,
        .unmask = tx4938_irq_pic_enable,
-       .end = tx4938_irq_pic_end,
 };
 
 static struct irqaction tx4938_irq_pic_action = {
@@ -104,14 +100,6 @@ tx4938_irq_cp0_disable(unsigned int irq)
        clear_c0_status(tx4938_irq_cp0_mask(irq));
 }
 
-static void
-tx4938_irq_cp0_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-               tx4938_irq_cp0_enable(irq);
-       }
-}
-
 /**********************************************************************************/
 /* Functions for pic                                                              */
 /**********************************************************************************/
@@ -269,14 +257,6 @@ tx4938_irq_pic_disable(unsigned int irq)
                              tx4938_irq_pic_mask(irq), 0);
 }
 
-static void
-tx4938_irq_pic_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-               tx4938_irq_pic_enable(irq);
-       }
-}
-
 /**********************************************************************************/
 /* Main init functions                                                            */
 /**********************************************************************************/
index 8c87a35f30682405abea6261152916000ec50d46..b6f363d08011271deab6cb4405fd8084d1dc24cf 100644 (file)
@@ -89,7 +89,6 @@ IRQ  Device
 
 static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
 static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
-static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq);
 
 #define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
 static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
@@ -98,7 +97,6 @@ static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
        .mask = toshiba_rbtx4938_irq_ioc_disable,
        .mask_ack = toshiba_rbtx4938_irq_ioc_disable,
        .unmask = toshiba_rbtx4938_irq_ioc_enable,
-       .end = toshiba_rbtx4938_irq_ioc_end,
 };
 
 #define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000
@@ -167,14 +165,6 @@ toshiba_rbtx4938_irq_ioc_disable(unsigned int irq)
        TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
 }
 
-static void
-toshiba_rbtx4938_irq_ioc_end(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-               toshiba_rbtx4938_irq_ioc_enable(irq);
-       }
-}
-
 extern void __init txx9_spi_irqinit(int irc_irq);
 
 void __init arch_init_irq(void)
index 92f41f6f934a3bea38e9b9184ec68c9c4710370d..c8dfd8092cab72c6d0a1fc39098a30a0c55e9c4c 100644 (file)
@@ -6,6 +6,7 @@ config CASIO_E55
        select ISA
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config IBM_WORKPAD
        bool "Support for IBM WorkPad z50"
@@ -15,6 +16,7 @@ config IBM_WORKPAD
        select ISA
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config NEC_CMBVR4133
        bool "Support for NEC CMB-VR4133"
@@ -39,6 +41,7 @@ config TANBAC_TB022X
        select IRQ_CPU
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
        help
          The TANBAC VR4131 multichip module(TB0225) and
          the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms
@@ -71,6 +74,7 @@ config VICTOR_MPC30X
        select IRQ_CPU
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config ZAO_CAPCELLA
        bool "Support for ZAO Networks Capcella"
@@ -80,6 +84,7 @@ config ZAO_CAPCELLA
        select IRQ_CPU
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config PCI_VR41XX
        bool "Add PCI control unit support of NEC VR4100 series"
index 54b92a74c7ac2cad60c6a0985758208f7c5b671e..c075261976c58df81ec4f4f1106b1c5bfe1c0739 100644 (file)
@@ -427,19 +427,12 @@ static void enable_sysint1_irq(unsigned int irq)
        icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
 }
 
-static void end_sysint1_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
-}
-
 static struct irq_chip sysint1_irq_type = {
        .typename       = "SYSINT1",
        .ack            = disable_sysint1_irq,
        .mask           = disable_sysint1_irq,
        .mask_ack       = disable_sysint1_irq,
        .unmask         = enable_sysint1_irq,
-       .end            = end_sysint1_irq,
 };
 
 static void disable_sysint2_irq(unsigned int irq)
@@ -452,19 +445,12 @@ static void enable_sysint2_irq(unsigned int irq)
        icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
 }
 
-static void end_sysint2_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-               icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
-}
-
 static struct irq_chip sysint2_irq_type = {
        .typename       = "SYSINT2",
        .ack            = disable_sysint2_irq,
        .mask           = disable_sysint2_irq,
        .mask_ack       = disable_sysint2_irq,
        .unmask         = enable_sysint2_irq,
-       .end            = end_sysint2_irq,
 };
 
 static inline int set_sysint1_assign(unsigned int irq, unsigned char assign)
index 1e64e7b88110157df77334416f7d8a703bea4449..ecb10a4f63c6af15324cb7343951701b04aaba1f 100644 (file)
@@ -75,7 +75,6 @@ struct elf_prpsinfo32
        char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
-#define elf_addr_t     unsigned int
 #define init_elf_binfmt init_elf32_binfmt
 
 #define ELF_PLATFORM  ("PARISC32\0")
index 64785e46f93b777297f23d7ae5139fe7bfb280c3..641f9c920eeef58e0e627467db3b6bf2c06b731a 100644 (file)
@@ -152,7 +152,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
        const struct exception_table_entry *fix;
        unsigned long acc_type;
 
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        down_read(&mm->mmap_sem);
diff --git a/arch/powerpc/.gitignore b/arch/powerpc/.gitignore
new file mode 100644 (file)
index 0000000..a1a869c
--- /dev/null
@@ -0,0 +1 @@
+include
index 116d7d3683edb98777398df7d033300a79b0d738..291c95ac4b313d4f791367f583ead200a4b11788 100644 (file)
@@ -112,7 +112,7 @@ choice
        default 6xx
 
 config CLASSIC32
-       bool "6xx/7xx/74xx"
+       bool "52xx/6xx/7xx/74xx"
        select PPC_FPU
        select 6xx
        help
@@ -121,16 +121,18 @@ config CLASSIC32
          versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
          embedded versions (403 and 405) and the high end 64 bit Power
          processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
+
+         This option is the catch-all for 6xx types, including some of the
+         embedded versions.  Unless you see an option for the specific
+         chip family you are using, you want this option.
+         
+         You do not want this if you are building a kernel for a 64 bit
+         IBM RS/6000 or an Apple G5.
+         
+         If unsure, select this option.
          
-         Unless you are building a kernel for one of the embedded processor
-         systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
          Note that the kernel runs in 32-bit mode even on 64-bit chips.
 
-config PPC_52xx
-       bool "Freescale 52xx"
-       select 6xx
-       select PPC_FPU
-       
 config PPC_82xx
        bool "Freescale 82xx"
        select 6xx
@@ -160,9 +162,11 @@ config PPC_86xx
 
 config 40x
        bool "AMCC 40x"
+       select PPC_DCR_NATIVE
 
 config 44x
        bool "AMCC 44x"
+       select PPC_DCR_NATIVE
 
 config 8xx
        bool "Freescale 8xx"
@@ -208,6 +212,24 @@ config PPC_FPU
        bool
        default y if PPC64
 
+config PPC_DCR_NATIVE
+       bool
+       default n
+
+config PPC_DCR_MMIO
+       bool
+       default n
+
+config PPC_DCR
+       bool
+       depends on PPC_DCR_NATIVE || PPC_DCR_MMIO
+       default y
+
+config PPC_OF_PLATFORM_PCI
+       bool
+       depends on PPC64 # not supported on 32 bits yet
+       default n
+
 config BOOKE
        bool
        depends on E200 || E500
@@ -227,6 +249,7 @@ config PTE_64BIT
 config PHYS_64BIT
        bool 'Large physical address support' if E500
        depends on 44x || E500
+       select RESOURCES_64BIT
        default y if 44x
        ---help---
          This option enables kernel support for larger than 32-bit physical
@@ -369,11 +392,13 @@ config PPC_PSERIES
        select PPC_RTAS
        select RTAS_ERROR_LOGGING
        select PPC_UDBG_16550
+       select PPC_NATIVE
        default y
 
 config PPC_ISERIES
        bool "IBM Legacy iSeries"
        depends on PPC_MULTIPLATFORM && PPC64
+       select PPC_INDIRECT_IO
 
 config PPC_CHRP
        bool "Common Hardware Reference Platform (CHRP) based machines"
@@ -384,14 +409,35 @@ config PPC_CHRP
        select PPC_RTAS
        select PPC_MPC106
        select PPC_UDBG_16550
+       select PPC_NATIVE
+       default y
+
+config PPC_MPC52xx
+       bool
+       default n
+
+config PPC_EFIKA
+       bool "bPlan Efika 5k2. MPC5200B based computer"
+       depends on PPC_MULTIPLATFORM && PPC32
+       select PPC_RTAS
+       select RTAS_PROC
+       select PPC_MPC52xx
+       select PPC_NATIVE
        default y
 
+config PPC_LITE5200
+       bool "Freescale Lite5200 Eval Board"
+       depends on PPC_MULTIPLATFORM && PPC32
+       select PPC_MPC52xx
+       default n
+
 config PPC_PMAC
        bool "Apple PowerMac based machines"
        depends on PPC_MULTIPLATFORM
        select MPIC
        select PPC_INDIRECT_PCI if PPC32
        select PPC_MPC106 if PPC32
+       select PPC_NATIVE
        default y
 
 config PPC_PMAC64
@@ -411,6 +457,7 @@ config PPC_PREP
        select PPC_I8259
        select PPC_INDIRECT_PCI
        select PPC_UDBG_16550
+       select PPC_NATIVE
        default y
 
 config PPC_MAPLE
@@ -422,6 +469,7 @@ config PPC_MAPLE
        select GENERIC_TBSYNC
        select PPC_UDBG_16550
        select PPC_970_NAP
+       select PPC_NATIVE
        default n
        help
           This option enables support for the Maple 970FX Evaluation Board.
@@ -434,6 +482,7 @@ config PPC_PASEMI
        select MPIC
        select PPC_UDBG_16550
        select GENERIC_TBSYNC
+       select PPC_NATIVE
        help
          This option enables support for PA Semi's PWRficient line
          of SoC processors, including PA6T-1682M
@@ -445,6 +494,11 @@ config PPC_CELL
 config PPC_CELL_NATIVE
        bool
        select PPC_CELL
+       select PPC_DCR_MMIO
+       select PPC_OF_PLATFORM_PCI
+       select PPC_INDIRECT_IO
+       select PPC_NATIVE
+       select MPIC
        default n
 
 config PPC_IBM_CELL_BLADE
@@ -456,6 +510,22 @@ config PPC_IBM_CELL_BLADE
        select PPC_UDBG_16550
        select UDBG_RTAS_CONSOLE
 
+config PPC_PS3
+       bool "Sony PS3"
+       depends on PPC_MULTIPLATFORM && PPC64
+       select PPC_CELL
+       help
+         This option enables support for the Sony PS3 game console
+         and other platforms using the PS3 hypervisor.
+
+config PPC_NATIVE
+       bool
+       depends on PPC_MULTIPLATFORM
+       help
+         Support for running natively on the hardware, i.e. without
+         a hypervisor. This option is not user-selectable but should
+         be selected by all platforms that need it.
+
 config UDBG_RTAS_CONSOLE
        bool "RTAS based debug console"
        depends on PPC_RTAS
@@ -517,6 +587,15 @@ config PPC_970_NAP
        bool
        default n
 
+config PPC_INDIRECT_IO
+       bool
+       select GENERIC_IOMAP
+       default n
+
+config GENERIC_IOMAP
+       bool
+       default n
+
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_PMAC
@@ -594,12 +673,6 @@ config TAU_AVERAGE
 
          If in doubt, say N here.
 
-config PPC_TODC
-       depends on EMBEDDED6xx
-       bool "Generic Time-of-day Clock (TODC) support"
-       ---help---
-         This adds support for many TODC/RTC chips.
-
 endmenu
 
 source arch/powerpc/platforms/embedded6xx/Kconfig
@@ -610,6 +683,7 @@ source arch/powerpc/platforms/85xx/Kconfig
 source arch/powerpc/platforms/86xx/Kconfig
 source arch/powerpc/platforms/8xx/Kconfig
 source arch/powerpc/platforms/cell/Kconfig
+source arch/powerpc/platforms/ps3/Kconfig
 
 menu "Kernel options"
 
@@ -790,7 +864,6 @@ source "arch/powerpc/platforms/prep/Kconfig"
 
 config CMDLINE_BOOL
        bool "Default bootloader kernel arguments"
-       depends on !PPC_ISERIES
 
 config CMDLINE
        string "Initial kernel command string"
@@ -880,7 +953,7 @@ config MCA
 
 config PCI
        bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
-               || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2
+               || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES) || MPC7448HPC2 || PPC_PS3
        default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx \
                && !PPC_85xx && !PPC_86xx
        default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
index 5ad149b47e340cf3ef85b15fc23f6a10f0f9656a..f0e51edde022454fc4581449275c5acf6687b86d 100644 (file)
@@ -77,7 +77,7 @@ config KGDB_CONSOLE
 
 config XMON
        bool "Include xmon kernel debugger"
-       depends on DEBUGGER && !PPC_ISERIES
+       depends on DEBUGGER
        help
          Include in-kernel hooks for the xmon kernel monitor/debugger.
          Unless you are intending to debug the kernel, say N here.
@@ -98,6 +98,15 @@ config XMON_DEFAULT
          xmon is normally disabled unless booted with 'xmon=on'.
          Use 'xmon=off' to disable xmon init during runtime.
 
+config XMON_DISASSEMBLY
+       bool "Include disassembly support in xmon"
+       depends on XMON
+       default y
+       help
+         Include support for disassembling in xmon. You probably want
+         to say Y here, unless you're building for a memory-constrained
+         system.
+
 config IRQSTACKS
        bool "Use separate kernel stacks when processing interrupts"
        depends on PPC64
@@ -116,7 +125,7 @@ config BDI_SWITCH
 
 config BOOTX_TEXT
        bool "Support for early boot text console (BootX or OpenFirmware only)"
-       depends PPC_OF && !PPC_ISERIES
+       depends PPC_OF
        help
          Say Y here to see progress messages from the boot firmware in text
          mode. Requires either BootX or Open Firmware.
index 45c9ad23526eca8e5f80c2b1fd7e820727bb0d8b..0734b2fc1d957f4150dfeb341c30988d5d5abb2f 100644 (file)
@@ -1,19 +1,32 @@
 addnote
+empty.c
+hack-coff
 infblock.c
 infblock.h
 infcodes.c
 infcodes.h
 inffast.c
 inffast.h
+inffixed.h
 inflate.c
+inflate.h
 inftrees.c
 inftrees.h
 infutil.c
 infutil.h
 kernel-vmlinux.strip.c
 kernel-vmlinux.strip.gz
+mktree
 uImage
 zImage
+zImage.chrp
+zImage.coff
+zImage.coff.lds
+zImage.lds
+zImage.miboot
+zImage.pmac
+zImage.pseries
+zImage.sandpoint
 zImage.vmode
 zconf.h
 zlib.h
index 4b2be611f77f350c44304731ccf0a065520d7702..343dbcfdf08a6484425666e3038cf2f6fd5a6e17 100644 (file)
@@ -40,7 +40,8 @@ zliblinuxheader := zlib.h zconf.h zutil.h
 $(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) \
                $(addprefix $(obj)/,$(zlibheader))
 
-src-wlib := string.S stdio.c main.c div64.S $(zlib)
+src-wlib := string.S stdio.c main.c flatdevtree.c flatdevtree_misc.c \
+               ns16550.c serial.c simple_alloc.c div64.S util.S $(zlib)
 src-plat := of.c
 src-boot := crt0.S $(src-wlib) $(src-plat) empty.c
 
@@ -74,7 +75,7 @@ $(obj)/zImage.lds $(obj)/zImage.coff.lds: $(obj)/%: $(srctree)/$(src)/%.S
        @cp $< $@
 
 clean-files := $(zlib) $(zlibheader) $(zliblinuxheader) \
-               $(obj)/empty.c
+               empty.c zImage zImage.coff.lds zImage.lds zImage.sandpoint
 
 quiet_cmd_bootcc = BOOTCC  $@
       cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
@@ -93,13 +94,13 @@ $(patsubst %.S,%.o, $(filter %.S, $(src-boot))): %.o: %.S
 $(obj)/wrapper.a: $(obj-wlib)
        $(call cmd,bootar)
 
-hostprogs-y    := addnote addRamDisk hack-coff
+hostprogs-y    := addnote addRamDisk hack-coff mktree
 
 extra-y                := $(obj)/crt0.o $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
                   $(obj)/zImage.lds $(obj)/zImage.coff.lds
 
 wrapper                :=$(srctree)/$(src)/wrapper
-wrapperbits    := $(extra-y) $(addprefix $(obj)/,addnote hack-coff)
+wrapperbits    := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree)
 
 #############
 # Bits for building various flavours of zImage
@@ -148,13 +149,18 @@ $(obj)/zImage.miboot: vmlinux $(wrapperbits)
 $(obj)/zImage.initrd.miboot: vmlinux $(wrapperbits)
        $(call cmd,wrap_initrd,miboot)
 
+$(obj)/zImage.ps3: vmlinux
+       $(STRIP) -s -R .comment $< -o $@
+
 $(obj)/uImage: vmlinux $(wrapperbits)
        $(call cmd,wrap,uboot)
 
 image-$(CONFIG_PPC_PSERIES)            += zImage.pseries
 image-$(CONFIG_PPC_MAPLE)              += zImage.pseries
 image-$(CONFIG_PPC_IBM_CELL_BLADE)     += zImage.pseries
+image-$(CONFIG_PPC_PS3)                        += zImage.ps3
 image-$(CONFIG_PPC_CHRP)               += zImage.chrp
+image-$(CONFIG_PPC_EFIKA)              += zImage.chrp
 image-$(CONFIG_PPC_PMAC)               += zImage.pmac
 image-$(CONFIG_DEFAULT_UIMAGE)         += uImage
 
@@ -176,3 +182,4 @@ install: $(CONFIGURE) $(image-y)
 
 clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.strip.gz)
 clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.bin.gz)
+clean-files += $(image-)
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
new file mode 100644 (file)
index 0000000..d06b0b0
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Device Tree Source for Buffalo KuroboxHG
+ *
+ * Choose CONFIG_LINKSTATION to build a kernel for KuroboxHG, or use
+ * the default configuration linkstation_defconfig.
+ *
+ * Based on sandpoint.dts
+ *
+ * 2006 (c) G. Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+
+XXXX add flash parts, rtc, ??
+
+build with: "dtc -f -I dts -O dtb -o kuroboxHG.dtb -V 16 kuroboxHG.dts"
+
+
+ */
+
+/ {
+       linux,phandle = <1000>;
+       model = "KuroboxHG";
+       compatible = "linkstation";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               linux,phandle = <2000>;
+               #cpus = <1>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               PowerPC,603e { /* Really 8241 */
+                       linux,phandle = <2100>;
+                       linux,boot-cpu;
+                       device_type = "cpu";
+                       reg = <0>;
+                       clock-frequency = <fdad680>;    /* Fixed by bootwrapper */
+                       timebase-frequency = <1F04000>; /* Fixed by bootwrapper */
+                       bus-frequency = <0>;            /* From bootloader */
+                       /* Following required by dtc but not used */
+                       i-cache-line-size = <0>;
+                       d-cache-line-size = <0>;
+                       i-cache-size = <4000>;
+                       d-cache-size = <4000>;
+               };
+       };
+
+       memory {
+               linux,phandle = <3000>;
+               device_type = "memory";
+               reg = <00000000 08000000>;
+       };
+
+       soc10x { /* AFAICT need to make soc for 8245's uarts to be defined */
+               linux,phandle = <4000>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               #interrupt-cells = <2>;
+               device_type = "soc";
+               compatible = "mpc10x";
+               store-gathering = <0>; /* 0 == off, !0 == on */
+               reg = <80000000 00100000>;
+               ranges = <80000000 80000000 70000000    /* pci mem space */
+                         fc000000 fc000000 00100000    /* EUMB */
+                         fe000000 fe000000 00c00000    /* pci i/o space */
+                         fec00000 fec00000 00300000    /* pci cfg regs */
+                         fef00000 fef00000 00100000>;  /* pci iack */
+
+               i2c@80003000 {
+                       linux,phandle = <4300>;
+                       device_type = "i2c";
+                       compatible = "fsl-i2c";
+                       reg = <80003000 1000>;
+                       interrupts = <5 2>;
+                       interrupt-parent = <4400>;
+               };
+
+               serial@80004500 {
+                       linux,phandle = <4511>;
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <80004500 8>;
+                       clock-frequency = <7c044a8>;
+                       current-speed = <2580>;
+                       interrupts = <9 2>;
+                       interrupt-parent = <4400>;
+               };
+
+               serial@80004600 {
+                       linux,phandle = <4512>;
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <80004600 8>;
+                       clock-frequency = <7c044a8>;
+                       current-speed = <e100>;
+                       interrupts = <a 0>;
+                       interrupt-parent = <4400>;
+               };
+
+               pic@80040000 {
+                       linux,phandle = <4400>;
+                       #interrupt-cells = <2>;
+                       #address-cells = <0>;
+                       device_type = "open-pic";
+                       compatible = "chrp,open-pic";
+                       interrupt-controller;
+                       reg = <80040000 40000>;
+                       built-in;
+               };
+
+               pci@fec00000 {
+                       linux,phandle = <4500>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       #interrupt-cells = <1>;
+                       device_type = "pci";
+                       compatible = "mpc10x-pci";
+                       reg = <fec00000 400000>;
+                       ranges = <01000000 0        0 fe000000 0 00c00000
+                                 02000000 0 80000000 80000000 0 70000000>;
+                       bus-range = <0 ff>;
+                       clock-frequency = <7f28155>;
+                       interrupt-parent = <4400>;
+                       interrupt-map-mask = <f800 0 0 7>;
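+                       /* each map entry: child devfn (3 address cells) and pin 1-4,
+                          then open-pic phandle, irq number, sense */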
+                       interrupt-map = <
+                               /* IDSEL 0x11 - IRQ0 ETH */
+                               5800 0 0 1 4400 0 1
+                               5800 0 0 2 4400 1 1
+                               5800 0 0 3 4400 2 1
+                               5800 0 0 4 4400 3 1
+                               /* IDSEL 0x12 - IRQ1 IDE0 */
+                               6000 0 0 1 4400 1 1
+                               6000 0 0 2 4400 2 1
+                               6000 0 0 3 4400 3 1
+                               6000 0 0 4 4400 0 1
+                               /* IDSEL 0x14 - IRQ3 USB2.0 */
+                               7000 0 0 1 4400 3 1
+                               7000 0 0 2 4400 3 1
+                               7000 0 0 3 4400 3 1
+                               7000 0 0 4 4400 3 1
+                       >;
+               };
+       };
+};
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
new file mode 100644 (file)
index 0000000..8bc0d25
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ * Lite5200 board Device Tree Source
+ *
+ * Copyright 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/ {
+       model = "Lite5200";
+       compatible = "lite5200\0lite52xx\0mpc5200\0mpc52xx";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #cpus = <1>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               PowerPC,5200@0 {
+                       device_type = "cpu";
+                       reg = <0>;
+                       d-cache-line-size = <20>;
+                       i-cache-line-size = <20>;
+                       d-cache-size = <4000>;          // L1, 16K
+                       i-cache-size = <4000>;          // L1, 16K
+                       timebase-frequency = <0>;       // from bootloader
+                       bus-frequency = <0>;            // from bootloader
+                       clock-frequency = <0>;          // from bootloader
+                       32-bit;
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <00000000 04000000>;      // 64MB
+       };
+
+       soc5200@f0000000 {
+               #interrupt-cells = <3>;
+               device_type = "soc";
+               ranges = <0 f0000000 f0010000>;
+               reg = <f0000000 00010000>;
+               bus-frequency = <0>;            // from bootloader
+
+               cdm@200 {
+                       compatible = "mpc5200-cdm\0mpc52xx-cdm";
+                       reg = <200 38>;
+               };
+
+               pic@500 {
+                       // 5200 interrupts are encoded into two levels;
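+                       // (specifier cells: <L1 group, L2 irq, sense>; see
+                       //  Documentation/powerpc/mpc52xx-device-tree-bindings.txt)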
+                       linux,phandle = <500>;
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       device_type = "interrupt-controller";
+                       compatible = "mpc5200-pic\0mpc52xx-pic";
+                       reg = <500 80>;
+                       built-in;
+               };
+
+               gpt@600 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <600 10>;
+                       interrupts = <1 9 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@610 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <610 10>;
+                       interrupts = <1 a 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@620 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <620 10>;
+                       interrupts = <1 b 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@630 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <630 10>;
+                       interrupts = <1 c 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@640 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <640 10>;
+                       interrupts = <1 d 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@650 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <650 10>;
+                       interrupts = <1 e 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@660 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <660 10>;
+                       interrupts = <1 f 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@670 {       // General Purpose Timer
+                       compatible = "mpc5200-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <670 10>;
+                       interrupts = <1 10 0>;
+                       interrupt-parent = <500>;
+               };
+
+               rtc@800 {       // Real time clock
+                       compatible = "mpc5200-rtc\0mpc52xx-rtc";
+                       device_type = "rtc";
+                       reg = <800 100>;
+                       interrupts = <1 5 0 1 6 0>;
+                       interrupt-parent = <500>;
+               };
+
+               mscan@900 {
+                       device_type = "mscan";
+                       compatible = "mpc5200-mscan\0mpc52xx-mscan";
+                       interrupts = <2 11 0>;
+                       interrupt-parent = <500>;
+                       reg = <900 80>;
+               };
+
+               mscan@980 {
+                       device_type = "mscan";
+                       compatible = "mpc5200-mscan\0mpc52xx-mscan";
+                       interrupts = <1 12 0>;
+                       interrupt-parent = <500>;
+                       reg = <980 80>;
+               };
+
+               gpio@b00 {
+                       compatible = "mpc5200-gpio\0mpc52xx-gpio";
+                       reg = <b00 40>;
+                       interrupts = <1 7 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpio-wkup@b00 {
+                       compatible = "mpc5200-gpio-wkup\0mpc52xx-gpio-wkup";
+                       reg = <c00 40>;
+                       interrupts = <1 8 0 0 3 0>;
+                       interrupt-parent = <500>;
+               };
+
+               pci@0d00 {
+                       #interrupt-cells = <1>;
+                       #size-cells = <2>;
+                       #address-cells = <3>;
+                       device_type = "pci";
+                       compatible = "mpc5200-pci\0mpc52xx-pci";
+                       reg = <d00 100>;
+                       interrupt-map-mask = <f800 0 0 7>;
+                       interrupt-map = <c000 0 0 1 500 0 0 3
+                                        c000 0 0 2 500 0 0 3
+                                        c000 0 0 3 500 0 0 3
+                                        c000 0 0 4 500 0 0 3>;
+                       clock-frequency = <0>; // From boot loader
+                       interrupts = <2 8 0 2 9 0 2 a 0>;
+                       interrupt-parent = <500>;
+                       bus-range = <0 0>;
+                       ranges = <42000000 0 80000000 80000000 0 20000000
+                                 02000000 0 a0000000 a0000000 0 10000000
+                                 01000000 0 00000000 b0000000 0 01000000>;
+               };
+
+               spi@f00 {
+                       device_type = "spi";
+                       compatible = "mpc5200-spi\0mpc52xx-spi";
+                       reg = <f00 20>;
+                       interrupts = <2 d 0 2 e 0>;
+                       interrupt-parent = <500>;
+               };
+
+               usb@1000 {
+                       device_type = "usb-ohci-be";
+                       compatible = "mpc5200-ohci\0mpc52xx-ohci\0ohci-be";
+                       reg = <1000 ff>;
+                       interrupts = <2 6 0>;
+                       interrupt-parent = <500>;
+               };
+
+               bestcomm@1200 {
+                       device_type = "dma-controller";
+                       compatible = "mpc5200-bestcomm\0mpc52xx-bestcomm";
+                       reg = <1200 80>;
+                       interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+                                     3 4 0  3 5 0  3 6 0  3 7 0
+                                     3 8 0  3 9 0  3 a 0  3 b 0
+                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                       interrupt-parent = <500>;
+               };
+
+               xlb@1f00 {
+                       compatible = "mpc5200-xlb\0mpc52xx-xlb";
+                       reg = <1f00 100>;
+               };
+
+               serial@2000 {           // PSC1
+                       device_type = "serial";
+                       compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+                       port-number = <0>;  // Logical port assignment
+                       reg = <2000 100>;
+                       interrupts = <2 1 0>;
+                       interrupt-parent = <500>;
+               };
+
+               // PSC2 in spi mode example
+               spi@2200 {              // PSC2
+                       device_type = "spi";
+                       compatible = "mpc5200-psc-spi\0mpc52xx-psc-spi";
+                       reg = <2200 100>;
+                       interrupts = <2 2 0>;
+                       interrupt-parent = <500>;
+               };
+
+               // PSC3 in CODEC mode example
+               i2s@2400 {              // PSC3
+                       device_type = "i2s";
+                       compatible = "mpc5200-psc-i2s\0mpc52xx-psc-i2s";
+                       reg = <2400 100>;
+                       interrupts = <2 3 0>;
+                       interrupt-parent = <500>;
+               };
+
+               // PSC4 unconfigured
+               //serial@2600 {         // PSC4
+               //      device_type = "serial";
+               //      compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+               //      reg = <2600 100>;
+               //      interrupts = <2 b 0>;
+               //      interrupt-parent = <500>;
+               //};
+
+               // PSC5 unconfigured
+               //serial@2800 {         // PSC5
+               //      device_type = "serial";
+               //      compatible = "mpc5200-psc-uart\0mpc52xx-psc-uart";
+               //      reg = <2800 100>;
+               //      interrupts = <2 c 0>;
+               //      interrupt-parent = <500>;
+               //};
+
+               // PSC6 in AC97 mode example
+               ac97@2c00 {             // PSC6
+                       device_type = "ac97";
+                       compatible = "mpc5200-psc-ac97\0mpc52xx-psc-ac97";
+                       reg = <2c00 100>;
+                       interrupts = <2 4 0>;
+                       interrupt-parent = <500>;
+               };
+
+               ethernet@3000 {
+                       device_type = "network";
+                       compatible = "mpc5200-fec\0mpc52xx-fec";
+                       reg = <3000 800>;
+                       mac-address = [ 02 03 04 05 06 07 ]; // Bad!
+                       interrupts = <2 5 0>;
+                       interrupt-parent = <500>;
+               };
+
+               ata@3a00 {
+                       device_type = "ata";
+                       compatible = "mpc5200-ata\0mpc52xx-ata";
+                       reg = <3a00 100>;
+                       interrupts = <2 7 0>;
+                       interrupt-parent = <500>;
+               };
+
+               i2c@3d00 {
+                       device_type = "i2c";
+                       compatible = "mpc5200-i2c\0mpc52xx-i2c";
+                       reg = <3d00 40>;
+                       interrupts = <2 f 0>;
+                       interrupt-parent = <500>;
+               };
+
+               i2c@3d40 {
+                       device_type = "i2c";
+                       compatible = "mpc5200-i2c\0mpc52xx-i2c";
+                       reg = <3d40 40>;
+                       interrupts = <2 10 0>;
+                       interrupt-parent = <500>;
+               };
+               sram@8000 {
+                       device_type = "sram";
+                       compatible = "mpc5200-sram\0mpc52xx-sram\0sram";
+                       reg = <8000 4000>;
+               };
+       };
+};
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
new file mode 100644 (file)
index 0000000..81cb764
--- /dev/null
@@ -0,0 +1,318 @@
+/*
+ * Lite5200B board Device Tree Source
+ *
+ * Copyright 2006 Secret Lab Technologies Ltd.
+ * Grant Likely <grant.likely@secretlab.ca>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/ {
+       model = "Lite5200b";
+       compatible = "lite5200b\0lite52xx\0mpc5200b\0mpc52xx";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #cpus = <1>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               PowerPC,5200@0 {
+                       device_type = "cpu";
+                       reg = <0>;
+                       d-cache-line-size = <20>;
+                       i-cache-line-size = <20>;
+                       d-cache-size = <4000>;          // L1, 16K
+                       i-cache-size = <4000>;          // L1, 16K
+                       timebase-frequency = <0>;       // from bootloader
+                       bus-frequency = <0>;            // from bootloader
+                       clock-frequency = <0>;          // from bootloader
+                       32-bit;
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <00000000 10000000>;      // 256MB
+       };
+
+       soc5200@f0000000 {
+               #interrupt-cells = <3>;
+               device_type = "soc";
+               ranges = <0 f0000000 f0010000>;
+               reg = <f0000000 00010000>;
+               bus-frequency = <0>;            // from bootloader
+
+               cdm@200 {
+                       compatible = "mpc5200b-cdm\0mpc52xx-cdm";
+                       reg = <200 38>;
+               };
+
+               pic@500 {
+                       // 5200 interrupts are encoded into two levels;
+                       linux,phandle = <500>;
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       device_type = "interrupt-controller";
+                       compatible = "mpc5200b-pic\0mpc52xx-pic";
+                       reg = <500 80>;
+                       built-in;
+               };
+
+               gpt@600 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <600 10>;
+                       interrupts = <1 9 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@610 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <610 10>;
+                       interrupts = <1 a 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@620 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <620 10>;
+                       interrupts = <1 b 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@630 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <630 10>;
+                       interrupts = <1 c 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@640 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <640 10>;
+                       interrupts = <1 d 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@650 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <650 10>;
+                       interrupts = <1 e 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@660 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <660 10>;
+                       interrupts = <1 f 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpt@670 {       // General Purpose Timer
+                       compatible = "mpc5200b-gpt\0mpc52xx-gpt";
+                       device_type = "gpt";
+                       reg = <670 10>;
+                       interrupts = <1 10 0>;
+                       interrupt-parent = <500>;
+               };
+
+               rtc@800 {       // Real time clock
+                       compatible = "mpc5200b-rtc\0mpc52xx-rtc";
+                       device_type = "rtc";
+                       reg = <800 100>;
+                       interrupts = <1 5 0 1 6 0>;
+                       interrupt-parent = <500>;
+               };
+
+               mscan@900 {
+                       device_type = "mscan";
+                       compatible = "mpc5200b-mscan\0mpc52xx-mscan";
+                       interrupts = <2 11 0>;
+                       interrupt-parent = <500>;
+                       reg = <900 80>;
+               };
+
+               mscan@980 {
+                       device_type = "mscan";
+                       compatible = "mpc5200b-mscan\0mpc52xx-mscan";
+                       interrupts = <1 12 0>;
+                       interrupt-parent = <500>;
+                       reg = <980 80>;
+               };
+
+               gpio@b00 {
+                       compatible = "mpc5200b-gpio\0mpc52xx-gpio";
+                       reg = <b00 40>;
+                       interrupts = <1 7 0>;
+                       interrupt-parent = <500>;
+               };
+
+               gpio-wkup@b00 {
+                       compatible = "mpc5200b-gpio-wkup\0mpc52xx-gpio-wkup";
+                       reg = <c00 40>;
+                       interrupts = <1 8 0 0 3 0>;
+                       interrupt-parent = <500>;
+               };
+
+               pci@0d00 {
+                       #interrupt-cells = <1>;
+                       #size-cells = <2>;
+                       #address-cells = <3>;
+                       device_type = "pci";
+                       compatible = "mpc5200b-pci\0mpc52xx-pci";
+                       reg = <d00 100>;
+                       interrupt-map-mask = <f800 0 0 7>;
+                       interrupt-map = <c000 0 0 1 500 0 0 3 // 1st slot
+                                        c000 0 0 2 500 1 1 3
+                                        c000 0 0 3 500 1 2 3
+                                        c000 0 0 4 500 1 3 3
+
+                                        c800 0 0 1 500 1 1 3 // 2nd slot
+                                        c800 0 0 2 500 1 2 3
+                                        c800 0 0 3 500 1 3 3
+                                        c800 0 0 4 500 0 0 3>;
+                       clock-frequency = <0>; // From boot loader
+                       interrupts = <2 8 0 2 9 0 2 a 0>;
+                       interrupt-parent = <500>;
+                       bus-range = <0 0>;
+                       ranges = <42000000 0 80000000 80000000 0 20000000
+                                 02000000 0 a0000000 a0000000 0 10000000
+                                 01000000 0 00000000 b0000000 0 01000000>;
+               };
+
+               spi@f00 {
+                       device_type = "spi";
+                       compatible = "mpc5200b-spi\0mpc52xx-spi";
+                       reg = <f00 20>;
+                       interrupts = <2 d 0 2 e 0>;
+                       interrupt-parent = <500>;
+               };
+
+               usb@1000 {
+                       device_type = "usb-ohci-be";
+                       compatible = "mpc5200b-ohci\0mpc52xx-ohci\0ohci-be";
+                       reg = <1000 ff>;
+                       interrupts = <2 6 0>;
+                       interrupt-parent = <500>;
+               };
+
+               bestcomm@1200 {
+                       device_type = "dma-controller";
+                       compatible = "mpc5200b-bestcomm\0mpc52xx-bestcomm";
+                       reg = <1200 80>;
+                       interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
+                                     3 4 0  3 5 0  3 6 0  3 7 0
+                                     3 8 0  3 9 0  3 a 0  3 b 0
+                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                       interrupt-parent = <500>;
+               };
+
+               xlb@1f00 {
+                       compatible = "mpc5200b-xlb\0mpc52xx-xlb";
+                       reg = <1f00 100>;
+               };
+
+               serial@2000 {           // PSC1
+                       device_type = "serial";
+                       compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+                       port-number = <0>;  // Logical port assignment
+                       reg = <2000 100>;
+                       interrupts = <2 1 0>;
+                       interrupt-parent = <500>;
+               };
+
+               // PSC2 in spi mode example
+               spi@2200 {              // PSC2
+                       device_type = "spi";
+                       compatible = "mpc5200b-psc-spi\0mpc52xx-psc-spi";
+                       reg = <2200 100>;
+                       interrupts = <2 2 0>;
+                       interrupt-parent = <500>;
+               };
+
+               // PSC3 in CODEC mode example
+               i2s@2400 {              // PSC3
+                       device_type = "i2s";
+                       compatible = "mpc5200b-psc-i2s\0mpc52xx-psc-i2s";
+                       reg = <2400 100>;
+                       interrupts = <2 3 0>;
+                       interrupt-parent = <500>;
+               };
+
+               // PSC4 unconfigured
+               //serial@2600 {         // PSC4
+               //      device_type = "serial";
+               //      compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+               //      reg = <2600 100>;
+               //      interrupts = <2 b 0>;
+               //      interrupt-parent = <500>;
+               //};
+
+               // PSC5 unconfigured
+               //serial@2800 {         // PSC5
+               //      device_type = "serial";
+               //      compatible = "mpc5200b-psc-uart\0mpc52xx-psc-uart";
+               //      reg = <2800 100>;
+               //      interrupts = <2 c 0>;
+               //      interrupt-parent = <500>;
+               //};
+
+               // PSC6 in AC97 mode example
+               ac97@2c00 {             // PSC6
+                       device_type = "ac97";
+                       compatible = "mpc5200b-psc-ac97\0mpc52xx-psc-ac97";
+                       reg = <2c00 100>;
+                       interrupts = <2 4 0>;
+                       interrupt-parent = <500>;
+               };
+
+               ethernet@3000 {
+                       device_type = "network";
+                       compatible = "mpc5200b-fec\0mpc52xx-fec";
+                       reg = <3000 800>;
+                       mac-address = [ 02 03 04 05 06 07 ]; // Bad!
+                       interrupts = <2 5 0>;
+                       interrupt-parent = <500>;
+               };
+
+               ata@3a00 {
+                       device_type = "ata";
+                       compatible = "mpc5200b-ata\0mpc52xx-ata";
+                       reg = <3a00 100>;
+                       interrupts = <2 7 0>;
+                       interrupt-parent = <500>;
+               };
+
+               i2c@3d00 {
+                       device_type = "i2c";
+                       compatible = "mpc5200b-i2c\0mpc52xx-i2c";
+                       reg = <3d00 40>;
+                       interrupts = <2 f 0>;
+                       interrupt-parent = <500>;
+               };
+
+               i2c@3d40 {
+                       device_type = "i2c";
+                       compatible = "mpc5200b-i2c\0mpc52xx-i2c";
+                       reg = <3d40 40>;
+                       interrupts = <2 10 0>;
+                       interrupt-parent = <500>;
+               };
+               sram@8000 {
+                       device_type = "sram";
+                       compatible = "mpc5200b-sram\0mpc52xx-sram\0sram";
+                       reg = <8000 4000>;
+               };
+       };
+};
index d7b985e6bd2f18ac5b6e155f253e0b7c289aff58..c4d9562cbaadd40320f20d45db802ffbe85c77ad 100644 (file)
                        interrupt-map = <
 
                                /* IDSEL 0x11 */
-                               0800 0 0 1 7400 24 0
-                               0800 0 0 2 7400 25 0
-                               0800 0 0 3 7400 26 0
-                               0800 0 0 4 7400 27 0
+                               0800 0 0 1 1180 24 0
+                               0800 0 0 2 1180 25 0
+                               0800 0 0 3 1180 26 0
+                               0800 0 0 4 1180 27 0
 
                                /* IDSEL 0x12 */
-                               1000 0 0 1 7400 25 0
-                               1000 0 0 2 7400 26 0
-                               1000 0 0 3 7400 27 0
-                               1000 0 0 4 7400 24 0
+                               1000 0 0 1 1180 25 0
+                               1000 0 0 2 1180 26 0
+                               1000 0 0 3 1180 27 0
+                               1000 0 0 4 1180 24 0
 
                                /* IDSEL 0x13 */
-                               1800 0 0 1 7400 26 0
-                               1800 0 0 2 7400 27 0
-                               1800 0 0 3 7400 24 0
-                               1800 0 0 4 7400 25 0
+                               1800 0 0 1 1180 26 0
+                               1800 0 0 2 1180 27 0
+                               1800 0 0 3 1180 24 0
+                               1800 0 0 4 1180 25 0
 
                                /* IDSEL 0x14 */
-                               2000 0 0 1 7400 27 0
-                               2000 0 0 2 7400 24 0
-                               2000 0 0 3 7400 25 0
-                               2000 0 0 4 7400 26 0
+                               2000 0 0 1 1180 27 0
+                               2000 0 0 2 1180 24 0
+                               2000 0 0 3 1180 25 0
+                               2000 0 0 4 1180 26 0
                                >;
+                       router@1180 {
+                               linux,phandle = <1180>;
+                               clock-frequency = <0>;
+                               interrupt-controller;
+                               device_type = "pic-router";
+                               #address-cells = <0>;
+                               #interrupt-cells = <2>;
+                               built-in;
+                               big-endian;
+                               interrupts = <17 2>;
+                               interrupt-parent = <7400>;
+                       };
                };
        };
 
diff --git a/arch/powerpc/boot/flatdevtree.c b/arch/powerpc/boot/flatdevtree.c
new file mode 100644 (file)
index 0000000..c76c194
--- /dev/null
@@ -0,0 +1,880 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright Pantelis Antoniou 2006
+ * Copyright (C) IBM Corporation 2006
+ *
+ * Authors: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *         Hollis Blanchard <hollisb@us.ibm.com>
+ *         Mark A. Greer <mgreer@mvista.com>
+ *         Paul Mackerras <paulus@samba.org>
+ */
+
+#include <string.h>
+#include <stddef.h>
+#include "flatdevtree.h"
+#include "flatdevtree_env.h"
+
+#define _ALIGN(x, al)  (((x) + (al) - 1) & ~((al) - 1))
+
+/* Routines for keeping node ptrs returned by ft_find_device current */
+/* First entry is not used because index 0 would be mistaken for NULL/error */
+static void *ft_node_add(struct ft_cxt *cxt, char *node)
+{
+       unsigned int i;
+
+       for (i = 1; i < cxt->nodes_used; i++)   /* already there? */
+               if (cxt->node_tbl[i] == node)
+                       return (void *)i;
+
+       if (cxt->nodes_used < cxt->node_max) {
+               cxt->node_tbl[cxt->nodes_used] = node;
+               return (void *)cxt->nodes_used++;
+       }
+
+       return NULL;
+}
+
+static char *ft_node_ph2node(struct ft_cxt *cxt, const void *phandle)
+{
+       unsigned int i = (unsigned int)phandle;
+
+       if (i < cxt->nodes_used)
+               return cxt->node_tbl[i];
+       return NULL;
+}
+
+static void ft_node_update_before(struct ft_cxt *cxt, char *addr, int shift)
+{
+       unsigned int i;
+
+       if (shift == 0)
+               return;
+
+       for (i = 1; i < cxt->nodes_used; i++)
+               if (cxt->node_tbl[i] < addr)
+                       cxt->node_tbl[i] += shift;
+}
+
+static void ft_node_update_after(struct ft_cxt *cxt, char *addr, int shift)
+{
+       unsigned int i;
+
+       if (shift == 0)
+               return;
+
+       for (i = 1; i < cxt->nodes_used; i++)
+               if (cxt->node_tbl[i] >= addr)
+                       cxt->node_tbl[i] += shift;
+}
+
+/* Struct used to return info from ft_next() */
+struct ft_atom {
+       u32 tag;
+       const char *name;
+       void *data;
+       u32 size;
+};
+
+/* Set ptrs to current one's info; return addr of next one */
+static char *ft_next(struct ft_cxt *cxt, char *p, struct ft_atom *ret)
+{
+       u32 sz;
+
+       if (p >= cxt->rgn[FT_STRUCT].start + cxt->rgn[FT_STRUCT].size)
+               return NULL;
+
+       ret->tag = be32_to_cpu(*(u32 *) p);
+       p += 4;
+
+       switch (ret->tag) {     /* Tag */
+       case OF_DT_BEGIN_NODE:
+               ret->name = p;
+               ret->data = (void *)(p - 4);    /* start of node */
+               p += _ALIGN(strlen(p) + 1, 4);
+               break;
+       case OF_DT_PROP:
+               ret->size = sz = be32_to_cpu(*(u32 *) p);
+               ret->name = cxt->str_anchor + be32_to_cpu(*(u32 *) (p + 4));
+               ret->data = (void *)(p + 8);
+               p += 8 + _ALIGN(sz, 4);
+               break;
+       case OF_DT_END_NODE:
+       case OF_DT_NOP:
+               break;
+       case OF_DT_END:
+       default:
+               p = NULL;
+               break;
+       }
+
+       return p;
+}
+
+#define HDR_SIZE       _ALIGN(sizeof(struct boot_param_header), 8)
+#define EXPAND_INCR    1024    /* alloc this much extra when expanding */
+
+/* See if the regions are in the standard order and non-overlapping */
+static int ft_ordered(struct ft_cxt *cxt)
+{
+       char *p = (char *)cxt->bph + HDR_SIZE;
+       enum ft_rgn_id r;
+
+       for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+               if (p > cxt->rgn[r].start)
+                       return 0;
+               p = cxt->rgn[r].start + cxt->rgn[r].size;
+       }
+       return p <= (char *)cxt->bph + cxt->max_size;
+}
+
+/* Copy the tree to a newly-allocated region and put things in order */
+static int ft_reorder(struct ft_cxt *cxt, int nextra)
+{
+       unsigned long tot;
+       enum ft_rgn_id r;
+       char *p, *pend;
+       int stroff;
+
+       tot = HDR_SIZE + EXPAND_INCR;
+       for (r = FT_RSVMAP; r <= FT_STRINGS; ++r)
+               tot += cxt->rgn[r].size;
+       if (nextra > 0)
+               tot += nextra;
+       tot = _ALIGN(tot, 8);
+
+       if (!cxt->realloc)
+               return 0;
+       p = cxt->realloc(NULL, tot);
+       if (!p)
+               return 0;
+
+       memcpy(p, cxt->bph, sizeof(struct boot_param_header));
+       /* offsets get fixed up later */
+
+       cxt->bph = (struct boot_param_header *)p;
+       cxt->max_size = tot;
+       pend = p + tot;
+       p += HDR_SIZE;
+
+       memcpy(p, cxt->rgn[FT_RSVMAP].start, cxt->rgn[FT_RSVMAP].size);
+       cxt->rgn[FT_RSVMAP].start = p;
+       p += cxt->rgn[FT_RSVMAP].size;
+
+       memcpy(p, cxt->rgn[FT_STRUCT].start, cxt->rgn[FT_STRUCT].size);
+       ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+                       p - cxt->rgn[FT_STRUCT].start);
+       cxt->p += p - cxt->rgn[FT_STRUCT].start;
+       cxt->rgn[FT_STRUCT].start = p;
+
+       p = pend - cxt->rgn[FT_STRINGS].size;
+       memcpy(p, cxt->rgn[FT_STRINGS].start, cxt->rgn[FT_STRINGS].size);
+       stroff = cxt->str_anchor - cxt->rgn[FT_STRINGS].start;
+       cxt->rgn[FT_STRINGS].start = p;
+       cxt->str_anchor = p + stroff;
+
+       cxt->isordered = 1;
+       return 1;
+}
+
+static inline char *prev_end(struct ft_cxt *cxt, enum ft_rgn_id r)
+{
+       if (r > FT_RSVMAP)
+               return cxt->rgn[r - 1].start + cxt->rgn[r - 1].size;
+       return (char *)cxt->bph + HDR_SIZE;
+}
+
+static inline char *next_start(struct ft_cxt *cxt, enum ft_rgn_id r)
+{
+       if (r < FT_STRINGS)
+               return cxt->rgn[r + 1].start;
+       return (char *)cxt->bph + cxt->max_size;
+}
+
+/*
+ * See if we can expand region rgn by nextra bytes by using up
+ * free space after or before the region.
+ */
+static int ft_shuffle(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+               int nextra)
+{
+       char *p = *pp;
+       char *rgn_start, *rgn_end;
+
+       rgn_start = cxt->rgn[rgn].start;
+       rgn_end = rgn_start + cxt->rgn[rgn].size;
+       if (nextra <= 0 || rgn_end + nextra <= next_start(cxt, rgn)) {
+               /* move following stuff */
+               if (p < rgn_end) {
+                       if (nextra < 0)
+                               memmove(p, p - nextra, rgn_end - p + nextra);
+                       else
+                               memmove(p + nextra, p, rgn_end - p);
+                       if (rgn == FT_STRUCT)
+                               ft_node_update_after(cxt, p, nextra);
+               }
+               cxt->rgn[rgn].size += nextra;
+               if (rgn == FT_STRINGS)
+                       /* assumes strings only added at beginning */
+                       cxt->str_anchor += nextra;
+               return 1;
+       }
+       if (prev_end(cxt, rgn) <= rgn_start - nextra) {
+               /* move preceding stuff */
+               if (p > rgn_start) {
+                       memmove(rgn_start - nextra, rgn_start, p - rgn_start);
+                       if (rgn == FT_STRUCT)
+                               ft_node_update_before(cxt, p, -nextra);
+               }
+               *pp -= nextra;
+               cxt->rgn[rgn].start -= nextra;
+               cxt->rgn[rgn].size += nextra;
+               return 1;
+       }
+       return 0;
+}
+
+static int ft_make_space(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
+                        int nextra)
+{
+       unsigned long size, ssize, tot;
+       char *str, *next;
+       enum ft_rgn_id r;
+
+       if (!cxt->isordered && !ft_reorder(cxt, nextra))
+               return 0;
+       if (ft_shuffle(cxt, pp, rgn, nextra))
+               return 1;
+
+       /* See if there is space after the strings section */
+       ssize = cxt->rgn[FT_STRINGS].size;
+       if (cxt->rgn[FT_STRINGS].start + ssize
+                       < (char *)cxt->bph + cxt->max_size) {
+               /* move strings up as far as possible */
+               str = (char *)cxt->bph + cxt->max_size - ssize;
+               cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+               memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+               cxt->rgn[FT_STRINGS].start = str;
+               /* enough space now? */
+               if (rgn >= FT_STRUCT && ft_shuffle(cxt, pp, rgn, nextra))
+                       return 1;
+       }
+
+       /* how much total free space is there following this region? */
+       tot = 0;
+       for (r = rgn; r < FT_STRINGS; ++r) {
+               char *r_end = cxt->rgn[r].start + cxt->rgn[r].size;
+               tot += next_start(cxt, r) - r_end;
+       }
+
+       /* cast is to shut gcc up; we know nextra >= 0 */
+       if (tot < (unsigned int)nextra) {
+               /* have to reallocate */
+               char *newp, *new_start;
+               int shift;
+
+               if (!cxt->realloc)
+                       return 0;
+               size = _ALIGN(cxt->max_size + (nextra - tot) + EXPAND_INCR, 8);
+               newp = cxt->realloc(cxt->bph, size);
+               if (!newp)
+                       return 0;
+               cxt->max_size = size;
+               shift = newp - (char *)cxt->bph;
+
+               if (shift) { /* realloc can return same addr */
+                       cxt->bph = (struct boot_param_header *)newp;
+                       ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
+                                       shift);
+                       for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
+                               new_start = cxt->rgn[r].start + shift;
+                               cxt->rgn[r].start = new_start;
+                       }
+                       *pp += shift;
+                       cxt->str_anchor += shift;
+               }
+
+               /* move strings up to the end */
+               str = newp + size - ssize;
+               cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
+               memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
+               cxt->rgn[FT_STRINGS].start = str;
+
+               if (ft_shuffle(cxt, pp, rgn, nextra))
+                       return 1;
+       }
+
+       /* must be FT_RSVMAP and we need to move FT_STRUCT up */
+       if (rgn == FT_RSVMAP) {
+               next = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+                       + nextra;
+               ssize = cxt->rgn[FT_STRUCT].size;
+               if (next + ssize >= cxt->rgn[FT_STRINGS].start)
+                       return 0;       /* "can't happen" */
+               memmove(next, cxt->rgn[FT_STRUCT].start, ssize);
+               ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start, nextra);
+               cxt->rgn[FT_STRUCT].start = next;
+
+               if (ft_shuffle(cxt, pp, rgn, nextra))
+                       return 1;
+       }
+
+       return 0;               /* "can't happen" */
+}
+
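+/* append one 32-bit big-endian word at the current insertion point */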
+static void ft_put_word(struct ft_cxt *cxt, u32 v)
+{
+       *(u32 *) cxt->p = cpu_to_be32(v);
+       cxt->p += 4;
+}
+
+static void ft_put_bin(struct ft_cxt *cxt, const void *data, unsigned int sz)
+{
+       unsigned long sza = _ALIGN(sz, 4);
+
+       /* zero out the alignment gap if necessary */
+       if (sz < sza)
+               *(u32 *) (cxt->p + sza - 4) = 0;
+
+       /* copy in the data */
+       memcpy(cxt->p, data, sz);
+
+       cxt->p += sza;
+}
+
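+/* start a new node: emit OF_DT_BEGIN_NODE followed by the NUL-terminated name,
+ * padded to a 4-byte boundary */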
+int ft_begin_node(struct ft_cxt *cxt, const char *name)
+{
+       unsigned long nlen = strlen(name) + 1;
+       unsigned long len = 8 + _ALIGN(nlen, 4);
+
+       if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+               return -1;
+       ft_put_word(cxt, OF_DT_BEGIN_NODE);
+       ft_put_bin(cxt, name, strlen(name) + 1);
+       return 0;
+}
+
+void ft_end_node(struct ft_cxt *cxt)
+{
+       ft_put_word(cxt, OF_DT_END_NODE);
+}
+
+void ft_nop(struct ft_cxt *cxt)
+{
+       if (ft_make_space(cxt, &cxt->p, FT_STRUCT, 4))
+               ft_put_word(cxt, OF_DT_NOP);
+}
+
+#define NO_STRING      0x7fffffff
+
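+/* return the offset of name from str_anchor if it is already in the strings
+ * section, else NO_STRING */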
+static int lookup_string(struct ft_cxt *cxt, const char *name)
+{
+       char *p, *end;
+
+       p = cxt->rgn[FT_STRINGS].start;
+       end = p + cxt->rgn[FT_STRINGS].size;
+       while (p < end) {
+               if (strcmp(p, (char *)name) == 0)
+                       return p - cxt->str_anchor;
+               p += strlen(p) + 1;
+       }
+
+       return NO_STRING;
+}
+
+/* lookup string and insert if not found */
+static int map_string(struct ft_cxt *cxt, const char *name)
+{
+       int off;
+       char *p;
+
+       off = lookup_string(cxt, name);
+       if (off != NO_STRING)
+               return off;
+       p = cxt->rgn[FT_STRINGS].start;
+       if (!ft_make_space(cxt, &p, FT_STRINGS, strlen(name) + 1))
+               return NO_STRING;
+       strcpy(p, name);
+       return p - cxt->str_anchor;
+}
+
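+/* append an OF_DT_PROP record; the property name must already exist in the
+ * strings section (see map_string) */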
+int ft_prop(struct ft_cxt *cxt, const char *name, const void *data,
+               unsigned int sz)
+{
+       int off, len;
+
+       off = lookup_string(cxt, name);
+       if (off == NO_STRING)
+               return -1;
+
+       len = 12 + _ALIGN(sz, 4);
+       if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
+               return -1;
+
+       ft_put_word(cxt, OF_DT_PROP);
+       ft_put_word(cxt, sz);
+       ft_put_word(cxt, off);
+       ft_put_bin(cxt, data, sz);
+       return 0;
+}
+
+int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str)
+{
+       return ft_prop(cxt, name, str, strlen(str) + 1);
+}
+
+int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val)
+{
+       u32 v = cpu_to_be32((u32) val);
+
+       return ft_prop(cxt, name, &v, 4);
+}
+
+/* Calculate the size of the reserved map */
+static unsigned long rsvmap_size(struct ft_cxt *cxt)
+{
+       struct ft_reserve *res;
+
+       res = (struct ft_reserve *)cxt->rgn[FT_RSVMAP].start;
+       while (res->start || res->len)
+               ++res;
+       return (char *)(res + 1) - cxt->rgn[FT_RSVMAP].start;
+}
+
+/* Calculate the size of the struct region by stepping through it */
+static unsigned long struct_size(struct ft_cxt *cxt)
+{
+       char *p = cxt->rgn[FT_STRUCT].start;
+       char *next;
+       struct ft_atom atom;
+
+       /* make check in ft_next happy */
+       if (cxt->rgn[FT_STRUCT].size == 0)
+               cxt->rgn[FT_STRUCT].size = 0xfffffffful - (unsigned long)p;
+
+       while ((next = ft_next(cxt, p, &atom)) != NULL)
+               p = next;
+       return p + 4 - cxt->rgn[FT_STRUCT].start;
+}
+
+/* add `adj' on to all string offset values in the struct area */
+static void adjust_string_offsets(struct ft_cxt *cxt, int adj)
+{
+       char *p = cxt->rgn[FT_STRUCT].start;
+       char *next;
+       struct ft_atom atom;
+       int off;
+
+       while ((next = ft_next(cxt, p, &atom)) != NULL) {
+               if (atom.tag == OF_DT_PROP) {
+                       off = be32_to_cpu(*(u32 *) (p + 8));
+                       *(u32 *) (p + 8) = cpu_to_be32(off + adj);
+               }
+               p = next;
+       }
+}
+
+/* start construction of the flat OF tree from scratch */
+void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+               void *(*realloc_fn) (void *, unsigned long))
+{
+       struct boot_param_header *bph = blob;
+       char *p;
+       struct ft_reserve *pres;
+
+       /* clear the cxt */
+       memset(cxt, 0, sizeof(*cxt));
+
+       cxt->bph = bph;
+       cxt->max_size = max_size;
+       cxt->realloc = realloc_fn;
+       cxt->isordered = 1;
+
+       /* zero everything in the header area */
+       memset(bph, 0, sizeof(*bph));
+
+       bph->magic = cpu_to_be32(OF_DT_HEADER);
+       bph->version = cpu_to_be32(0x10);
+       bph->last_comp_version = cpu_to_be32(0x10);
+
+       /* start pointers */
+       cxt->rgn[FT_RSVMAP].start = p = blob + HDR_SIZE;
+       cxt->rgn[FT_RSVMAP].size = sizeof(struct ft_reserve);
+       pres = (struct ft_reserve *)p;
+       cxt->rgn[FT_STRUCT].start = p += sizeof(struct ft_reserve);
+       cxt->rgn[FT_STRUCT].size = 4;
+       cxt->rgn[FT_STRINGS].start = blob + max_size;
+       cxt->rgn[FT_STRINGS].size = 0;
+
+       /* init rsvmap and struct */
+       pres->start = 0;
+       pres->len = 0;
+       *(u32 *) p = cpu_to_be32(OF_DT_END);
+
+       cxt->str_anchor = blob;
+}
+
+/* open up an existing blob to be examined or modified */
+int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+               unsigned int max_find_device,
+               void *(*realloc_fn) (void *, unsigned long))
+{
+       struct boot_param_header *bph = blob;
+
+       /* can't cope with version < 16 */
+       if (be32_to_cpu(bph->version) < 16)
+               return -1;
+
+       /* clear the cxt */
+       memset(cxt, 0, sizeof(*cxt));
+
+       /* alloc node_tbl to track node ptrs returned by ft_find_device */
+       ++max_find_device;
+       cxt->node_tbl = realloc_fn(NULL, max_find_device * sizeof(char *));
+       if (!cxt->node_tbl)
+               return -1;
+       memset(cxt->node_tbl, 0, max_find_device * sizeof(char *));
+       cxt->node_max = max_find_device;
+       cxt->nodes_used = 1;    /* don't use index 0 because it would look like NULL */
+
+       cxt->bph = bph;
+       cxt->max_size = max_size;
+       cxt->realloc = realloc_fn;
+
+       cxt->rgn[FT_RSVMAP].start = blob + be32_to_cpu(bph->off_mem_rsvmap);
+       cxt->rgn[FT_RSVMAP].size = rsvmap_size(cxt);
+       cxt->rgn[FT_STRUCT].start = blob + be32_to_cpu(bph->off_dt_struct);
+       cxt->rgn[FT_STRUCT].size = struct_size(cxt);
+       cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
+       cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);
+       /* Leave isordered as 0 so that the first ft_make_space call does a
+        * ft_reorder and moves the tree to an area allocated by realloc:
+        * cxt->isordered = ft_ordered(cxt);
+        */
+
+       cxt->p = cxt->rgn[FT_STRUCT].start;
+       cxt->str_anchor = cxt->rgn[FT_STRINGS].start;
+
+       return 0;
+}
+
+/* add a reserved physical area to the rsvmap */
+int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size)
+{
+       char *p;
+       struct ft_reserve *pres;
+
+       p = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
+               - sizeof(struct ft_reserve);
+       if (!ft_make_space(cxt, &p, FT_RSVMAP, sizeof(struct ft_reserve)))
+               return -1;
+
+       pres = (struct ft_reserve *)p;
+       pres->start = cpu_to_be64(physaddr);
+       pres->len = cpu_to_be64(size);
+
+       return 0;
+}
+
+void ft_begin_tree(struct ft_cxt *cxt)
+{
+       cxt->p = cxt->rgn[FT_STRUCT].start;
+}
+
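+/* finalize the blob: pack the strings right after the struct region and fill
+ * in the header offsets and sizes */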
+void ft_end_tree(struct ft_cxt *cxt)
+{
+       struct boot_param_header *bph = cxt->bph;
+       char *p, *oldstr, *str, *endp;
+       unsigned long ssize;
+       int adj;
+
+       if (!cxt->isordered)
+               return;         /* we haven't touched anything */
+
+       /* adjust string offsets */
+       oldstr = cxt->rgn[FT_STRINGS].start;
+       adj = cxt->str_anchor - oldstr;
+       if (adj)
+               adjust_string_offsets(cxt, adj);
+
+       /* make strings end on 8-byte boundary */
+       ssize = cxt->rgn[FT_STRINGS].size;
+       endp = (char *)_ALIGN((unsigned long)cxt->rgn[FT_STRUCT].start
+                       + cxt->rgn[FT_STRUCT].size + ssize, 8);
+       str = endp - ssize;
+
+       /* move strings down to end of structs */
+       memmove(str, oldstr, ssize);
+       cxt->str_anchor = str;
+       cxt->rgn[FT_STRINGS].start = str;
+
+       /* fill in header fields */
+       p = (char *)bph;
+       bph->totalsize = cpu_to_be32(endp - p);
+       bph->off_mem_rsvmap = cpu_to_be32(cxt->rgn[FT_RSVMAP].start - p);
+       bph->off_dt_struct = cpu_to_be32(cxt->rgn[FT_STRUCT].start - p);
+       bph->off_dt_strings = cpu_to_be32(cxt->rgn[FT_STRINGS].start - p);
+       bph->dt_strings_size = cpu_to_be32(ssize);
+}
+
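+/* resolve an absolute path to a node and return a small-integer handle for
+ * use as a phandle by the accessors below */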
+void *ft_find_device(struct ft_cxt *cxt, const char *srch_path)
+{
+       char *node;
+
+       /* require absolute path */
+       if (srch_path[0] != '/')
+               return NULL;
+       node = ft_find_descendent(cxt, cxt->rgn[FT_STRUCT].start, srch_path);
+       return ft_node_add(cxt, node);
+}
+
+void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path)
+{
+       struct ft_atom atom;
+       char *p;
+       const char *cp, *q;
+       int cl;
+       int depth = -1;
+       int dmatch = 0;
+       const char *path_comp[FT_MAX_DEPTH];
+
+       cp = srch_path;
+       cl = 0;
+       p = top;
+
+       while ((p = ft_next(cxt, p, &atom)) != NULL) {
+               switch (atom.tag) {
+               case OF_DT_BEGIN_NODE:
+                       ++depth;
+                       if (depth != dmatch)
+                               break;
+                       cxt->genealogy[depth] = atom.data;
+                       cxt->genealogy[depth + 1] = NULL;
+                       if (depth && !(strncmp(atom.name, cp, cl) == 0
+                                       && (atom.name[cl] == '/'
+                                               || atom.name[cl] == '\0'
+                                               || atom.name[cl] == '@')))
+                               break;
+                       path_comp[dmatch] = cp;
+                       /* it matches so far, advance to next path component */
+                       cp += cl;
+                       /* skip slashes */
+                       while (*cp == '/')
+                               ++cp;
+                       /* we're done if this is the end of the string */
+                       if (*cp == 0)
+                               return atom.data;
+                       /* look for end of this component */
+                       q = strchr(cp, '/');
+                       if (q)
+                               cl = q - cp;
+                       else
+                               cl = strlen(cp);
+                       ++dmatch;
+                       break;
+               case OF_DT_END_NODE:
+                       if (depth == 0)
+                               return NULL;
+                       if (dmatch > depth) {
+                               --dmatch;
+                               cl = cp - path_comp[dmatch] - 1;
+                               cp = path_comp[dmatch];
+                               while (cl > 0 && cp[cl - 1] == '/')
+                                       --cl;
+                       }
+                       --depth;
+                       break;
+               }
+       }
+       return NULL;
+}
+
+void *ft_get_parent(struct ft_cxt *cxt, const void *phandle)
+{
+       void *node;
+       int d;
+       struct ft_atom atom;
+       char *p;
+
+       node = ft_node_ph2node(cxt, phandle);
+       if (node == NULL)
+               return NULL;
+
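+       /* fast path: the genealogy array cached by the last search may already contain this node */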
+       for (d = 0; cxt->genealogy[d] != NULL; ++d)
+               if (cxt->genealogy[d] == node)
+                       return cxt->genealogy[d > 0 ? d - 1 : 0];
+
+       /* have to do it the hard way... */
+       p = cxt->rgn[FT_STRUCT].start;
+       d = 0;
+       while ((p = ft_next(cxt, p, &atom)) != NULL) {
+               switch (atom.tag) {
+               case OF_DT_BEGIN_NODE:
+                       cxt->genealogy[d] = atom.data;
+                       if (node == atom.data) {
+                               /* found it */
+                               cxt->genealogy[d + 1] = NULL;
+                               return d > 0 ? cxt->genealogy[d - 1] : node;
+                       }
+                       ++d;
+                       break;
+               case OF_DT_END_NODE:
+                       --d;
+                       break;
+               }
+       }
+       return NULL;
+}
+
+int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+               void *buf, const unsigned int buflen)
+{
+       struct ft_atom atom;
+       void *node;
+       char *p;
+       int depth;
+       unsigned int size;
+
+       node = ft_node_ph2node(cxt, phandle);
+       if (node == NULL)
+               return -1;
+
+       depth = 0;
+       p = (char *)node;
+
+       while ((p = ft_next(cxt, p, &atom)) != NULL) {
+               switch (atom.tag) {
+               case OF_DT_BEGIN_NODE:
+                       ++depth;
+                       break;
+               case OF_DT_PROP:
+                       if ((depth != 1) || strcmp(atom.name, propname))
+                               break;
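+                       /* copy only what fits in the buffer, but report the full property size */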
+                       size = min(atom.size, buflen);
+                       memcpy(buf, atom.data, size);
+                       return atom.size;
+               case OF_DT_END_NODE:
+                       if (--depth <= 0)
+                               return -1;
+               }
+       }
+       return -1;
+}
+
+int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+               const void *buf, const unsigned int buflen)
+{
+       struct ft_atom atom;
+       void *node;
+       char *p, *next;
+       int nextra, depth;
+
+       node = ft_node_ph2node(cxt, phandle);
+       if (node == NULL)
+               return -1;
+
+       depth = 0;
+       p = node;
+
+       while ((next = ft_next(cxt, p, &atom)) != NULL) {
+               switch (atom.tag) {
+               case OF_DT_BEGIN_NODE:
+                       ++depth;
+                       break;
+               case OF_DT_END_NODE:
+                       if (--depth > 0)
+                               break;
+                       /* haven't found the property, insert here */
+                       cxt->p = p;
+                       return ft_prop(cxt, propname, buf, buflen);
+               case OF_DT_PROP:
+                       if ((depth != 1) || strcmp(atom.name, propname))
+                               break;
+                       /* found an existing property, overwrite it */
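+                       /* grow or shrink the value area to the new 4-byte-aligned length */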
+                       nextra = _ALIGN(buflen, 4) - _ALIGN(atom.size, 4);
+                       cxt->p = atom.data;
+                       if (nextra && !ft_make_space(cxt, &cxt->p, FT_STRUCT,
+                                               nextra))
+                               return -1;
+                       *(u32 *) (cxt->p - 8) = cpu_to_be32(buflen);
+                       ft_put_bin(cxt, buf, buflen);
+                       return 0;
+               }
+               p = next;
+       }
+       return -1;
+}
+
+int ft_del_prop(struct ft_cxt *cxt, const void *phandle, const char *propname)
+{
+       struct ft_atom atom;
+       void *node;
+       char *p, *next;
+       int size;
+
+       node = ft_node_ph2node(cxt, phandle);
+       if (node == NULL)
+               return -1;
+
+       p = node;
+       while ((next = ft_next(cxt, p, &atom)) != NULL) {
+               switch (atom.tag) {
+               case OF_DT_BEGIN_NODE:
+               case OF_DT_END_NODE:
+                       return -1;
+               case OF_DT_PROP:
+                       if (strcmp(atom.name, propname))
+                               break;
+                       /* found the property, remove it */
+                       size = 12 + _ALIGN(atom.size, 4);
+                       cxt->p = p;
+                       if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, -size))
+                               return -1;
+                       return 0;
+               }
+               p = next;
+       }
+       return -1;
+}
+
+void *ft_create_node(struct ft_cxt *cxt, const void *parent, const char *path)
+{
+       struct ft_atom atom;
+       char *p, *next;
+       int depth = 0;
+
+       p = cxt->rgn[FT_STRUCT].start;
+       while ((next = ft_next(cxt, p, &atom)) != NULL) {
+               switch (atom.tag) {
+               case OF_DT_BEGIN_NODE:
+                       ++depth;
+                       if (depth == 1 && strcmp(atom.name, path) == 0)
+                               /* duplicate node path, return error */
+                               return NULL;
+                       break;
+               case OF_DT_END_NODE:
+                       --depth;
+                       if (depth > 0)
+                               break;
+                       /* end of node, insert here */
+                       cxt->p = p;
+                       ft_begin_node(cxt, path);
+                       ft_end_node(cxt);
+                       return p;
+               }
+               p = next;
+       }
+       return NULL;
+}
index 761c8dc840080543b35276883328c939dff4fa3b..b9cd9f61f351e47df2032c6328fbfefafdf007ec 100644 (file)
@@ -17,7 +17,7 @@
 #ifndef FLATDEVTREE_H
 #define FLATDEVTREE_H
 
-#include "types.h"
+#include "flatdevtree_env.h"
 
 /* Definitions used by the flattened device tree */
 #define OF_DT_HEADER            0xd00dfeed      /* marker */
@@ -43,4 +43,64 @@ struct boot_param_header {
        u32 dt_strings_size;    /* size of the DT strings block */
 };
 
+struct ft_reserve {
+       u64 start;
+       u64 len;
+};
+
+struct ft_region {
+       char *start;
+       unsigned long size;
+};
+
+enum ft_rgn_id {
+       FT_RSVMAP,
+       FT_STRUCT,
+       FT_STRINGS,
+       FT_N_REGION
+};
+
+#define FT_MAX_DEPTH   50
+
+struct ft_cxt {
+       struct boot_param_header *bph;
+       int max_size;           /* maximum size of tree */
+       int isordered;          /* everything in standard order */
+       void *(*realloc)(void *, unsigned long);
+       char *str_anchor;
+       char *p;                /* current insertion point in structs */
+       struct ft_region rgn[FT_N_REGION];
+       void *genealogy[FT_MAX_DEPTH+1];
+       char **node_tbl;
+       unsigned int node_max;
+       unsigned int nodes_used;
+};
+
+int ft_begin_node(struct ft_cxt *cxt, const char *name);
+void ft_end_node(struct ft_cxt *cxt);
+
+void ft_begin_tree(struct ft_cxt *cxt);
+void ft_end_tree(struct ft_cxt *cxt);
+
+void ft_nop(struct ft_cxt *cxt);
+int ft_prop(struct ft_cxt *cxt, const char *name,
+           const void *data, unsigned int sz);
+int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str);
+int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val);
+void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+             void *(*realloc_fn)(void *, unsigned long));
+int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
+               unsigned int max_find_device,
+               void *(*realloc_fn)(void *, unsigned long));
+int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size);
+
+void ft_dump_blob(const void *bphp);
+void ft_merge_blob(struct ft_cxt *cxt, void *blob);
+void *ft_find_device(struct ft_cxt *cxt, const char *srch_path);
+void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path);
+int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+               void *buf, const unsigned int buflen);
+int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
+               const void *buf, const unsigned int buflen);
+
 #endif /* FLATDEVTREE_H */
diff --git a/arch/powerpc/boot/flatdevtree_env.h b/arch/powerpc/boot/flatdevtree_env.h
new file mode 100644 (file)
index 0000000..83bc1c7
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * This file adds the header file glue so that the shared files
+ * flatdevicetree.[ch] can compile and work in the powerpc bootwrapper.
+ *
+ * strncmp & strchr copied from <file:lib/string.c>
+ * Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * Maintained by: Mark A. Greer <mgreer@mvista.com>
+ */
+#ifndef _PPC_BOOT_FLATDEVTREE_ENV_H_
+#define _PPC_BOOT_FLATDEVTREE_ENV_H_
+
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "ops.h"
+
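+/* the bootwrapper runs big-endian, so these byte-order conversions are no-ops */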
+#define be16_to_cpu(x)         (x)
+#define cpu_to_be16(x)         (x)
+#define be32_to_cpu(x)         (x)
+#define cpu_to_be32(x)         (x)
+#define be64_to_cpu(x)         (x)
+#define cpu_to_be64(x)         (x)
+
+static inline int strncmp(const char *cs, const char *ct, size_t count)
+{
+       signed char __res = 0;
+
+       while (count) {
+               if ((__res = *cs - *ct++) != 0 || !*cs++)
+                       break;
+               count--;
+       }
+       return __res;
+}
+
+static inline char *strchr(const char *s, int c)
+{
+       for (; *s != (char)c; ++s)
+               if (*s == '\0')
+                       return NULL;
+       return (char *)s;
+}
+
+#endif /* _PPC_BOOT_FLATDEVTREE_ENV_H_ */
diff --git a/arch/powerpc/boot/flatdevtree_misc.c b/arch/powerpc/boot/flatdevtree_misc.c
new file mode 100644 (file)
index 0000000..04da38f
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * This file does the necessary interface mapping between the bootwrapper
+ * device tree operations and the interface provided by shared source
+ * files flatdevicetree.[ch].
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) MontaVista Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <stddef.h>
+#include "flatdevtree.h"
+#include "ops.h"
+
+static struct ft_cxt cxt;
+
+static void *ft_finddevice(const char *name)
+{
+       return ft_find_device(&cxt, name);
+}
+
+static int ft_getprop(const void *phandle, const char *propname, void *buf,
+               const int buflen)
+{
+       return ft_get_prop(&cxt, phandle, propname, buf, buflen);
+}
+
+static int ft_setprop(const void *phandle, const char *propname,
+               const void *buf, const int buflen)
+{
+       return ft_set_prop(&cxt, phandle, propname, buf, buflen);
+}
+
+static unsigned long ft_finalize(void)
+{
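+       /* tidy up the blob (string block, header fields) and hand back its address */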
+       ft_end_tree(&cxt);
+       return (unsigned long)cxt.bph;
+}
+
+int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device)
+{
+       dt_ops.finddevice = ft_finddevice;
+       dt_ops.getprop = ft_getprop;
+       dt_ops.setprop = ft_setprop;
+       dt_ops.finalize = ft_finalize;
+
+       return ft_open(&cxt, dt_blob, max_size, max_find_device,
+                       platform_ops.realloc);
+}
diff --git a/arch/powerpc/boot/io.h b/arch/powerpc/boot/io.h
new file mode 100644 (file)
index 0000000..32974ed
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef _IO_H
+#define _IO_H
+/*
+ * Low-level I/O routines.
+ *
+ * Copied from <file:include/asm-powerpc/io.h> (which has no copyright)
+ */
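+
+/*
+ * The "twi 0,<reg>,0; isync" sequence after each load below forces the
+ * access to complete before any following instructions execute.
+ */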
+static inline int in_8(const volatile unsigned char *addr)
+{
+       int ret;
+
+       __asm__ __volatile__("lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
+                            : "=r" (ret) : "m" (*addr));
+       return ret;
+}
+
+static inline void out_8(volatile unsigned char *addr, int val)
+{
+       __asm__ __volatile__("stb%U0%X0 %1,%0; sync"
+                            : "=m" (*addr) : "r" (val));
+}
+
+static inline unsigned in_le32(const volatile unsigned *addr)
+{
+       unsigned ret;
+
+       __asm__ __volatile__("lwbrx %0,0,%1; twi 0,%0,0; isync"
+                            : "=r" (ret) : "r" (addr), "m" (*addr));
+       return ret;
+}
+
+static inline unsigned in_be32(const volatile unsigned *addr)
+{
+       unsigned ret;
+
+       __asm__ __volatile__("lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
+                            : "=r" (ret) : "m" (*addr));
+       return ret;
+}
+
+static inline void out_le32(volatile unsigned *addr, int val)
+{
+       __asm__ __volatile__("stwbrx %1,0,%2; sync" : "=m" (*addr)
+                            : "r" (val), "r" (addr));
+}
+
+static inline void out_be32(volatile unsigned *addr, int val)
+{
+       __asm__ __volatile__("stw%U0%X0 %1,%0; sync"
+                            : "=m" (*addr) : "r" (val));
+}
+
+#endif /* _IO_H */
index d719bb9333d1852b0bf20163aa61a0d85520f18f..6f6b50d238b6fa17a19f62552c2560610967927b 100644 (file)
@@ -27,6 +27,8 @@ extern char _vmlinux_start[];
 extern char _vmlinux_end[];
 extern char _initrd_start[];
 extern char _initrd_end[];
+extern char _dtb_start[];
+extern char _dtb_end[];
 
 struct addr_range {
        unsigned long addr;
@@ -167,7 +169,7 @@ static int is_elf32(void *hdr)
        return 1;
 }
 
-static void prep_kernel(unsigned long *a1, unsigned long *a2)
+static void prep_kernel(unsigned long a1, unsigned long a2)
 {
        int len;
 
@@ -203,11 +205,14 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
        }
 
        /*
-        * Now we try to alloc memory for the initrd (and copy it there)
+        * Now find the initrd
+        *
+        * First see if we have an image attached to us.  If so
+        * allocate memory for it and copy it there.
         */
        initrd.size = (unsigned long)(_initrd_end - _initrd_start);
        initrd.memsize = initrd.size;
-       if ( initrd.size > 0 ) {
+       if (initrd.size > 0) {
                printf("Allocating 0x%lx bytes for initrd ...\n\r",
                       initrd.size);
                initrd.addr = (unsigned long)malloc((u32)initrd.size);
@@ -216,8 +221,6 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
                                        "ramdisk !\n\r");
                        exit();
                }
-               *a1 = initrd.addr;
-               *a2 = initrd.size;
                printf("initial ramdisk moving 0x%lx <- 0x%lx "
                        "(0x%lx bytes)\n\r", initrd.addr,
                        (unsigned long)_initrd_start, initrd.size);
@@ -225,6 +228,12 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
                        initrd.size);
                printf("initrd head: 0x%lx\n\r",
                                *((unsigned long *)initrd.addr));
+       } else if (a2 != 0) {
+               /* Otherwise, see if yaboot or another loader gave us an initrd */
+               initrd.addr = a1;
+               initrd.memsize = initrd.size = a2;
+               printf("Using loader supplied initrd at 0x%lx (0x%lx bytes)\n\r",
+                      initrd.addr, initrd.size);
        }
 
        /* Eventually gunzip the kernel */
@@ -250,10 +259,6 @@ static void prep_kernel(unsigned long *a1, unsigned long *a2)
        flush_cache((void *)vmlinux.addr, vmlinux.size);
 }
 
-void __attribute__ ((weak)) ft_init(void *dt_blob)
-{
-}
-
 /* A buffer that may be edited by tools operating on a zImage binary so as to
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
  * The buffer is put in its own section so that tools may locate it more easily.
@@ -285,36 +290,22 @@ static void set_cmdline(char *buf)
                setprop(devp, "bootargs", buf, strlen(buf) + 1);
 }
 
-/* Section where ft can be tacked on after zImage is built */
-union blobspace {
-       struct boot_param_header hdr;
-       char space[8*1024];
-} dt_blob __attribute__((__section__("__builtin_ft")));
-
 struct platform_ops platform_ops;
 struct dt_ops dt_ops;
 struct console_ops console_ops;
 
 void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
 {
-       int have_dt = 0;
        kernel_entry_t kentry;
        char cmdline[COMMAND_LINE_SIZE];
+       unsigned long ft_addr = 0;
 
        memset(__bss_start, 0, _end - __bss_start);
        memset(&platform_ops, 0, sizeof(platform_ops));
        memset(&dt_ops, 0, sizeof(dt_ops));
        memset(&console_ops, 0, sizeof(console_ops));
 
-       /* Override the dt_ops and device tree if there was an flat dev
-        * tree attached to the zImage.
-        */
-       if (dt_blob.hdr.magic == OF_DT_HEADER) {
-               have_dt = 1;
-               ft_init(&dt_blob);
-       }
-
-       if (platform_init(promptr))
+       if (platform_init(promptr, _dtb_start, _dtb_end))
                exit();
        if (console_ops.open && (console_ops.open() < 0))
                exit();
@@ -324,7 +315,7 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
        printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r",
               _start, sp);
 
-       prep_kernel(&a1, &a2);
+       prep_kernel(a1, a2);
 
        /* If cmdline came from zimage wrapper or if we can edit the one
         * in the dt, print it out and edit it, if possible.
@@ -338,15 +329,23 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
                set_cmdline(cmdline);
        }
 
+       printf("Finalizing device tree...");
+       if (dt_ops.finalize)
+               ft_addr = dt_ops.finalize();
+       if (ft_addr)
+               printf(" flat tree at 0x%lx\n\r", ft_addr);
+       else
+               printf(" using OF tree (promptr=%p)\n\r", promptr);
+
        if (console_ops.close)
                console_ops.close();
 
        kentry = (kernel_entry_t) vmlinux.addr;
-       if (have_dt)
-               kentry(dt_ops.ft_addr(), 0, NULL);
+       if (ft_addr)
+               kentry(ft_addr, 0, NULL);
        else
                /* XXX initrd addr/size should be passed in properties */
-               kentry(a1, a2, promptr);
+               kentry(initrd.addr, initrd.size, promptr);
 
        /* console closed so printf below may not work */
        printf("Error: Linux kernel returned to zImage boot wrapper!\n\r");
diff --git a/arch/powerpc/boot/mktree.c b/arch/powerpc/boot/mktree.c
new file mode 100644 (file)
index 0000000..4cb8929
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Makes a tree bootable image for IBM Evaluation boards.
+ * Basically, just take a zImage, skip the ELF header, and stuff
+ * a 32 byte header on the front.
+ *
+ * We use htonl, which is a network macro, to make sure we're doing
+ * The Right Thing on an LE machine.  It's non-obvious, but it should
+ * work on anything BSD'ish.
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#ifdef __sun__
+#include <inttypes.h>
+#else
+#include <stdint.h>
+#endif
+
+/* This gets tacked on the front of the image.  There are also a few
+ * bytes allocated after the _start label used by the boot rom (see
+ * head.S for details).
+ */
+typedef struct boot_block {
+       uint32_t bb_magic;              /* 0x0052504F */
+       uint32_t bb_dest;               /* Target address of the image */
+       uint32_t bb_num_512blocks;      /* Size, rounded-up, in 512 byte blks */
+       uint32_t bb_debug_flag; /* Run debugger or image after load */
+       uint32_t bb_entry_point;        /* The image address to start */
+       uint32_t bb_checksum;   /* 32 bit checksum including header */
+       uint32_t reserved[2];
+} boot_block_t;
+
+#define IMGBLK 512
+char   tmpbuf[IMGBLK];
+
+int main(int argc, char *argv[])
+{
+       int     in_fd, out_fd;
+       int     nblks, i;
+       uint    cksum, *cp;
+       struct  stat    st;
+       boot_block_t    bt;
+
+       if (argc < 3) {
+               fprintf(stderr, "usage: %s <zImage-file> <boot-image> [entry-point]\n",argv[0]);
+               exit(1);
+       }
+
+       if (stat(argv[1], &st) < 0) {
+               perror("stat");
+               exit(2);
+       }
+
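+       /* image size in 512-byte blocks, rounded up (may over-count by one block) */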
+       nblks = (st.st_size + IMGBLK) / IMGBLK;
+
+       bt.bb_magic = htonl(0x0052504F);
+
+       /* If we have the optional entry point parameter, use it */
+       if (argc == 4)
+               bt.bb_dest = bt.bb_entry_point = htonl(strtoul(argv[3], NULL, 0));
+       else
+               bt.bb_dest = bt.bb_entry_point = htonl(0x500000);
+
+       /* We know these from the linker command.
+        * ...and then move it up into memory a little more so the
+        * relocation can happen.
+        */
+       bt.bb_num_512blocks = htonl(nblks);
+       bt.bb_debug_flag = 0;
+
+       bt.bb_checksum = 0;
+
+       /* To be neat and tidy :-).
+       */
+       bt.reserved[0] = 0;
+       bt.reserved[1] = 0;
+
+       if ((in_fd = open(argv[1], O_RDONLY)) < 0) {
+               perror("zImage open");
+               exit(3);
+       }
+
+       if ((out_fd = open(argv[2], (O_RDWR | O_CREAT | O_TRUNC), 0666)) < 0) {
+               perror("bootfile open");
+               exit(3);
+       }
+
+       cksum = 0;
+       cp = (void *)&bt;
+       for (i=0; i<sizeof(bt)/sizeof(uint); i++)
+               cksum += *cp++;
+
+       /* Assume zImage is an ELF file, and skip its first 64K
+        * (ELF headers and padding).
+       */
+       if (read(in_fd, tmpbuf, IMGBLK) != IMGBLK) {
+               fprintf(stderr, "%s is too small to be an ELF image\n",
+                               argv[1]);
+               exit(4);
+       }
+
+       if ((*(uint *)tmpbuf) != htonl(0x7f454c46)) {
+               fprintf(stderr, "%s is not an ELF image\n", argv[1]);
+               exit(4);
+       }
+
+       if (lseek(in_fd, (64 * 1024), SEEK_SET) < 0) {
+               fprintf(stderr, "%s failed to seek in ELF image\n", argv[1]);
+               exit(4);
+       }
+
+       nblks -= (64 * 1024) / IMGBLK;
+
+       /* And away we go......
+       */
+       if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
+               perror("boot-image write");
+               exit(5);
+       }
+
+       while (nblks-- > 0) {
+               if (read(in_fd, tmpbuf, IMGBLK) < 0) {
+                       perror("zImage read");
+                       exit(5);
+               }
+               cp = (uint *)tmpbuf;
+               for (i=0; i<sizeof(tmpbuf)/sizeof(uint); i++)
+                       cksum += *cp++;
+               if (write(out_fd, tmpbuf, sizeof(tmpbuf)) != sizeof(tmpbuf)) {
+                       perror("boot-image write");
+                       exit(5);
+               }
+       }
+
+       /* rewrite the header with the computed checksum.
+       */
+       bt.bb_checksum = htonl(cksum);
+       if (lseek(out_fd, 0, SEEK_SET) < 0) {
+               perror("rewrite seek");
+               exit(1);
+       }
+       if (write(out_fd, &bt, sizeof(bt)) != sizeof(bt)) {
+               perror("boot-image rewrite");
+               exit(1);
+       }
+
+       exit(0);
+}
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
new file mode 100644 (file)
index 0000000..1ffe72e
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * 16550 serial console support.
+ *
+ * Original copied from <file:arch/ppc/boot/common/ns16550.c>
+ * (which had no copyright)
+ * Modifications: 2006 (c) MontaVista Software, Inc.
+ *
+ * Modified by: Mark A. Greer <mgreer@mvista.com>
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "io.h"
+#include "ops.h"
+
+#define UART_DLL       0       /* Out: Divisor Latch Low */
+#define UART_DLM       1       /* Out: Divisor Latch High */
+#define UART_FCR       2       /* Out: FIFO Control Register */
+#define UART_LCR       3       /* Out: Line Control Register */
+#define UART_MCR       4       /* Out: Modem Control Register */
+#define UART_LSR       5       /* In:  Line Status Register */
+#define UART_LSR_THRE  0x20    /* Transmit-hold-register empty */
+#define UART_LSR_DR    0x01    /* Receiver data ready */
+#define UART_MSR       6       /* In:  Modem Status Register */
+#define UART_SCR       7       /* I/O: Scratch Register */
+
+static unsigned char *reg_base;
+static u32 reg_shift;
+
+static int ns16550_open(void)
+{
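+       /* clear the transmit and receive FIFOs (FCR bits 1 and 2) */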
+       out_8(reg_base + (UART_FCR << reg_shift), 0x06);
+       return 0;
+}
+
+static void ns16550_putc(unsigned char c)
+{
+       while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_THRE) == 0);
+       out_8(reg_base, c);
+}
+
+static unsigned char ns16550_getc(void)
+{
+       while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) == 0);
+       return in_8(reg_base);
+}
+
+static u8 ns16550_tstc(void)
+{
+       return ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) != 0);
+}
+
+int ns16550_console_init(void *devp, struct serial_console_data *scdp)
+{
+       int n;
+
+       n = getprop(devp, "virtual-reg", &reg_base, sizeof(reg_base));
+       if (n != sizeof(reg_base))
+               return -1;
+
+       n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
+       if (n != sizeof(reg_shift))
+               reg_shift = 0;
+
+       scdp->open = ns16550_open;
+       scdp->putc = ns16550_putc;
+       scdp->getc = ns16550_getc;
+       scdp->tstc = ns16550_tstc;
+       scdp->close = NULL;
+
+       return 0;
+}
index 3a71845afc6c584670075fbc9a152febcf7819de..0182f384f3e6cc80d2111ad9682070ce553069a0 100644 (file)
@@ -256,24 +256,18 @@ static void of_console_write(char *buf, int len)
        call_prom("write", 3, 1, of_stdout_handle, buf, len);
 }
 
-int platform_init(void *promptr)
+int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end)
 {
-       platform_ops.fixups = NULL;
        platform_ops.image_hdr = of_image_hdr;
        platform_ops.malloc = of_try_claim;
-       platform_ops.free = NULL;
        platform_ops.exit = of_exit;
 
        dt_ops.finddevice = of_finddevice;
        dt_ops.getprop = of_getprop;
        dt_ops.setprop = of_setprop;
-       dt_ops.translate_addr = NULL;
 
        console_ops.open = of_console_open;
        console_ops.write = of_console_write;
-       console_ops.edit_cmdline = NULL;
-       console_ops.close = NULL;
-       console_ops.data = NULL;
 
        prom = (int (*)(void *))promptr;
        return 0;
index 135eb4bb03b45696acb3f89fe8c24fe4886338e2..8abb6516bb7c1833a90fb8341ac459751728b921 100644 (file)
@@ -22,7 +22,8 @@ struct platform_ops {
        void    (*fixups)(void);
        void    (*image_hdr)(const void *);
        void *  (*malloc)(u32 size);
-       void    (*free)(void *ptr, u32 size);
+       void    (*free)(void *ptr);
+       void *  (*realloc)(void *ptr, unsigned long size);
        void    (*exit)(void);
 };
 extern struct platform_ops platform_ops;
@@ -30,13 +31,11 @@ extern struct platform_ops platform_ops;
 /* Device Tree operations */
 struct dt_ops {
        void *  (*finddevice)(const char *name);
-       int     (*getprop)(const void *node, const char *name, void *buf,
+       int     (*getprop)(const void *phandle, const char *name, void *buf,
                        const int buflen);
-       int     (*setprop)(const void *node, const char *name,
+       int     (*setprop)(const void *phandle, const char *name,
                        const void *buf, const int buflen);
-       u64     (*translate_addr)(const char *path, const u32 *in_addr,
-                       const u32 addr_len);
-       unsigned long (*ft_addr)(void);
+       unsigned long (*finalize)(void);
 };
 extern struct dt_ops dt_ops;
 
@@ -59,10 +58,13 @@ struct serial_console_data {
        void            (*close)(void);
 };
 
-extern int platform_init(void *promptr);
-extern void simple_alloc_init(void);
-extern void ft_init(void *dt_blob);
-extern int serial_console_init(void);
+int platform_init(void *promptr, char *dt_blob_start, char *dt_blob_end);
+int ft_init(void *dt_blob, unsigned int max_size, unsigned int max_find_device);
+int serial_console_init(void);
+int ns16550_console_init(void *devp, struct serial_console_data *scdp);
+void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
+               u32 max_allocs);
+
 
 static inline void *finddevice(const char *name)
 {
@@ -84,10 +86,10 @@ static inline void *malloc(u32 size)
        return (platform_ops.malloc) ? platform_ops.malloc(size) : NULL;
 }
 
-static inline void free(void *ptr, u32 size)
+static inline void free(void *ptr)
 {
        if (platform_ops.free)
-               platform_ops.free(ptr, size);
+               platform_ops.free(ptr);
 }
 
 static inline void exit(void)
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
new file mode 100644 (file)
index 0000000..e8de4cf
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Generic serial console support
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * Code in serial_edit_cmdline() copied from <file:arch/ppc/boot/simple/misc.c>
+ * and was written by Matt Porter <mporter@kernel.crashing.org>.
+ *
+ * 2001,2006 (c) MontaVista Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "types.h"
+#include "string.h"
+#include "stdio.h"
+#include "io.h"
+#include "ops.h"
+
+extern void udelay(long delay);
+
+static int serial_open(void)
+{
+       struct serial_console_data *scdp = console_ops.data;
+       return scdp->open();
+}
+
+static void serial_write(char *buf, int len)
+{
+       struct serial_console_data *scdp = console_ops.data;
+
+       while (*buf != '\0')
+               scdp->putc(*buf++);
+}
+
+static void serial_edit_cmdline(char *buf, int len)
+{
+       int timer = 0, count;
+       char ch, *cp;
+       struct serial_console_data *scdp = console_ops.data;
+
+       cp = buf;
+       count = strlen(buf);
+       cp = &buf[count];
+       count++;
+
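+       /* poll for roughly 5 seconds; if a key arrives, edit the line until CR or LF */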
+       while (timer++ < 5*1000) {
+               if (scdp->tstc()) {
+                       while (((ch = scdp->getc()) != '\n') && (ch != '\r')) {
+                               /* Test for backspace/delete */
+                               if ((ch == '\b') || (ch == '\177')) {
+                                       if (cp != buf) {
+                                               cp--;
+                                               count--;
+                                               printf("\b \b");
+                                       }
+                               /* Test for ^x/^u (and wipe the line) */
+                               } else if ((ch == '\030') || (ch == '\025')) {
+                                       while (cp != buf) {
+                                               cp--;
+                                               count--;
+                                               printf("\b \b");
+                                       }
+                               } else if (count < len) {
+                                               *cp++ = ch;
+                                               count++;
+                                               scdp->putc(ch);
+                               }
+                       }
+                       break;  /* Exit 'timer' loop */
+               }
+               udelay(1000);  /* 1 msec */
+       }
+       *cp = 0;
+}
+
+static void serial_close(void)
+{
+       struct serial_console_data *scdp = console_ops.data;
+
+       if (scdp->close)
+               scdp->close();
+}
+
+static void *serial_get_stdout_devp(void)
+{
+       void *devp;
+       char devtype[MAX_PROP_LEN];
+       char path[MAX_PATH_LEN];
+
+       devp = finddevice("/chosen");
+       if (devp == NULL)
+               goto err_out;
+
+       if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
+               devp = finddevice(path);
+               if (devp == NULL)
+                       goto err_out;
+
+               if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
+                               && !strcmp(devtype, "serial"))
+                       return devp;
+       }
+err_out:
+       return NULL;
+}
+
+static struct serial_console_data serial_cd;
+
+/* Node's "compatible" property determines which serial driver to use */
+int serial_console_init(void)
+{
+       void *devp;
+       int rc = -1;
+       char compat[MAX_PROP_LEN];
+
+       devp = serial_get_stdout_devp();
+       if (devp == NULL)
+               goto err_out;
+
+       if (getprop(devp, "compatible", compat, sizeof(compat)) < 0)
+               goto err_out;
+
+       if (!strcmp(compat, "ns16550"))
+               rc = ns16550_console_init(devp, &serial_cd);
+
+       /* Add other serial console driver calls here */
+
+       if (!rc) {
+               console_ops.open = serial_open;
+               console_ops.write = serial_write;
+               console_ops.edit_cmdline = serial_edit_cmdline;
+               console_ops.close = serial_close;
+               console_ops.data = &serial_cd;
+
+               return 0;
+       }
+err_out:
+       return -1;
+}
diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
new file mode 100644 (file)
index 0000000..cfe3a75
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Implement primitive realloc(3) functionality.
+ *
+ * Author: Mark A. Greer <mgreer@mvista.com>
+ *
+ * 2006 (c) MontaVista, Software, Inc.  This file is licensed under
+ * the terms of the GNU General Public License version 2.  This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <stddef.h>
+#include "types.h"
+#include "page.h"
+#include "string.h"
+#include "ops.h"
+
+#define        ENTRY_BEEN_USED 0x01
+#define        ENTRY_IN_USE    0x02
+
+static struct alloc_info {
+       u32     flags;
+       u32     base;
+       u32     size;
+} *alloc_tbl;
+
+static u32 tbl_entries;
+static u32 alloc_min;
+static u32 next_base;
+static u32 space_left;
+
+/*
+ * First time an entry is used, its base and size are set.
+ * An entry can be freed and re-malloc'd but its base & size don't change.
+ * Should be smart enough for needs of bootwrapper.
+ */
+static void *simple_malloc(u32 size)
+{
+       u32 i;
+       struct alloc_info *p = alloc_tbl;
+
+       if (size == 0)
+               goto err_out;
+
+       size = _ALIGN_UP(size, alloc_min);
+
+       for (i=0; i<tbl_entries; i++, p++)
+               if (!(p->flags & ENTRY_BEEN_USED)) { /* never been used */
+                       if (size <= space_left) {
+                               p->base = next_base;
+                               p->size = size;
+                               p->flags = ENTRY_BEEN_USED | ENTRY_IN_USE;
+                               next_base += size;
+                               space_left -= size;
+                               return (void *)p->base;
+                       }
+                       goto err_out; /* not enough space left */
+               }
+               /* reuse an entry keeping same base & size */
+               else if (!(p->flags & ENTRY_IN_USE) && (size <= p->size)) {
+                       p->flags |= ENTRY_IN_USE;
+                       return (void *)p->base;
+               }
+err_out:
+       return NULL;
+}
+
+static struct alloc_info *simple_find_entry(void *ptr)
+{
+       u32 i;
+       struct alloc_info *p = alloc_tbl;
+
+       for (i=0; i<tbl_entries; i++,p++) {
+               if (!(p->flags & ENTRY_BEEN_USED))
+                       break;
+               if ((p->flags & ENTRY_IN_USE) && (p->base == (u32)ptr))
+                       return p;
+       }
+       return NULL;
+}
+
+static void simple_free(void *ptr)
+{
+       struct alloc_info *p = simple_find_entry(ptr);
+
+       if (p != NULL)
+               p->flags &= ~ENTRY_IN_USE;
+}
+
+/*
+ * Change size of area pointed to by 'ptr' to 'size'.
+ * If 'ptr' is NULL, then it's a malloc().  If 'size' is 0, then it's a free().
+ * 'ptr' must be NULL or a pointer to a non-freed area previously returned by
+ * simple_realloc() or simple_malloc().
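+ *
+ * Illustrative usage (a sketch only; callers normally reach this through
+ * the platform_ops.realloc hook installed below):
+ *   p = simple_realloc(NULL, 64);   behaves like malloc(64)
+ *   p = simple_realloc(p, 128);     returns p if it still fits, else copies to a new block
+ *   simple_realloc(p, 0);           behaves like free(p)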
+ */
+static void *simple_realloc(void *ptr, unsigned long size)
+{
+       struct alloc_info *p;
+       void *new;
+
+       if (size == 0) {
+               simple_free(ptr);
+               return NULL;
+       }
+
+       if (ptr == NULL)
+               return simple_malloc(size);
+
+       p = simple_find_entry(ptr);
+       if (p == NULL) /* ptr not from simple_malloc/simple_realloc */
+               return NULL;
+       if (size <= p->size) /* fits in current block */
+               return ptr;
+
+       new = simple_malloc(size);
+       if (new == NULL)
+               return NULL;
+       memcpy(new, ptr, p->size);
+       simple_free(ptr);
+       return new;
+}
+
+/*
+ * Returns addr of first byte after heap so caller can see if it took
+ * too much space.  If so, change args & try again.
+ */
+void *simple_alloc_init(char *base, u32 heap_size, u32 granularity,
+               u32 max_allocs)
+{
+       u32 heap_base, tbl_size;
+
+       heap_size = _ALIGN_UP(heap_size, granularity);
+       alloc_min = granularity;
+       tbl_entries = max_allocs;
+
+       tbl_size = tbl_entries * sizeof(struct alloc_info);
+
+       alloc_tbl = (struct alloc_info *)_ALIGN_UP((unsigned long)base, 8);
+       memset(alloc_tbl, 0, tbl_size);
+
+       heap_base = _ALIGN_UP((u32)alloc_tbl + tbl_size, alloc_min);
+
+       next_base = heap_base;
+       space_left = heap_size;
+
+       platform_ops.malloc = simple_malloc;
+       platform_ops.free = simple_free;
+       platform_ops.realloc = simple_realloc;
+
+       return (void *)(heap_base + heap_size);
+}
index 6d5f6382e1ce26e843b4d70d8efa990aa664c70f..0a9feeb983424f53e98b047609f510adab849f26 100644 (file)
@@ -320,6 +320,7 @@ printf(const char *fmt, ...)
        va_start(args, fmt);
        n = vsprintf(sprint_buf, fmt, args);
        va_end(args);
-       console_ops.write(sprint_buf, n);
+       if (console_ops.write)
+               console_ops.write(sprint_buf, n);
        return n;
 }
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
new file mode 100644 (file)
index 0000000..427ddfc
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copied from <file:arch/powerpc/kernel/misc_32.S>
+ *
+ * This file contains miscellaneous low-level functions.
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * kexec bits:
+ * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include "ppc_asm.h"
+
+#define SPRN_PVR        0x11F   /* Processor Version Register */
+
+       .text
+
+/* udelay (on non-601 processors) needs to know the period of the
+ * timebase in nanoseconds.  This used to be hardcoded to be 60ns
+ * (period of 66MHz/4).  Now a variable is used that is initialized to
+ * 60 for backward compatibility, but it can be overridden as necessary
+ * with code something like this:
+ *    extern unsigned long timebase_period_ns;
+ *    timebase_period_ns = 1000000000 / bd->bi_tbfreq;
+ */
+       .data
+       .globl timebase_period_ns
+timebase_period_ns:
+       .long   60
+
+       .text
+/*
+ * Delay for a number of microseconds
+ */
+       .globl  udelay
+udelay:
+       mfspr   r4,SPRN_PVR
+       srwi    r4,r4,16
+       cmpwi   0,r4,1          /* 601 ? */
+       bne     .udelay_not_601
+00:    li      r0,86   /* Instructions / microsecond? */
+       mtctr   r0
+10:    addi    r0,r0,0 /* NOP */
+       bdnz    10b
+       subic.  r3,r3,1
+       bne     00b
+       blr
+
+.udelay_not_601:
+       mulli   r4,r3,1000      /* nanoseconds */
+       /*  Change r4 to be the number of ticks using:
+        *      (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
+        *  timebase_period_ns defaults to 60 (16.6MHz) */
+       mflr    r5
+       bl      0f
+0:     mflr    r6
+       mtlr    r5
+       lis     r5,0b@ha
+       addi    r5,r5,0b@l
+       subf    r5,r5,r6        /* In case we're relocated */
+       addis   r5,r5,timebase_period_ns@ha
+       lwz     r5,timebase_period_ns@l(r5)
+       add     r4,r4,r5
+       addi    r4,r4,-1
+       divw    r4,r4,r5        /* BUS ticks */
+1:     mftbu   r5
+       mftb    r6
+       mftbu   r7
+       cmpw    0,r5,r7
+       bne     1b              /* Get [synced] base time */
+       addc    r9,r6,r4        /* Compute end time */
+       addze   r8,r5
+2:     mftbu   r5
+       cmpw    0,r5,r8
+       blt     2b
+       bgt     3f
+       mftb    r6
+       cmpw    0,r6,r9
+       blt     2b
+3:     blr
index b5fb1fee76f8ca0a1e6bf1961c167c458d05ae90..024e4d425c596b30fd9b31817d0dbc2bb0a4a990 100755 (executable)
@@ -184,6 +184,9 @@ fi
 
 if [ -n "$dtb" ]; then
     addsec $tmp "$dtb" .kernel:dtb
+    if [ -n "$dts" ]; then
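+       # the dtb was generated from the supplied dts, so remove the temporary file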
+       rm $dtb
+    fi
 fi
 
 if [ "$platform" != "miboot" ]; then
index 05f32388b953026924c90834b13a838faabbda35..a360905e54282b7908e55203d538dfb730746030 100644 (file)
@@ -21,6 +21,10 @@ SECTIONS
     *(.got2)
     __got2_end = .;
 
+    _dtb_start = .;
+    *(.kernel:dtb)
+    _dtb_end = .;
+
     _vmlinux_start =  .;
     *(.kernel:vmlinux.strip)
     _vmlinux_end =  .;
index 0aba06d7d2eccad8215c096e2b9e8502d96c49f2..a98c982c73adac65956e939be1c58af65e08a6a5 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18
-# Wed Oct  4 15:30:50 2006
+# Linux kernel version: 2.6.19-rc6
+# Wed Nov 22 15:33:04 2006
 #
 CONFIG_PPC64=y
 CONFIG_64BIT=y
@@ -32,6 +32,10 @@ CONFIG_AUDIT_ARCH=y
 CONFIG_POWER3=y
 CONFIG_POWER4=y
 CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+CONFIG_PPC_DCR_MMIO=y
+CONFIG_PPC_DCR=y
+CONFIG_PPC_OF_PLATFORM_PCI=y
 CONFIG_ALTIVEC=y
 CONFIG_PPC_STD_MMU=y
 CONFIG_VIRT_CPU_ACCOUNTING=y
@@ -67,7 +71,7 @@ CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -131,6 +135,7 @@ CONFIG_PPC_CELL=y
 CONFIG_PPC_CELL_NATIVE=y
 CONFIG_PPC_IBM_CELL_BLADE=y
 CONFIG_UDBG_RTAS_CONSOLE=y
+CONFIG_PPC_PS3=y
 # CONFIG_U3_DART is not set
 CONFIG_PPC_RTAS=y
 # CONFIG_RTAS_ERROR_LOGGING is not set
@@ -139,9 +144,23 @@ CONFIG_RTAS_FLASH=y
 CONFIG_MMIO_NVRAM=y
 # CONFIG_PPC_MPC106 is not set
 # CONFIG_PPC_970_NAP is not set
-# CONFIG_CPU_FREQ is not set
+CONFIG_PPC_INDIRECT_IO=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_DEBUG=y
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+# CONFIG_CPU_FREQ_PMAC64 is not set
 # CONFIG_WANT_EARLY_SERIAL is not set
-# CONFIG_MPIC is not set
+CONFIG_MPIC=y
 
 #
 # Cell Broadband Engine options
@@ -149,6 +168,15 @@ CONFIG_MMIO_NVRAM=y
 CONFIG_SPU_FS=m
 CONFIG_SPU_BASE=y
 CONFIG_CBE_RAS=y
+CONFIG_CBE_THERM=m
+CONFIG_CBE_CPUFREQ=m
+
+#
+# PS3 Platform Options
+#
+CONFIG_PS3_HTAB_SIZE=20
+# CONFIG_PS3_DYNAMIC_DMA is not set
+CONFIG_PS3_USE_LPAR_ADDR=y
 
 #
 # Kernel options
@@ -166,13 +194,14 @@ CONFIG_BINFMT_MISC=m
 CONFIG_FORCE_MAX_ZONEORDER=9
 # CONFIG_IOMMU_VMERGE is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_KEXEC=y
+# CONFIG_KEXEC is not set
 # CONFIG_CRASH_DUMP is not set
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_NUMA=y
 CONFIG_NODES_SHIFT=4
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_FLATMEM_MANUAL is not set
@@ -189,6 +218,7 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_NODES_SPAN_OTHER_NODES=y
 CONFIG_PPC_64K_PAGES=y
 CONFIG_SCHED_SMT=y
 CONFIG_PROC_DEVICETREE=y
@@ -207,7 +237,6 @@ CONFIG_GENERIC_ISA_DMA=y
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 CONFIG_PCIEPORTBUS=y
-# CONFIG_PCI_MULTITHREAD_PROBE is not set
 # CONFIG_PCI_DEBUG is not set
 
 #
@@ -280,7 +309,6 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
 # CONFIG_IPV6_SIT is not set
 CONFIG_IPV6_TUNNEL=m
-# CONFIG_IPV6_SUBTREES is not set
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
 # CONFIG_NETWORK_SECMARK is not set
 CONFIG_NETFILTER=y
@@ -1107,7 +1135,8 @@ CONFIG_PLIST=y
 #
 # Instrumentation Support
 #
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
 # CONFIG_KPROBES is not set
 
 #
@@ -1142,6 +1171,7 @@ CONFIG_DEBUG_FS=y
 CONFIG_DEBUGGER=y
 CONFIG_XMON=y
 CONFIG_XMON_DEFAULT=y
+CONFIG_XMON_DISASSEMBLY=y
 CONFIG_IRQSTACKS=y
 # CONFIG_BOOTX_TEXT is not set
 # CONFIG_PPC_EARLY_DEBUG is not set
@@ -1159,7 +1189,7 @@ CONFIG_CRYPTO=y
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_BLKCIPHER=m
 CONFIG_CRYPTO_HASH=y
-# CONFIG_CRYPTO_MANAGER is not set
+CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_HMAC=y
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_MD4 is not set
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
new file mode 100644 (file)
index 0000000..23fd210
--- /dev/null
@@ -0,0 +1,1583 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Wed Nov 15 20:36:30 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_86xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION="-kuroboxHG"
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+# CONFIG_EMBEDDED is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+# CONFIG_PPC_MULTIPLATFORM is not set
+CONFIG_EMBEDDED6xx=y
+# CONFIG_APUS is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_KATANA is not set
+# CONFIG_WILLOW is not set
+# CONFIG_CPCI690 is not set
+# CONFIG_POWERPMC250 is not set
+# CONFIG_CHESTNUT is not set
+# CONFIG_SPRUCE is not set
+# CONFIG_HDPU is not set
+# CONFIG_EV64260 is not set
+# CONFIG_LOPEC is not set
+# CONFIG_MVME5100 is not set
+# CONFIG_PPLUS is not set
+# CONFIG_PRPMC750 is not set
+# CONFIG_PRPMC800 is not set
+# CONFIG_SANDPOINT is not set
+CONFIG_LINKSTATION=y
+# CONFIG_MPC7448HPC2 is not set
+# CONFIG_RADSTONE_PPC7D is not set
+# CONFIG_PAL4 is not set
+# CONFIG_GEMINI is not set
+# CONFIG_EST8260 is not set
+# CONFIG_SBC82xx is not set
+# CONFIG_SBS8260 is not set
+# CONFIG_RPX8260 is not set
+# CONFIG_TQM8260 is not set
+# CONFIG_ADS8272 is not set
+# CONFIG_PQ2FADS is not set
+# CONFIG_LITE5200 is not set
+# CONFIG_EV64360 is not set
+CONFIG_PPC_GEN550=y
+CONFIG_MPC10X_BRIDGE=y
+CONFIG_MPC10X_OPENPIC=y
+# CONFIG_MPC10X_STORE_GATHERING is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_MPIC=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NETFILTER_XTABLES=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_NETBIOS_NS is not set
+CONFIG_IP_NF_TFTP=m
+# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_PPTP is not set
+# CONFIG_IP_NF_H323 is not set
+# CONFIG_IP_NF_SIP is not set
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_IEEE80211=m
+CONFIG_IEEE80211_DEBUG=y
+CONFIG_IEEE80211_CRYPT_WEP=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+CONFIG_IEEE80211_SOFTMAC=m
+CONFIG_IEEE80211_SOFTMAC_DEBUG=y
+CONFIG_WIRELESS_EXT=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=m
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+# CONFIG_MTD_MAP_BANK_WIDTH_2 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+# CONFIG_MTD_CFI_I2 is not set
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_OTP is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0xffc00000
+CONFIG_MTD_PHYSMAP_LEN=0x400000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=1
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+CONFIG_PATA_SIL680=y
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_R8169=y
+# CONFIG_R8169_NAPI is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+# CONFIG_NET_WIRELESS_RTNETLINK is not set
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_AIRO is not set
+# CONFIG_HERMES is not set
+# CONFIG_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_BCM43XX is not set
+# CONFIG_ZD1211RW is not set
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+CONFIG_NETCONSOLE=y
+CONFIG_NETPOLL=y
+# CONFIG_NETPOLL_RX is not set
+# CONFIG_NETPOLL_TRAP is not set
+CONFIG_NET_POLL_CONTROLLER=y
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+# CONFIG_USB_DABUSB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_SPLIT_ISO is not set
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_TOUCHSCREEN is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+
+#
+# RTC drivers
+#
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+CONFIG_RTC_DRV_RS5C372=y
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_TEST is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=m
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+# CONFIG_NTFS_RW is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_NFSD_TCP=y
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/powerpc/configs/lite5200_defconfig b/arch/powerpc/configs/lite5200_defconfig
new file mode 100644 (file)
index 0000000..ee76557
--- /dev/null
@@ -0,0 +1,931 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc6
+# Mon Nov 27 11:08:20 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_86xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_6xx=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_CHRP is not set
+CONFIG_PPC_MPC52xx=y
+# CONFIG_PPC_EFIKA is not set
+CONFIG_PPC_LITE5200=y
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+# CONFIG_MPIC is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+# CONFIG_PM_SYSFS_DEPRECATED is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_INDIRECT_PCI is not set
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCIEPORTBUS is not set
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+CONFIG_ATA=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+CONFIG_PATA_MPC52xx=y
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
index be11df7c11aa4ea455022791b7e8847449f371ee..1c009651f9250f18eb19bcc8a759c46e4307a8ce 100644 (file)
@@ -1386,8 +1386,8 @@ CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
 CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-# CONFIG_AUTOFS4_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_AUTOFS4_FS=m
 # CONFIG_FUSE_FS is not set
 
 #
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
new file mode 100644 (file)
index 0000000..f2d888e
--- /dev/null
@@ -0,0 +1,837 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc6
+# Tue Nov 21 19:38:53 2006
+#
+CONFIG_PPC64=y
+CONFIG_64BIT=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+# CONFIG_DEFAULT_UIMAGE is not set
+
+#
+# Processor support
+#
+# CONFIG_POWER4_ONLY is not set
+CONFIG_POWER3=y
+CONFIG_POWER4=y
+CONFIG_PPC_FPU=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+# CONFIG_PPC_OF_PLATFORM_PCI is not set
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_PSERIES is not set
+# CONFIG_PPC_ISERIES is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_MAPLE is not set
+# CONFIG_PPC_PASEMI is not set
+CONFIG_PPC_CELL=y
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PPC_IBM_CELL_BLADE is not set
+CONFIG_PPC_PS3=y
+# CONFIG_U3_DART is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+# CONFIG_MPIC is not set
+
+#
+# Cell Broadband Engine options
+#
+CONFIG_SPU_FS=y
+CONFIG_SPU_BASE=y
+# CONFIG_CBE_RAS is not set
+
+#
+# PS3 Platform Options
+#
+CONFIG_PS3_HTAB_SIZE=20
+CONFIG_PS3_DYNAMIC_DMA=y
+CONFIG_PS3_USE_LPAR_ADDR=y
+
+#
+# Kernel options
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_BKL is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+CONFIG_FORCE_MAX_ZONEORDER=9
+# CONFIG_IOMMU_VMERGE is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+# CONFIG_IRQ_ALL_CPUS is not set
+# CONFIG_NUMA is not set
+CONFIG_ARCH_SELECT_MEMORY_MODEL=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_SPARSE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_ARCH_MEMORY_PROBE=y
+CONFIG_PPC_64K_PAGES=y
+# CONFIG_SCHED_SMT is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="root=/dev/nfs rw ip=dhcp"
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+CONFIG_KERNEL_START=0xc000000000000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+
+#
+# Ethernet (10 or 100Mbit)
+#
+# CONFIG_NET_ETHERNET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC32 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+# CONFIG_KPROBES is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_RWSEMS is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_DEBUG_LIST=y
+CONFIG_FORCED_INLINING=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUGGER is not set
+CONFIG_IRQSTACKS=y
+# CONFIG_BOOTX_TEXT is not set
+CONFIG_PPC_EARLY_DEBUG=y
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
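
The config fragment above closes out the new defconfig. As a quick, hedged illustration of how these symbols reach C code: the build turns a "CONFIG_FOO=y" line into "#define CONFIG_FOO 1" in an autogenerated header, while "# CONFIG_FOO is not set" leaves the symbol undefined, so sources just test it with the preprocessor. The stand-in define below only mirrors the CONFIG_ROOT_NFS=y line so the sketch compiles on its own:

#include <stdio.h>

/* Stand-in for the autogenerated config header, NOT part of the patch. */
#define CONFIG_ROOT_NFS 1

int main(void)
{
#ifdef CONFIG_ROOT_NFS
        printf("root-over-NFS support compiled in\n");
#else
        printf("root-over-NFS support left out\n");
#endif
        return 0;
}
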
index 7af23c43fd4b502a6c7f32478b5010085281ff60..4fe53d08ab817e79a4810b1492e3a5e4bff02661 100644 (file)
@@ -17,11 +17,11 @@ obj-y                               += vdso32/
 obj-$(CONFIG_PPC64)            += setup_64.o binfmt_elf32.o sys_ppc32.o \
                                   signal_64.o ptrace32.o \
                                   paca.o cpu_setup_ppc970.o \
-                                  firmware.o sysfs.o
+                                  firmware.o sysfs.o nvram_64.o
 obj-$(CONFIG_PPC64)            += vdso64/
 obj-$(CONFIG_ALTIVEC)          += vecemu.o vector.o
 obj-$(CONFIG_PPC_970_NAP)      += idle_power4.o
-obj-$(CONFIG_PPC_OF)           += of_device.o prom_parse.o
+obj-$(CONFIG_PPC_OF)           += of_device.o of_platform.o prom_parse.o
 procfs-$(CONFIG_PPC64)         := proc_ppc64.o
 obj-$(CONFIG_PROC_FS)          += $(procfs-y)
 rtaspci-$(CONFIG_PPC64)                := rtas_pci.o
@@ -32,7 +32,6 @@ obj-$(CONFIG_LPARCFG)         += lparcfg.o
 obj-$(CONFIG_IBMVIO)           += vio.o
 obj-$(CONFIG_IBMEBUS)           += ibmebus.o
 obj-$(CONFIG_GENERIC_TBSYNC)   += smp-tbsync.o
-obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 obj-$(CONFIG_6xx)              += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
 obj-$(CONFIG_TAU)              += tau_6xx.o
@@ -59,11 +58,11 @@ obj-$(CONFIG_BOOTX_TEXT)    += btext.o
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_PPC_UDBG_16550)   += legacy_serial.o udbg_16550.o
+
 module-$(CONFIG_PPC64)         += module_64.o
 obj-$(CONFIG_MODULES)          += $(module-y)
 
-pci64-$(CONFIG_PPC64)          += pci_64.o pci_dn.o pci_iommu.o \
-                                  pci_direct_iommu.o iomap.o
+pci64-$(CONFIG_PPC64)          += pci_64.o pci_dn.o
 pci32-$(CONFIG_PPC32)          := pci_32.o
 obj-$(CONFIG_PCI)              += $(pci64-y) $(pci32-y)
 kexec-$(CONFIG_PPC64)          := machine_kexec_64.o
@@ -72,8 +71,12 @@ obj-$(CONFIG_KEXEC)          += machine_kexec.o crash.o $(kexec-y)
 obj-$(CONFIG_AUDIT)            += audit.o
 obj64-$(CONFIG_AUDIT)          += compat_audit.o
 
+ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+obj-y                          += iomap.o
+endif
+
 ifeq ($(CONFIG_PPC_ISERIES),y)
-$(obj)/head_64.o: $(obj)/lparmap.s
+extra-y += lparmap.s
 AFLAGS_head_64.o += -I$(obj)
 endif
 
index d06f378597bb4027662ca29158cbc07a2110d348..e96521530d21b3add856b834d8ace4750234e42e 100644 (file)
@@ -118,7 +118,8 @@ int main(void)
        DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
        DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
        DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
-       DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
+       DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
+       DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
        DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
        DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
        DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
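
The asm-offsets hunk above replaces PACAPROCENABLED with two offsets, PACASOFTIRQEN and PACAHARDIRQEN, for the new soft_enabled/hard_enabled paca bytes. A minimal stand-alone sketch of what asm-offsets.c does at build time (paca_model is a stand-in, not the real paca_struct):

#include <stddef.h>
#include <stdio.h>

struct paca_model {
        unsigned long saved_r1;
        unsigned char soft_enabled;
        unsigned char hard_enabled;
};

int main(void)
{
        /* The real file emits these through DEFINE(); the build turns the
         * output into a header the .S files include, which is what lets
         * assembly such as "stb r10,PACASOFTIRQEN(r13)" address the fields. */
        printf("#define PACASOFTIRQEN %zu\n",
               offsetof(struct paca_model, soft_enabled));
        printf("#define PACAHARDIRQEN %zu\n",
               offsetof(struct paca_model, hard_enabled));
        return 0;
}
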
index 652594891d58aa90f14a5a07332f2d39ca15b8f0..bf118c3857520a81eb978b016512607f49c8b611 100644 (file)
@@ -83,6 +83,22 @@ _GLOBAL(__setup_cpu_ppc970)
        rldimi  r0,r11,52,8             /* set NAP and DPM */
        li      r11,0
        rldimi  r0,r11,32,31            /* clear EN_ATTN */
+       b       load_hids               /* Jump to shared code */
+
+
+_GLOBAL(__setup_cpu_ppc970MP)
+       /* Do nothing if not running in HV mode */
+       mfmsr   r0
+       rldicl. r0,r0,4,63
+       beqlr
+
+       mfspr   r0,SPRN_HID0
+       li      r11,0x15                /* clear DOZE and SLEEP */
+       rldimi  r0,r11,52,6             /* set DEEPNAP, NAP and DPM */
+       li      r11,0
+       rldimi  r0,r11,32,31            /* clear EN_ATTN */
+
+load_hids:
        mtspr   SPRN_HID0,r0
        mfspr   r0,SPRN_HID0
        mfspr   r0,SPRN_HID0
index bfd499ee37530807a070886409b03a504ccca8e9..9d1614c3ce67d047adf9573dcb40704602783a5a 100644 (file)
@@ -42,6 +42,7 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
 #endif /* CONFIG_PPC32 */
 #ifdef CONFIG_PPC64
 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
 extern void __restore_cpu_ppc970(void);
 #endif /* CONFIG_PPC64 */
 
@@ -222,9 +223,9 @@ static struct cpu_spec cpu_specs[] = {
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
                .num_pmcs               = 8,
-               .cpu_setup              = __setup_cpu_ppc970,
+               .cpu_setup              = __setup_cpu_ppc970MP,
                .cpu_restore            = __restore_cpu_ppc970,
-               .oprofile_cpu_type      = "ppc64/970",
+               .oprofile_cpu_type      = "ppc64/970MP",
                .oprofile_type          = PPC_OPROFILE_POWER4,
                .platform               = "ppc970",
        },
@@ -276,10 +277,45 @@ static struct cpu_spec cpu_specs[] = {
                .oprofile_mmcra_sipr    = MMCRA_SIPR,
                .platform               = "power5+",
        },
+       {       /* POWER6 in P5+ mode; 2.04-compliant processor */
+               .pvr_mask               = 0xffffffff,
+               .pvr_value              = 0x0f000001,
+               .cpu_name               = "POWER5+",
+               .cpu_features           = CPU_FTRS_POWER5,
+               .cpu_user_features      = COMMON_USER_POWER5_PLUS,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .oprofile_cpu_type      = "ppc64/power6",
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .oprofile_mmcra_sihv    = POWER6_MMCRA_SIHV,
+               .oprofile_mmcra_sipr    = POWER6_MMCRA_SIPR,
+               .oprofile_mmcra_clear   = POWER6_MMCRA_THRM |
+                       POWER6_MMCRA_OTHER,
+               .platform               = "power5+",
+       },
        {       /* Power6 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x003e0000,
-               .cpu_name               = "POWER6",
+               .cpu_name               = "POWER6 (raw)",
+               .cpu_features           = CPU_FTRS_POWER6,
+               .cpu_user_features      = COMMON_USER_POWER6 |
+                       PPC_FEATURE_POWER6_EXT,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .oprofile_cpu_type      = "ppc64/power6",
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .oprofile_mmcra_sihv    = POWER6_MMCRA_SIHV,
+               .oprofile_mmcra_sipr    = POWER6_MMCRA_SIPR,
+               .oprofile_mmcra_clear   = POWER6_MMCRA_THRM |
+                       POWER6_MMCRA_OTHER,
+               .platform               = "power6x",
+       },
+       {       /* 2.05-compliant processor, i.e. Power6 "architected" mode */
+               .pvr_mask               = 0xffffffff,
+               .pvr_value              = 0x0f000002,
+               .cpu_name               = "POWER6 (architected)",
                .cpu_features           = CPU_FTRS_POWER6,
                .cpu_user_features      = COMMON_USER_POWER6,
                .icache_bsize           = 128,
@@ -303,6 +339,9 @@ static struct cpu_spec cpu_specs[] = {
                        PPC_FEATURE_SMT,
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
+               .num_pmcs               = 4,
+               .oprofile_cpu_type      = "ppc64/cell-be",
+               .oprofile_type          = PPC_OPROFILE_CELL,
                .platform               = "ppc-cell-be",
        },
        {       /* PA Semi PA6T */
@@ -801,6 +840,17 @@ static struct cpu_spec cpu_specs[] = {
                .cpu_setup              = __setup_cpu_603,
                .platform               = "ppc603",
        },
+       {       /* e300c3 on 83xx  */
+               .pvr_mask               = 0x7fff0000,
+               .pvr_value              = 0x00850000,
+               .cpu_name               = "e300c3",
+               .cpu_features           = CPU_FTRS_E300,
+               .cpu_user_features      = COMMON_USER,
+               .icache_bsize           = 32,
+               .dcache_bsize           = 32,
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
+       },
        {       /* default match, we assume split I/D cache & TB (non-601)... */
                .pvr_mask               = 0x00000000,
                .pvr_value              = 0x00000000,
@@ -1169,19 +1219,15 @@ static struct cpu_spec cpu_specs[] = {
 #endif /* CONFIG_PPC32 */
 };
 
-struct cpu_spec *identify_cpu(unsigned long offset)
+struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr)
 {
        struct cpu_spec *s = cpu_specs;
        struct cpu_spec **cur = &cur_cpu_spec;
-       unsigned int pvr = mfspr(SPRN_PVR);
        int i;
 
        s = PTRRELOC(s);
        cur = PTRRELOC(cur);
 
-       if (*cur != NULL)
-               return PTRRELOC(*cur);
-
        for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
                if ((pvr & s->pvr_mask) == s->pvr_value) {
                        *cur = cpu_specs + i;
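
Two things change in cputable.c above: identify_cpu() now takes the PVR from its caller instead of reading SPRN_PVR itself (presumably so a spec can also be looked up for a "logical" PVR such as the 0x0f00000x architected values), and new fully-masked entries are added alongside the wildcarded ones. A compilable model of the first-match scan, with stand-in names and only a few illustrative entries:

#include <stdio.h>

struct spec_model {                     /* stand-in for struct cpu_spec */
        unsigned int pvr_mask;
        unsigned int pvr_value;
        const char *cpu_name;
};

static const struct spec_model specs[] = {
        { 0xffffffff, 0x0f000002, "POWER6 (architected)" },
        { 0xffff0000, 0x003e0000, "POWER6 (raw)" },
        { 0x00000000, 0x00000000, "default" },  /* catch-all, must stay last */
};

static const char *identify_model(unsigned int pvr)
{
        unsigned int i;

        /* First (pvr & mask) == value match wins, as in the loop above. */
        for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
                if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                        return specs[i].cpu_name;
        return "unknown";
}

int main(void)
{
        printf("%s\n", identify_model(0x003e0100));     /* raw POWER6 rev  */
        printf("%s\n", identify_model(0x0f000002));     /* architected mode */
        return 0;
}
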
index 1af41f7616dc84b280f60832ae7e2e9a9d5378bf..d3f2080d2eeeb3719778352b7f39c4bb2ce7b637 100644 (file)
@@ -46,61 +46,6 @@ int crashing_cpu = -1;
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
-                                                              size_t data_len)
-{
-       struct elf_note note;
-
-       note.n_namesz = strlen(name) + 1;
-       note.n_descsz = data_len;
-       note.n_type   = type;
-       memcpy(buf, &note, sizeof(note));
-       buf += (sizeof(note) +3)/4;
-       memcpy(buf, name, note.n_namesz);
-       buf += (note.n_namesz + 3)/4;
-       memcpy(buf, data, note.n_descsz);
-       buf += (note.n_descsz + 3)/4;
-
-       return buf;
-}
-
-static void final_note(u32 *buf)
-{
-       struct elf_note note;
-
-       note.n_namesz = 0;
-       note.n_descsz = 0;
-       note.n_type   = 0;
-       memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-       struct elf_prstatus prstatus;
-       u32 *buf;
-
-       if ((cpu < 0) || (cpu >= NR_CPUS))
-               return;
-
-       /* Using ELF notes here is opportunistic.
-        * I need a well defined structure format
-        * for the data I pass, and I need tags
-        * on the data to indicate what information I have
-        * squirrelled away.  ELF notes happen to provide
-        * all of that that no need to invent something new.
-        */
-       buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-       if (!buf) 
-               return;
-
-       memset(&prstatus, 0, sizeof(prstatus));
-       prstatus.pr_pid = current->pid;
-       elf_core_copy_regs(&prstatus.pr_reg, regs);
-       buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-                       sizeof(prstatus));
-       final_note(buf);
-}
-
 #ifdef CONFIG_SMP
 static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
@@ -111,9 +56,9 @@ void crash_ipi_callback(struct pt_regs *regs)
        if (!cpu_online(cpu))
                return;
 
-       local_irq_disable();
+       hard_irq_disable();
        if (!cpu_isset(cpu, cpus_in_crash))
-               crash_save_this_cpu(regs, cpu);
+               crash_save_cpu(regs, cpu);
        cpu_set(cpu, cpus_in_crash);
 
        /*
@@ -289,7 +234,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
         * an SMP system.
         * The kernel is broken so disable interrupts.
         */
-       local_irq_disable();
+       hard_irq_disable();
 
        for_each_irq(irq) {
                struct irq_desc *desc = irq_desc + irq;
@@ -306,7 +251,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
         * such that another IPI will not be sent.
         */
        crashing_cpu = smp_processor_id();
-       crash_save_this_cpu(regs, crashing_cpu);
+       crash_save_cpu(regs, crashing_cpu);
        crash_kexec_prepare_cpus(crashing_cpu);
        cpu_set(crashing_cpu, cpus_in_crash);
        if (ppc_md.kexec_cpu_down)
index 6c168f6ea1428963150aa28a85739c6ca9352340..7b0e754383cf53e47affcf4a46acab9b5264761c 100644 (file)
 /*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
  *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses and busses using the iommu infrastructure
  */
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-/* Include the busses we support */
-#include <linux/pci.h>
-#include <asm/vio.h>
-#include <asm/ibmebus.h>
-#include <asm/scatterlist.h>
 #include <asm/bug.h>
+#include <asm/iommu.h>
+#include <asm/abs_addr.h>
 
-static struct dma_mapping_ops *get_dma_ops(struct device *dev)
-{
-#ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type)
-               return &pci_dma_ops;
-#endif
-#ifdef CONFIG_IBMVIO
-       if (dev->bus == &vio_bus_type)
-               return &vio_dma_ops;
-#endif
-#ifdef CONFIG_IBMEBUS
-       if (dev->bus == &ibmebus_bus_type)
-               return &ibmebus_dma_ops;
-#endif
-       return NULL;
-}
+/*
+ * Generic iommu implementation
+ */
 
-int dma_supported(struct device *dev, u64 mask)
+static inline unsigned long device_to_mask(struct device *dev)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+       if (dev->dma_mask && *dev->dma_mask)
+               return *dev->dma_mask;
+       /* Assume devices without mask can take 32 bit addresses */
+       return 0xfffffffful;
+}
 
-       BUG_ON(!dma_ops);
 
-       return dma_ops->dma_supported(dev, mask);
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
+                                     dma_addr_t *dma_handle, gfp_t flag)
+{
+       return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
+                                   device_to_mask(dev), flag,
+                                   dev->archdata.numa_node);
 }
-EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static void dma_iommu_free_coherent(struct device *dev, size_t size,
+                                   void *vaddr, dma_addr_t dma_handle)
 {
-#ifdef CONFIG_PCI
-       if (dev->bus == &pci_bus_type)
-               return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
-#endif
-#ifdef CONFIG_IBMVIO
-       if (dev->bus == &vio_bus_type)
-               return -EIO;
-#endif /* CONFIG_IBMVIO */
-#ifdef CONFIG_IBMEBUS
-       if (dev->bus == &ibmebus_bus_type)
-               return -EIO;
-#endif
-       BUG();
-       return 0;
+       iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
 }
-EXPORT_SYMBOL(dma_set_mask);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag)
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
+                                      size_t size,
+                                      enum dma_data_direction direction)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       BUG_ON(!dma_ops);
-
-       return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+       return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+                               device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-               dma_addr_t dma_handle)
+
+static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
+                                  size_t size,
+                                  enum dma_data_direction direction)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+       iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
+}
 
-       BUG_ON(!dma_ops);
 
-       dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+                           int nelems, enum dma_data_direction direction)
+{
+       return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+                           device_to_mask(dev), direction);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-               enum dma_data_direction direction)
+static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+               int nelems, enum dma_data_direction direction)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       BUG_ON(!dma_ops);
-
-       return dma_ops->map_single(dev, cpu_addr, size, direction);
+       iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
 }
-EXPORT_SYMBOL(dma_map_single);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-               enum dma_data_direction direction)
+/* We support DMA to/from any memory page via the iommu */
+static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       BUG_ON(!dma_ops);
-
-       dma_ops->unmap_single(dev, dma_addr, size, direction);
+       struct iommu_table *tbl = dev->archdata.dma_data;
+
+       if (!tbl || tbl->it_offset > mask) {
+               printk(KERN_INFO
+                      "Warning: IOMMU offset too big for device mask\n");
+               if (tbl)
+                       printk(KERN_INFO
+                              "mask: 0x%08lx, table offset: 0x%08lx\n",
+                               mask, tbl->it_offset);
+               else
+                       printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+                               mask);
+               return 0;
+       } else
+               return 1;
 }
-EXPORT_SYMBOL(dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size,
-               enum dma_data_direction direction)
-{
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+struct dma_mapping_ops dma_iommu_ops = {
+       .alloc_coherent = dma_iommu_alloc_coherent,
+       .free_coherent  = dma_iommu_free_coherent,
+       .map_single     = dma_iommu_map_single,
+       .unmap_single   = dma_iommu_unmap_single,
+       .map_sg         = dma_iommu_map_sg,
+       .unmap_sg       = dma_iommu_unmap_sg,
+       .dma_supported  = dma_iommu_dma_supported,
+};
+EXPORT_SYMBOL(dma_iommu_ops);
 
-       BUG_ON(!dma_ops);
+/*
+ * Generic direct DMA implementation
+ *
+ * This implementation supports a global offset that can be applied if
+ * the address at which memory is visible to devices is not 0.
+ */
+unsigned long dma_direct_offset;
 
-       return dma_ops->map_single(dev, page_address(page) + offset, size,
-                       direction);
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag)
+{
+       struct page *page;
+       void *ret;
+       int node = dev->archdata.numa_node;
+
+       /* TODO: Maybe use the numa node here too ? */
+       page = alloc_pages_node(node, flag, get_order(size));
+       if (page == NULL)
+               return NULL;
+       ret = page_address(page);
+       memset(ret, 0, size);
+       *dma_handle = virt_to_abs(ret) | dma_direct_offset;
+
+       return ret;
 }
-EXPORT_SYMBOL(dma_map_page);
 
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-               enum dma_data_direction direction)
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+                                    void *vaddr, dma_addr_t dma_handle)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+       free_pages((unsigned long)vaddr, get_order(size));
+}
 
-       BUG_ON(!dma_ops);
+static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
+                                       size_t size,
+                                       enum dma_data_direction direction)
+{
+       return virt_to_abs(ptr) | dma_direct_offset;
+}
 
-       dma_ops->unmap_single(dev, dma_address, size, direction);
+static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
+                                   size_t size,
+                                   enum dma_data_direction direction)
+{
 }
-EXPORT_SYMBOL(dma_unmap_page);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction direction)
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sg,
+                            int nents, enum dma_data_direction direction)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+       int i;
 
-       BUG_ON(!dma_ops);
+       for (i = 0; i < nents; i++, sg++) {
+               sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
+                       dma_direct_offset;
+               sg->dma_length = sg->length;
+       }
 
-       return dma_ops->map_sg(dev, sg, nents, direction);
+       return nents;
 }
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-               enum dma_data_direction direction)
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction direction)
 {
-       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
-
-       BUG_ON(!dma_ops);
+}
 
-       dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+       /* Could be improved to check for memory though it better be
+        * done via some global so platforms can set the limit in case
+        * they have limited DMA windows
+        */
+       return mask >= DMA_32BIT_MASK;
 }
-EXPORT_SYMBOL(dma_unmap_sg);
+
+struct dma_mapping_ops dma_direct_ops = {
+       .alloc_coherent = dma_direct_alloc_coherent,
+       .free_coherent  = dma_direct_free_coherent,
+       .map_single     = dma_direct_map_single,
+       .unmap_single   = dma_direct_unmap_single,
+       .map_sg         = dma_direct_map_sg,
+       .unmap_sg       = dma_direct_unmap_sg,
+       .dma_supported  = dma_direct_dma_supported,
+};
+EXPORT_SYMBOL(dma_direct_ops);
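
The rewritten dma.c above drops the bus-type switch in favour of per-device operation tables: generic iommu and direct implementations are provided, and each device is expected to carry a pointer to the right table (plus its iommu table or NUMA node) in its archdata. A self-contained model of that dispatch, with stand-in types; the ibmebus hunk later in this diff shows the real assignment, dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops:

#include <stdio.h>

typedef unsigned long long dma_addr_t;

struct dma_ops_model {                  /* stand-in for dma_mapping_ops */
        dma_addr_t (*map_single)(void *vaddr, unsigned long size);
};

struct device_model {                   /* stand-in for struct device */
        struct {
                struct dma_ops_model *dma_ops;
        } archdata;
};

static dma_addr_t direct_map_single(void *vaddr, unsigned long size)
{
        (void)size;
        return (dma_addr_t)(unsigned long)vaddr;   /* 1:1 "direct" bus view */
}

static struct dma_ops_model direct_ops_model = {
        .map_single = direct_map_single,
};

/* Generic helper: no per-bus #ifdef chain any more, just indirect through
 * whatever ops the bus code attached at device registration time. */
static dma_addr_t dma_map_single_model(struct device_model *dev,
                                       void *vaddr, unsigned long size)
{
        return dev->archdata.dma_ops->map_single(vaddr, size);
}

int main(void)
{
        struct device_model dev = { .archdata = { .dma_ops = &direct_ops_model } };
        int buf;

        printf("mapped at %#llx\n", dma_map_single_model(&dev, &buf, sizeof(buf)));
        return 0;
}
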
index 748e74fcf541f4f1cb4eb6dcdb4694db218f4ec0..1a3d4de197d2bac728cc2492e3ba9565551d5717 100644 (file)
@@ -87,15 +87,19 @@ system_call_common:
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
+       li      r10,1
+       stb     r10,PACASOFTIRQEN(r13)
+       stb     r10,PACAHARDIRQEN(r13)
+       std     r10,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
        /* Hack for handling interrupts when soft-enabling on iSeries */
        cmpdi   cr1,r0,0x5555           /* syscall 0x5555 */
        andi.   r10,r12,MSR_PR          /* from kernel */
        crand   4*cr0+eq,4*cr1+eq,4*cr0+eq
-       beq     hardware_interrupt_entry
-       lbz     r10,PACAPROCENABLED(r13)
-       std     r10,SOFTE(r1)
+       bne     2f
+       b       hardware_interrupt_entry
+2:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
        mfmsr   r11
@@ -460,9 +464,9 @@ _GLOBAL(ret_from_except_lite)
 #endif
 
 restore:
+       ld      r5,SOFTE(r1)
 #ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
-       ld      r5,SOFTE(r1)
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
@@ -472,21 +476,25 @@ BEGIN_FW_FTR_SECTION
        beq+    4f                      /* skip do_IRQ if no interrupts */
 
        li      r3,0
-       stb     r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
+       stb     r3,PACASOFTIRQEN(r13)   /* ensure we are soft-disabled */
        ori     r10,r10,MSR_EE
        mtmsrd  r10                     /* hard-enable again */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite           /* loop back and handle more */
-
-4:     stb     r5,PACAPROCENABLED(r13)
+4:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+       stb     r5,PACASOFTIRQEN(r13)
 
        ld      r3,_MSR(r1)
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore
 
+       /* extract EE bit and use it to restore paca->hard_enabled */
+       rldicl  r4,r3,49,63             /* r0 = (r3 >> 15) & 1 */
+       stb     r4,PACAHARDIRQEN(r13)
+
        andi.   r0,r3,MSR_PR
 
        /*
@@ -538,25 +546,15 @@ do_work:
        /* Check that preempt_count() == 0 and interrupts are enabled */
        lwz     r8,TI_PREEMPT(r9)
        cmpwi   cr1,r8,0
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
        ld      r0,SOFTE(r1)
        cmpdi   r0,0
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-       andi.   r0,r3,MSR_EE
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
        crandc  eq,cr1*4+eq,eq
        bne     restore
        /* here we are preempting the current task */
 1:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
        li      r0,1
-       stb     r0,PACAPROCENABLED(r13)
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+       stb     r0,PACASOFTIRQEN(r13)
+       stb     r0,PACAHARDIRQEN(r13)
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1           /* reenable interrupts */
        bl      .preempt_schedule
@@ -639,8 +637,7 @@ _GLOBAL(enter_rtas)
        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
-       mfmsr   r6
-       andi.   r0,r6,MSR_EE
+       lbz     r0,PACASOFTIRQEN(r13)
 1:     tdnei   r0,0
 .section __bug_table,"a"
        .llong  1b,__LINE__ + 0x1000000, 1f, 2f
@@ -649,7 +646,13 @@ _GLOBAL(enter_rtas)
 1:     .asciz  __FILE__
 2:     .asciz "enter_rtas"
 .previous
-       
+
+       /* Hard-disable interrupts */
+       mfmsr   r6
+       rldicl  r7,r6,48,1
+       rotldi  r7,r7,16
+       mtmsrd  r7,1
+
        /* Unfortunately, the stack pointer and the MSR are also clobbered,
         * so they are saved in the PACA which allows us to restore
         * our original state after RTAS returns.
@@ -735,8 +738,6 @@ _STATIC(rtas_restore_regs)
 
 #endif /* CONFIG_PPC_RTAS */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
 _GLOBAL(enter_prom)
        mflr    r0
        std     r0,16(r1)
@@ -821,5 +822,3 @@ _GLOBAL(enter_prom)
        ld      r0,16(r1)
        mtlr    r0
         blr
-       
-#endif /* CONFIG_PPC_MULTIPLATFORM */
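
The entry_64.S changes above are one half of the lazy interrupt-disable scheme: the single PACAPROCENABLED byte becomes soft_enabled plus hard_enabled, syscall entry marks both enabled, and exception return rebuilds hard_enabled from the MSR_EE bit it is about to restore. A rough user-space model of the overall idea, with stand-in names (the real deferral goes through the masked_interrupt stub in head_64.S and local_irq_restore() in irq.c, both later in this diff):

#include <stdio.h>

static int soft_enabled = 1;    /* what C code believes (paca->soft_enabled) */
static int hard_enabled = 1;    /* whether MSR_EE is really set              */
static int pending;             /* an interrupt arrived while soft-disabled  */

static void local_irq_disable_model(void)
{
        soft_enabled = 0;       /* cheap: no mtmsrd required */
}

/* Model of the masked_interrupt path: the exception is taken, but since we
 * are soft-disabled it only records the fact and turns EE off. */
static void hardware_interrupt_model(void)
{
        if (!soft_enabled) {
                hard_enabled = 0;
                pending = 1;
                return;
        }
        printf("handler runs immediately\n");
}

static void local_irq_restore_model(int en)
{
        soft_enabled = en;
        if (!en || hard_enabled)
                return;
        hard_enabled = 1;       /* would be an mtmsrd setting MSR_EE       */
        if (pending) {          /* in reality the still-asserted interrupt */
                pending = 0;    /* simply fires again once EE is set       */
                printf("deferred interrupt handled now\n");
        }
}

int main(void)
{
        local_irq_disable_model();
        hardware_interrupt_model();     /* deferred                        */
        local_irq_restore_model(1);     /* prints "deferred interrupt ..." */
        return 0;
}
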
index e720729f3e5536c6d267b68677a3c1a2ba514432..71b1fe58e9e4d8a0a06fd9d3f6b3059f5e51c43f 100644 (file)
@@ -35,9 +35,7 @@
 #include <asm/thread_info.h>
 #include <asm/firmware.h>
 
-#ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
-#endif
 
 /*
  * We layout physical memory as follows:
        .text
        .globl  _stext
 _stext:
-#ifdef CONFIG_PPC_MULTIPLATFORM
 _GLOBAL(__start)
        /* NOP this out unconditionally */
 BEGIN_FTR_SECTION
        b       .__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 
        /* Catch branch to 0 in real mode */
        trap
@@ -308,7 +304,9 @@ exception_marker:
        std     r9,_LINK(r1);                                              \
        mfctr   r10;                    /* save CTR in stackframe       */ \
        std     r10,_CTR(r1);                                              \
+       lbz     r10,PACASOFTIRQEN(r13);                            \
        mfspr   r11,SPRN_XER;           /* save XER in stackframe       */ \
+       std     r10,SOFTE(r1);                                             \
        std     r11,_XER(r1);                                              \
        li      r9,(n)+1;                                                  \
        std     r9,_TRAP(r1);           /* set trap number              */ \
@@ -343,6 +341,34 @@ label##_pSeries:                                   \
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
 
+#define MASKABLE_EXCEPTION_PSERIES(n, label)                           \
+       . = n;                                                          \
+       .globl label##_pSeries;                                         \
+label##_pSeries:                                                       \
+       HMT_MEDIUM;                                                     \
+       mtspr   SPRN_SPRG1,r13;         /* save r13 */                  \
+       mfspr   r13,SPRN_SPRG3;         /* get paca address into r13 */ \
+       std     r9,PACA_EXGEN+EX_R9(r13);       /* save r9, r10 */      \
+       std     r10,PACA_EXGEN+EX_R10(r13);                             \
+       lbz     r10,PACASOFTIRQEN(r13);                                 \
+       mfcr    r9;                                                     \
+       cmpwi   r10,0;                                                  \
+       beq     masked_interrupt;                                       \
+       mfspr   r10,SPRN_SPRG1;                                         \
+       std     r10,PACA_EXGEN+EX_R13(r13);                             \
+       std     r11,PACA_EXGEN+EX_R11(r13);                             \
+       std     r12,PACA_EXGEN+EX_R12(r13);                             \
+       clrrdi  r12,r13,32;             /* get high part of &label */   \
+       mfmsr   r10;                                                    \
+       mfspr   r11,SPRN_SRR0;          /* save SRR0 */                 \
+       LOAD_HANDLER(r12,label##_common)                                \
+       ori     r10,r10,MSR_IR|MSR_DR|MSR_RI;                           \
+       mtspr   SPRN_SRR0,r12;                                          \
+       mfspr   r12,SPRN_SRR1;          /* and SRR1 */                  \
+       mtspr   SPRN_SRR1,r10;                                          \
+       rfid;                                                           \
+       b       .       /* prevent speculative execution */
+
 #define STD_EXCEPTION_ISERIES(n, label, area)          \
        .globl label##_iSeries;                         \
 label##_iSeries:                                       \
@@ -358,40 +384,32 @@ label##_iSeries:                                                  \
        HMT_MEDIUM;                                                     \
        mtspr   SPRN_SPRG1,r13;         /* save r13 */                  \
        EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN);                         \
-       lbz     r10,PACAPROCENABLED(r13);                               \
+       lbz     r10,PACASOFTIRQEN(r13);                                 \
        cmpwi   0,r10,0;                                                \
        beq-    label##_iSeries_masked;                                 \
        EXCEPTION_PROLOG_ISERIES_2;                                     \
        b       label##_common;                                         \
 
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
 #define DISABLE_INTS                           \
-BEGIN_FW_FTR_SECTION;                          \
-       lbz     r10,PACAPROCENABLED(r13);       \
        li      r11,0;                          \
-       std     r10,SOFTE(r1);                  \
+       stb     r11,PACASOFTIRQEN(r13);         \
+BEGIN_FW_FTR_SECTION;                          \
+       stb     r11,PACAHARDIRQEN(r13);         \
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);  \
+BEGIN_FW_FTR_SECTION;                          \
        mfmsr   r10;                            \
-       stb     r11,PACAPROCENABLED(r13);       \
        ori     r10,r10,MSR_EE;                 \
        mtmsrd  r10,1;                          \
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 
-#define ENABLE_INTS                            \
-BEGIN_FW_FTR_SECTION;                          \
-       lbz     r10,PACAPROCENABLED(r13);       \
-       mfmsr   r11;                            \
-       std     r10,SOFTE(r1);                  \
-       ori     r11,r11,MSR_EE;                 \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES);  \
-BEGIN_FW_FTR_SECTION;                          \
-       ld      r12,_MSR(r1);                   \
-       mfmsr   r11;                            \
-       rlwimi  r11,r12,0,MSR_EE;               \
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);  \
-       mtmsrd  r11,1
+#else
+#define DISABLE_INTS                           \
+       li      r11,0;                          \
+       stb     r11,PACASOFTIRQEN(r13);         \
+       stb     r11,PACAHARDIRQEN(r13)
 
-#else  /* hard enable/disable interrupts */
-#define DISABLE_INTS
+#endif /* CONFIG_PPC_ISERIES */
 
 #define ENABLE_INTS                            \
        ld      r12,_MSR(r1);                   \
@@ -399,8 +417,6 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES);       \
        rlwimi  r11,r12,0,MSR_EE;               \
        mtmsrd  r11,1
 
-#endif
-
 #define STD_EXCEPTION_COMMON(trap, label, hdlr)                \
        .align  7;                                      \
        .globl label##_common;                          \
@@ -541,11 +557,11 @@ instruction_access_slb_pSeries:
        mfspr   r12,SPRN_SRR1           /* and SRR1 */
        b       .slb_miss_realmode      /* Rel. branch works in real mode */
 
-       STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+       MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
        STD_EXCEPTION_PSERIES(0x600, alignment)
        STD_EXCEPTION_PSERIES(0x700, program_check)
        STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
-       STD_EXCEPTION_PSERIES(0x900, decrementer)
+       MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
        STD_EXCEPTION_PSERIES(0xa00, trap_0a)
        STD_EXCEPTION_PSERIES(0xb00, trap_0b)
 
@@ -597,7 +613,24 @@ system_call_pSeries:
 /*** pSeries interrupt support ***/
 
        /* moved from 0xf00 */
-       STD_EXCEPTION_PSERIES(., performance_monitor)
+       MASKABLE_EXCEPTION_PSERIES(., performance_monitor)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+       stb     r10,PACAHARDIRQEN(r13)
+       mtcrf   0x80,r9
+       ld      r9,PACA_EXGEN+EX_R9(r13)
+       mfspr   r10,SPRN_SRR1
+       rldicl  r10,r10,48,1            /* clear MSR_EE */
+       rotldi  r10,r10,16
+       mtspr   SPRN_SRR1,r10
+       ld      r10,PACA_EXGEN+EX_R10(r13)
+       mfspr   r13,SPRN_SPRG1
+       rfid
+       b       .
 
        .align  7
 do_stab_bolted_pSeries:
@@ -792,7 +825,7 @@ system_reset_iSeries:
 
        cmpwi   0,r23,0
        beq     iSeries_secondary_smp_loop      /* Loop until told to go */
-       bne     .__secondary_start              /* Loop until told to go */
+       bne     __secondary_start               /* Loop until told to go */
 iSeries_secondary_smp_loop:
        /* Let the Hypervisor know we are alive */
        /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -813,7 +846,6 @@ iSeries_secondary_smp_loop:
        b       1b                      /* If SMP not configured, secondaries
                                         * loop forever */
 
-       .globl decrementer_iSeries_masked
 decrementer_iSeries_masked:
        /* We may not have a valid TOC pointer in here. */
        li      r11,1
@@ -824,7 +856,6 @@ decrementer_iSeries_masked:
        mtspr   SPRN_DEC,r12
        /* fall through */
 
-       .globl hardware_interrupt_iSeries_masked
 hardware_interrupt_iSeries_masked:
        mtcrf   0x80,r9         /* Restore regs */
        ld      r12,PACALPPACAPTR(r13)
@@ -926,10 +957,18 @@ bad_stack:
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+fast_exc_return_irq:                   /* restores irq state too */
+       ld      r3,SOFTE(r1)
+       ld      r12,_MSR(r1)
+       stb     r3,PACASOFTIRQEN(r13)   /* restore paca->soft_enabled */
+       rldicl  r4,r12,49,63            /* get MSR_EE to LSB */
+       stb     r4,PACAHARDIRQEN(r13)   /* restore paca->hard_enabled */
+       b       1f
+
        .globl  fast_exception_return
 fast_exception_return:
        ld      r12,_MSR(r1)
-       ld      r11,_NIP(r1)
+1:     ld      r11,_NIP(r1)
        andi.   r3,r12,MSR_RI           /* check if RI is set */
        beq-    unrecov_fer
 
@@ -952,7 +991,8 @@ fast_exception_return:
        REST_8GPRS(2, r1)
 
        mfmsr   r10
-       clrrdi  r10,r10,2               /* clear RI (LE is 0 already) */
+       rldicl  r10,r10,48,1            /* clear EE */
+       rldicr  r10,r10,16,61           /* clear RI (LE is 0 already) */
        mtmsrd  r10,1
 
        mtspr   SPRN_SRR1,r12
@@ -1326,6 +1366,16 @@ BEGIN_FW_FTR_SECTION
         * interrupts if necessary.
         */
        beq     13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif
+BEGIN_FW_FTR_SECTION
+       /*
+        * Here we have interrupts hard-disabled, so it is sufficient
+        * to restore paca->{soft,hard}_enable and get out.
+        */
+       beq     fast_exc_return_irq     /* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
        /* For a hash failure, we don't bother re-enabling interrupts */
        ble-    12f
 
@@ -1337,14 +1387,6 @@ BEGIN_FW_FTR_SECTION
        ld      r3,SOFTE(r1)
        bl      .local_irq_restore
        b       11f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
-BEGIN_FW_FTR_SECTION
-       beq     fast_exception_return   /* Return from exception on success */
-       ble-    12f                     /* Failure return from hash_page */
-
-       /* fall through */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
@@ -1362,6 +1404,8 @@ handle_page_fault:
        bl      .bad_page_fault
        b       .ret_from_except
 
+13:    b       .ret_from_except_lite
+
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  */
@@ -1371,8 +1415,6 @@ handle_page_fault:
        bl      .low_hash_fault
        b       .ret_from_except
 
-13:    b       .ret_from_except_lite
-
        /* here we have a segment miss */
 do_ste_alloc:
        bl      .ste_allocate           /* try to insert stab entry */
@@ -1560,7 +1602,7 @@ _GLOBAL(generic_secondary_smp_init)
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD
 
-       b       .__secondary_start
+       b       __secondary_start
 #endif
 
 #ifdef CONFIG_PPC_ISERIES
@@ -1595,7 +1637,6 @@ _STATIC(__start_initialization_iSeries)
        b       .start_here_common
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 
 _STATIC(__mmu_off)
        mfmsr   r3
@@ -1621,13 +1662,11 @@ _STATIC(__mmu_off)
  *
  */
 _GLOBAL(__start_initialization_multiplatform)
-#ifdef CONFIG_PPC_MULTIPLATFORM
        /*
         * Are we booted from a PROM Of-type client-interface ?
         */
        cmpldi  cr0,r5,0
        bne     .__boot_from_prom               /* yes -> prom */
-#endif
 
        /* Save parameters */
        mr      r31,r3
@@ -1656,7 +1695,6 @@ _GLOBAL(__start_initialization_multiplatform)
        bl      .__mmu_off
        b       .__after_prom_start
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 _STATIC(__boot_from_prom)
        /* Save parameters */
        mr      r31,r3
@@ -1696,7 +1734,6 @@ _STATIC(__boot_from_prom)
        bl      .prom_init
        /* We never return */
        trap
-#endif
 
 /*
  * At this point, r3 contains the physical address we are running at,
@@ -1752,8 +1789,6 @@ _STATIC(__after_prom_start)
        bl      .copy_and_flush         /* copy the rest */
        b       .start_here_multiplatform
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 /*
  * Copy routine used to copy the kernel to start at physical address 0
  * and flush and invalidate the caches as needed.
@@ -1836,7 +1871,7 @@ _GLOBAL(pmac_secondary_start)
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD
 
-       b       .__secondary_start
+       b       __secondary_start
 
 #endif /* CONFIG_PPC_PMAC */
 
@@ -1853,7 +1888,7 @@ _GLOBAL(pmac_secondary_start)
  *   r13   = paca virtual address
  *   SPRG3 = paca virtual address
  */
-_GLOBAL(__secondary_start)
+__secondary_start:
        /* Set thread priority to MEDIUM */
        HMT_MEDIUM
 
@@ -1877,11 +1912,16 @@ _GLOBAL(__secondary_start)
        /* enable MMU and jump to start_secondary */
        LOAD_REG_ADDR(r3, .start_secondary_prolog)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
-#ifdef DO_SOFT_DISABLE
+#ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
        ori     r4,r4,MSR_EE
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+BEGIN_FW_FTR_SECTION
+       stb     r7,PACASOFTIRQEN(r13)
+       stb     r7,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        rfid
@@ -1913,7 +1953,6 @@ _GLOBAL(enable_64b_mode)
        isync
        blr
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 /*
  * This is where the main kernel code starts.
  */
@@ -1977,7 +2016,6 @@ _STATIC(start_here_multiplatform)
        mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */
-#endif /* CONFIG_PPC_MULTIPLATFORM */
        
        /* This is where all platforms converge execution */
 _STATIC(start_here_common)
@@ -2005,15 +2043,18 @@ _STATIC(start_here_common)
 
        /* Load up the kernel context */
 5:
-#ifdef DO_SOFT_DISABLE
-BEGIN_FW_FTR_SECTION
        li      r5,0
-       stb     r5,PACAPROCENABLED(r13) /* Soft Disabled */
+       stb     r5,PACASOFTIRQEN(r13)   /* Soft Disabled */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
        mfmsr   r5
        ori     r5,r5,MSR_EE            /* Hard Enabled */
        mtmsrd  r5
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 #endif
+BEGIN_FW_FTR_SECTION
+       stb     r5,PACAHARDIRQEN(r13)
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
        bl .start_kernel
 
index 39db7a3affe11e37f90c310aab469f1f0686827e..82bd2f10770f9a8d732f0bc287182f94fcf00eec 100644 (file)
@@ -112,7 +112,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-struct dma_mapping_ops ibmebus_dma_ops = {
+static struct dma_mapping_ops ibmebus_dma_ops = {
        .alloc_coherent = ibmebus_alloc_coherent,
        .free_coherent  = ibmebus_free_coherent,
        .map_single     = ibmebus_map_single,
@@ -176,6 +176,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_common(
        dev->ofdev.dev.bus     = &ibmebus_bus_type;
        dev->ofdev.dev.release = ibmebus_dev_release;
 
+       dev->ofdev.dev.archdata.of_node = dev->ofdev.node;
+       dev->ofdev.dev.archdata.dma_ops = &ibmebus_dma_ops;
+       dev->ofdev.dev.archdata.numa_node = of_node_to_nid(dev->ofdev.node);
+
        /* An ibmebusdev is based on a of_device. We have to change the
         * bus type to use our own DMA mapping operations. 
         */       
@@ -210,11 +214,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node(
                return NULL;
        }
 
-       dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
+       dev = kzalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
        if (!dev) {
                return NULL;
        }
-       memset(dev, 0, sizeof(struct ibmebus_dev));
 
        dev->ofdev.node = of_node_get(dn);
        
index 4180c3998b398f47a1fd5cc3bcc779446901b8fb..8994af327b47a2586aa1719f442d10a052fd14eb 100644 (file)
 #define cpu_should_die()       0
 #endif
 
+static int __init powersave_off(char *arg)
+{
+       ppc_md.power_save = NULL;
+       return 0;
+}
+__setup("powersave=off", powersave_off);
+
 /*
  * The body of the idle task.
  */
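
The idle.c hunk above adds a powersave=off boot option whose handler simply clears ppc_md.power_save, so the idle loop never enters a power-saving routine. A toy, self-contained model of how such an option string reaches its handler (the real __setup() machinery collects handlers in a dedicated linker section and is considerably more involved):

#include <stdio.h>
#include <string.h>

static void default_power_save(void) { }
static void (*power_save_hook)(void) = default_power_save;

static int powersave_off_model(char *arg)
{
        (void)arg;
        power_save_hook = NULL;         /* same effect as the handler above */
        return 0;
}

struct boot_option_model {              /* stand-in for obs_kernel_param */
        const char *str;
        int (*setup_func)(char *);
};

static struct boot_option_model options[] = {
        { "powersave=off", powersave_off_model },
};

static void parse_cmdline_model(char *cmdline)
{
        unsigned int i;
        char *tok = strtok(cmdline, " ");

        while (tok) {
                for (i = 0; i < sizeof(options) / sizeof(options[0]); i++)
                        if (!strncmp(tok, options[i].str, strlen(options[i].str)))
                                options[i].setup_func(tok + strlen(options[i].str));
                tok = strtok(NULL, " ");
        }
}

int main(void)
{
        char cmdline[] = "root=/dev/nfs powersave=off";

        parse_cmdline_model(cmdline);
        printf("power_save hook is %s\n", power_save_hook ? "set" : "cleared");
        return 0;
}
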
index 30de81da7b40fe163693ca4400edd643c36e3943..ba3195478600998a72bbdbabeaf7761963a435ed 100644 (file)
@@ -30,6 +30,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
        beqlr
 
        /* Go to NAP now */
+       mfmsr   r7
+       rldicl  r0,r7,48,1
+       rotldi  r0,r0,16
+       mtmsrd  r0,1                    /* hard-disable interrupts */
+       li      r0,1
+       stb     r0,PACASOFTIRQEN(r13)   /* we'll hard-enable shortly */
+       stb     r0,PACAHARDIRQEN(r13)
 BEGIN_FTR_SECTION
        DSSALL
        sync
@@ -38,7 +45,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        ld      r8,TI_LOCAL_FLAGS(r9)   /* set napping bit */
        ori     r8,r8,_TLF_NAPPING      /* so when we take an exception */
        std     r8,TI_LOCAL_FLAGS(r9)   /* it will return to our caller */
-       mfmsr   r7
        ori     r7,r7,MSR_EE
        oris    r7,r7,MSR_POW@h
 1:     sync
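
The nap-entry code above, like the masked_interrupt stub and the RTAS entry path earlier, clears MSR_EE with a two-instruction rotate instead of building a mask in a scratch register: rldicl rD,rS,48,1 rotates EE (bit 15 from the LSB) up to the top bit and masks it off, and rotldi rD,rD,16 rotates everything else back into place. A small stand-alone check of that identity:

#include <stdio.h>
#include <stdint.h>

static uint64_t rotl64(uint64_t v, unsigned int n)
{
        return (v << n) | (v >> (64 - n));      /* n is 16 or 48 here */
}

int main(void)
{
        const uint64_t MSR_EE = 1ull << 15;
        uint64_t msr = 0x8000000000009032ull;   /* arbitrary test value */
        uint64_t t;

        t = rotl64(msr, 48) & ~(1ull << 63);    /* rldicl t,msr,48,1 */
        t = rotl64(t, 16);                      /* rotldi t,t,16     */

        printf("%s\n", t == (msr & ~MSR_EE) ? "EE cleared, rest untouched"
                                            : "mismatch");
        return 0;
}
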
index e98180686b352d848790b29f0fc3de809131ff41..34ae11494ddcd96d0b27a8f961ff2b8d6d166041 100644 (file)
 #include <asm/firmware.h>
 #include <asm/bug.h>
 
-void _insb(volatile u8 __iomem *port, void *buf, long count)
+void _insb(const volatile u8 __iomem *port, void *buf, long count)
 {
        u8 *tbuf = buf;
        u8 tmp;
 
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
        if (unlikely(count <= 0))
                return;
        asm volatile("sync");
@@ -48,8 +46,6 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
 {
        const u8 *tbuf = buf;
 
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
        if (unlikely(count <= 0))
                return;
        asm volatile("sync");
@@ -60,13 +56,11 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
 }
 EXPORT_SYMBOL(_outsb);
 
-void _insw_ns(volatile u16 __iomem *port, void *buf, long count)
+void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
 {
        u16 *tbuf = buf;
        u16 tmp;
 
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
        if (unlikely(count <= 0))
                return;
        asm volatile("sync");
@@ -83,8 +77,6 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
 {
        const u16 *tbuf = buf;
 
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
        if (unlikely(count <= 0))
                return;
        asm volatile("sync");
@@ -95,13 +87,11 @@ void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
 }
 EXPORT_SYMBOL(_outsw_ns);
 
-void _insl_ns(volatile u32 __iomem *port, void *buf, long count)
+void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
 {
        u32 *tbuf = buf;
        u32 tmp;
 
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
        if (unlikely(count <= 0))
                return;
        asm volatile("sync");
@@ -118,8 +108,6 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
 {
        const u32 *tbuf = buf;
 
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
        if (unlikely(count <= 0))
                return;
        asm volatile("sync");
@@ -129,3 +117,90 @@ void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
        asm volatile("sync");
 }
 EXPORT_SYMBOL(_outsl_ns);
+
+#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
+
+void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+{
+       void *p = (void __force *)addr;
+       u32 lc = c;
+       lc |= lc << 8;
+       lc |= lc << 16;
+
+       __asm__ __volatile__ ("sync" : : : "memory");
+       while(n && !IO_CHECK_ALIGN(p, 4)) {
+               *((volatile u8 *)p) = c;
+               p++;
+               n--;
+       }
+       while(n >= 4) {
+               *((volatile u32 *)p) = lc;
+               p += 4;
+               n -= 4;
+       }
+       while(n) {
+               *((volatile u8 *)p) = c;
+               p++;
+               n--;
+       }
+       __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memset_io);
+
+void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+                   unsigned long n)
+{
+       void *vsrc = (void __force *) src;
+
+       __asm__ __volatile__ ("sync" : : : "memory");
+       while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
+               *((u8 *)dest) = *((volatile u8 *)vsrc);
+               __asm__ __volatile__ ("eieio" : : : "memory");
+               vsrc++;
+               dest++;
+               n--;
+       }
+       while(n > 4) {
+               *((u32 *)dest) = *((volatile u32 *)vsrc);
+               __asm__ __volatile__ ("eieio" : : : "memory");
+               vsrc += 4;
+               dest += 4;
+               n -= 4;
+       }
+       while(n) {
+               *((u8 *)dest) = *((volatile u8 *)vsrc);
+               __asm__ __volatile__ ("eieio" : : : "memory");
+               vsrc++;
+               dest++;
+               n--;
+       }
+       __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_fromio);
+
+void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
+{
+       void *vdest = (void __force *) dest;
+
+       __asm__ __volatile__ ("sync" : : : "memory");
+       while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
+               *((volatile u8 *)vdest) = *((u8 *)src);
+               src++;
+               vdest++;
+               n--;
+       }
+       while(n > 4) {
+               *((volatile u32 *)vdest) = *((volatile u32 *)src);
+               src += 4;
+               vdest += 4;
+               n-=4;
+       }
+       while(n) {
+               *((volatile u8 *)vdest) = *((u8 *)src);
+               src++;
+               vdest++;
+               n--;
+       }
+       __asm__ __volatile__ ("sync" : : : "memory");
+}
+EXPORT_SYMBOL(_memcpy_toio);
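
For illustration, a minimal sketch of how a driver might sit on top of these helpers through the generic memset_io()/memcpy_toio() wrappers; the register offset, buffer size and function name below are invented for the example and are not part of this patch.

#include <linux/types.h>
#include <asm/io.h>

/* Hypothetical: clear a 256-byte device mailbox at offset 0x100 of an
 * already ioremap'd region, then copy a command block into it.  The
 * sync/eieio barriers are handled inside the helpers added above. */
static void demo_push_command(void __iomem *regs, const void *cmd, size_t len)
{
	memset_io(regs + 0x100, 0, 256);
	memcpy_toio(regs + 0x100, cmd, len);
}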
index a13a93dfc65539e7a70ef75baab047bcb250e277..c68113371050780a2910d067109cabe6b0c30b25 100644 (file)
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(iowrite32_rep);
 
 void __iomem *ioport_map(unsigned long port, unsigned int len)
 {
-       return (void __iomem *) (port+pci_io_base);
+       return (void __iomem *) (port + _IO_BASE);
 }
 
 void ioport_unmap(void __iomem *addr)
index ba6b7256084b31bb64ecd602782bc48ef3bdf545..95edad4faf26697d61e019f3007c18d0a47f41fa 100644 (file)
@@ -258,9 +258,9 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
-int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
-               struct scatterlist *sglist, int nelems,
-               unsigned long mask, enum dma_data_direction direction)
+int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+                int nelems, unsigned long mask,
+                enum dma_data_direction direction)
 {
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
index 5e37bf14ef2dba4dd6094923b85bbed7591bbb7f..0bd8c7665834bf39b5a5b32485e6cc279856f9f5 100644 (file)
@@ -64,8 +64,9 @@
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
 #include <asm/paca.h>
+#include <asm/firmware.h>
 #endif
 
 int __irq_offset_value;
@@ -95,6 +96,74 @@ extern atomic_t ipi_sent;
 EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
+
+static inline unsigned long get_hard_enabled(void)
+{
+       unsigned long enabled;
+
+       __asm__ __volatile__("lbz %0,%1(13)"
+       : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+
+       return enabled;
+}
+
+static inline void set_soft_enabled(unsigned long enable)
+{
+       __asm__ __volatile__("stb %0,%1(13)"
+       : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
+}
+
+void local_irq_restore(unsigned long en)
+{
+       /*
+        * get_paca()->soft_enabled = en;
+        * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
+        * That was allowed before, and in such a case we do need to take care
+        * that gcc will set soft_enabled directly via r13, not choose to use
+        * an intermediate register, lest we're preempted to a different cpu.
+        */
+       set_soft_enabled(en);
+       if (!en)
+               return;
+
+       if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+               /*
+                * Do we need to disable preemption here?  Not really: in the
+                * unlikely event that we're preempted to a different cpu in
+                * between getting r13, loading its lppaca_ptr, and loading
+                * its any_int, we might call iseries_handle_interrupts without
+                * an interrupt pending on the new cpu, but that's no disaster,
+                * is it?  And the business of preempting us off the old cpu
+                * would itself involve a local_irq_restore which handles the
+                * interrupt to that cpu.
+                *
+                * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
+                * to avoid any preemption checking added into get_paca().
+                */
+               if (local_paca->lppaca_ptr->int_dword.any_int)
+                       iseries_handle_interrupts();
+               return;
+       }
+
+       /*
+        * if (get_paca()->hard_enabled) return;
+        * But again we need to take care that gcc gets hard_enabled directly
+        * via r13, not choose to use an intermediate register, lest we're
+        * preempted to a different cpu in between the two instructions.
+        */
+       if (get_hard_enabled())
+               return;
+
+       /*
+        * Need to hard-enable interrupts here.  Since currently disabled,
+        * no need to take further asm precautions against preemption; but
+        * use local_paca instead of get_paca() to avoid preemption checking.
+        */
+       local_paca->hard_enabled = en;
+       if ((int)mfspr(SPRN_DEC) < 0)
+               mtspr(SPRN_DEC, 1);
+       hard_irq_enable();
+}
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
@@ -246,7 +315,8 @@ void do_IRQ(struct pt_regs *regs)
        set_irq_regs(old_regs);
 
 #ifdef CONFIG_PPC_ISERIES
-       if (get_lppaca()->int_dword.fields.decr_int) {
+       if (firmware_has_feature(FW_FEATURE_ISERIES) &&
+                       get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
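
As a reminder of the calling convention that the lazy-disable logic in local_irq_restore() above has to preserve, here is a generic (not PPC-specific) usage sketch; the function name is invented and the header choice is an assumption about where the flags macros live.

#include <linux/irqflags.h>

static void demo_critical_section(void)
{
	unsigned long flags;

	/* on PPC64 this only soft-disables; the hard disable is deferred */
	local_irq_save(flags);
	/* ... touch state that must not race with interrupt handlers ... */
	local_irq_restore(flags);	/* may replay a pending interrupt */
}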
@@ -626,10 +696,14 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
 
 void irq_dispose_mapping(unsigned int virq)
 {
-       struct irq_host *host = irq_map[virq].host;
+       struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned long flags;
 
+       if (virq == NO_IRQ)
+               return;
+
+       host = irq_map[virq].host;
        WARN_ON (host == NULL);
        if (host == NULL)
                return;
index 7b8d12b9026c48487fbad3a4b83ca682108db260..4657563f88139ea60d4f4e4775c0cd8a33b15d42 100644 (file)
@@ -85,7 +85,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
        mutex_lock(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn);
+       free_insn_slot(p->ainsn.insn, 0);
        mutex_unlock(&kprobe_mutex);
 }
 
index 397c83eda20ee3ac907c929e526bf59510b7b5f1..8a06724e029e4fd1f8cee862b95bd7101839ed20 100644 (file)
@@ -9,30 +9,26 @@
 #include <asm/of_device.h>
 
 /**
- * of_match_device - Tell if an of_device structure has a matching
- * of_match structure
+ * of_match_node - Tell if a device_node has a matching of_match structure
  * @ids: array of of device match structures to search in
- * @dev: the of device structure to match against
+ * @node: the device_node to match against
  *
- * Used by a driver to check whether an of_device present in the
- * system is in its list of supported devices.
+ * Low level utility function used by device matching.
  */
-const struct of_device_id *of_match_device(const struct of_device_id *matches,
-                                       const struct of_device *dev)
+const struct of_device_id *of_match_node(const struct of_device_id *matches,
+                                        const struct device_node *node)
 {
-       if (!dev->node)
-               return NULL;
        while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
                int match = 1;
                if (matches->name[0])
-                       match &= dev->node->name
-                               && !strcmp(matches->name, dev->node->name);
+                       match &= node->name
+                               && !strcmp(matches->name, node->name);
                if (matches->type[0])
-                       match &= dev->node->type
-                               && !strcmp(matches->type, dev->node->type);
+                       match &= node->type
+                               && !strcmp(matches->type, node->type);
                if (matches->compatible[0])
-                       match &= device_is_compatible(dev->node,
-                               matches->compatible);
+                       match &= device_is_compatible(node,
+                                                     matches->compatible);
                if (match)
                        return matches;
                matches++;
@@ -40,16 +36,21 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches,
        return NULL;
 }
 
-static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+/**
+ * of_match_device - Tell if an of_device structure has a matching
+ * of_match structure
+ * @ids: array of of device match structures to search in
+ * @dev: the of device structure to match against
+ *
+ * Used by a driver to check whether an of_device present in the
+ * system is in its list of supported devices.
+ */
+const struct of_device_id *of_match_device(const struct of_device_id *matches,
+                                       const struct of_device *dev)
 {
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * of_drv = to_of_platform_driver(drv);
-       const struct of_device_id * matches = of_drv->match_table;
-
-       if (!matches)
-               return 0;
-
-       return of_match_device(matches, of_dev) != NULL;
+       if (!dev->node)
+               return NULL;
+       return of_match_node(matches, dev->node);
 }
 
 struct of_device *of_dev_get(struct of_device *dev)
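
A short sketch of how code typically consumes the matching helpers split out above: checking a bare device_node against a table with the new of_match_node(). The compatible string and function name are invented for the example.

#include <asm/prom.h>
#include <asm/of_device.h>

static const struct of_device_id demo_ids[] = {
	{ .compatible = "acme,dma-engine" },	/* hypothetical binding */
	{},
};

/* returns non-zero if the node matches one of the entries above */
static int demo_node_is_supported(struct device_node *np)
{
	return of_match_node(demo_ids, np) != NULL;
}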
@@ -71,96 +72,8 @@ void of_dev_put(struct of_device *dev)
                put_device(&dev->dev);
 }
 
-
-static int of_device_probe(struct device *dev)
-{
-       int error = -ENODEV;
-       struct of_platform_driver *drv;
-       struct of_device *of_dev;
-       const struct of_device_id *match;
-
-       drv = to_of_platform_driver(dev->driver);
-       of_dev = to_of_device(dev);
-
-       if (!drv->probe)
-               return error;
-
-       of_dev_get(of_dev);
-
-       match = of_match_device(drv->match_table, of_dev);
-       if (match)
-               error = drv->probe(of_dev, match);
-       if (error)
-               of_dev_put(of_dev);
-
-       return error;
-}
-
-static int of_device_remove(struct device *dev)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-
-       if (dev->driver && drv->remove)
-               drv->remove(of_dev);
-       return 0;
-}
-
-static int of_device_suspend(struct device *dev, pm_message_t state)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->suspend)
-               error = drv->suspend(of_dev, state);
-       return error;
-}
-
-static int of_device_resume(struct device * dev)
-{
-       struct of_device * of_dev = to_of_device(dev);
-       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->resume)
-               error = drv->resume(of_dev);
-       return error;
-}
-
-struct bus_type of_platform_bus_type = {
-       .name   = "of_platform",
-       .match  = of_platform_bus_match,
-       .probe  = of_device_probe,
-       .remove = of_device_remove,
-       .suspend        = of_device_suspend,
-       .resume = of_device_resume,
-};
-
-static int __init of_bus_driver_init(void)
-{
-       return bus_register(&of_platform_bus_type);
-}
-
-postcore_initcall(of_bus_driver_init);
-
-int of_register_driver(struct of_platform_driver *drv)
-{
-       /* initialize common driver fields */
-       drv->driver.name = drv->name;
-       drv->driver.bus = &of_platform_bus_type;
-
-       /* register with core */
-       return driver_register(&drv->driver);
-}
-
-void of_unregister_driver(struct of_platform_driver *drv)
-{
-       driver_unregister(&drv->driver);
-}
-
-
-static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dev_show_devspec(struct device *dev,
+                               struct device_attribute *attr, char *buf)
 {
        struct of_device *ofdev;
 
@@ -208,41 +121,11 @@ void of_device_unregister(struct of_device *ofdev)
        device_unregister(&ofdev->dev);
 }
 
-struct of_device* of_platform_device_create(struct device_node *np,
-                                           const char *bus_id,
-                                           struct device *parent)
-{
-       struct of_device *dev;
-
-       dev = kmalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return NULL;
-       memset(dev, 0, sizeof(*dev));
-
-       dev->node = of_node_get(np);
-       dev->dma_mask = 0xffffffffUL;
-       dev->dev.dma_mask = &dev->dma_mask;
-       dev->dev.parent = parent;
-       dev->dev.bus = &of_platform_bus_type;
-       dev->dev.release = of_release_dev;
-
-       strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
-
-       if (of_device_register(dev) != 0) {
-               kfree(dev);
-               return NULL;
-       }
-
-       return dev;
-}
 
+EXPORT_SYMBOL(of_match_node);
 EXPORT_SYMBOL(of_match_device);
-EXPORT_SYMBOL(of_platform_bus_type);
-EXPORT_SYMBOL(of_register_driver);
-EXPORT_SYMBOL(of_unregister_driver);
 EXPORT_SYMBOL(of_device_register);
 EXPORT_SYMBOL(of_device_unregister);
 EXPORT_SYMBOL(of_dev_get);
 EXPORT_SYMBOL(of_dev_put);
-EXPORT_SYMBOL(of_platform_device_create);
 EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
new file mode 100644 (file)
index 0000000..b3189d0
--- /dev/null
@@ -0,0 +1,489 @@
+/*
+ *    Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                      <benh@kernel.crashing.org>
+ *    and               Arnd Bergmann, IBM Corp.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include <asm/errno.h>
+#include <asm/dcr.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/topology.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+#include <asm/atomic.h>
+
+
+/*
+ * The list of OF IDs below is used for matching bus types in the
+ * system whose devices are to be exposed as of_platform_devices.
+ *
+ * This is the default list valid for most platforms; the functions in
+ * this file can also take an explicit list if necessary.
+ *
+ * The search is always performed recursively, looking for children of
+ * the provided device_node, and recursing further whenever a child
+ * matches a bus type in the list.
+ */
+
+static struct of_device_id of_default_bus_ids[] = {
+       { .type = "soc", },
+       { .compatible = "soc", },
+       { .type = "spider", },
+       { .type = "axon", },
+       { .type = "plb5", },
+       { .type = "plb4", },
+       { .type = "opb", },
+       {},
+};
+
+static atomic_t bus_no_reg_magic;
+
+/*
+ *
+ * OF platform device type definition & base infrastructure
+ *
+ */
+
+static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
+{
+       struct of_device * of_dev = to_of_device(dev);
+       struct of_platform_driver * of_drv = to_of_platform_driver(drv);
+       const struct of_device_id * matches = of_drv->match_table;
+
+       if (!matches)
+               return 0;
+
+       return of_match_device(matches, of_dev) != NULL;
+}
+
+static int of_platform_device_probe(struct device *dev)
+{
+       int error = -ENODEV;
+       struct of_platform_driver *drv;
+       struct of_device *of_dev;
+       const struct of_device_id *match;
+
+       drv = to_of_platform_driver(dev->driver);
+       of_dev = to_of_device(dev);
+
+       if (!drv->probe)
+               return error;
+
+       of_dev_get(of_dev);
+
+       match = of_match_device(drv->match_table, of_dev);
+       if (match)
+               error = drv->probe(of_dev, match);
+       if (error)
+               of_dev_put(of_dev);
+
+       return error;
+}
+
+static int of_platform_device_remove(struct device *dev)
+{
+       struct of_device * of_dev = to_of_device(dev);
+       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+
+       if (dev->driver && drv->remove)
+               drv->remove(of_dev);
+       return 0;
+}
+
+static int of_platform_device_suspend(struct device *dev, pm_message_t state)
+{
+       struct of_device * of_dev = to_of_device(dev);
+       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+       int error = 0;
+
+       if (dev->driver && drv->suspend)
+               error = drv->suspend(of_dev, state);
+       return error;
+}
+
+static int of_platform_device_resume(struct device * dev)
+{
+       struct of_device * of_dev = to_of_device(dev);
+       struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
+       int error = 0;
+
+       if (dev->driver && drv->resume)
+               error = drv->resume(of_dev);
+       return error;
+}
+
+struct bus_type of_platform_bus_type = {
+       .name   = "of_platform",
+       .match  = of_platform_bus_match,
+       .probe  = of_platform_device_probe,
+       .remove = of_platform_device_remove,
+       .suspend        = of_platform_device_suspend,
+       .resume = of_platform_device_resume,
+};
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static int __init of_bus_driver_init(void)
+{
+       return bus_register(&of_platform_bus_type);
+}
+
+postcore_initcall(of_bus_driver_init);
+
+int of_register_platform_driver(struct of_platform_driver *drv)
+{
+       /* initialize common driver fields */
+       drv->driver.name = drv->name;
+       drv->driver.bus = &of_platform_bus_type;
+
+       /* register with core */
+       return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(of_register_platform_driver);
+
+void of_unregister_platform_driver(struct of_platform_driver *drv)
+{
+       driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(of_unregister_platform_driver);
+
+static void of_platform_make_bus_id(struct of_device *dev)
+{
+       struct device_node *node = dev->node;
+       char *name = dev->dev.bus_id;
+       const u32 *reg;
+       u64 addr;
+       long magic;
+
+       /*
+        * If it's a DCR based device, use 'd' for native DCRs
+        * and 'D' for MMIO DCRs.
+        */
+#ifdef CONFIG_PPC_DCR
+       reg = get_property(node, "dcr-reg", NULL);
+       if (reg) {
+#ifdef CONFIG_PPC_DCR_NATIVE
+               snprintf(name, BUS_ID_SIZE, "d%x.%s",
+                        *reg, node->name);
+#else /* CONFIG_PPC_DCR_NATIVE */
+               addr = of_translate_dcr_address(node, *reg, NULL);
+               if (addr != OF_BAD_ADDR) {
+                       snprintf(name, BUS_ID_SIZE,
+                                "D%llx.%s", (unsigned long long)addr,
+                                node->name);
+                       return;
+               }
+#endif /* !CONFIG_PPC_DCR_NATIVE */
+       }
+#endif /* CONFIG_PPC_DCR */
+
+       /*
+        * For MMIO, get the physical address
+        */
+       reg = get_property(node, "reg", NULL);
+       if (reg) {
+               addr = of_translate_address(node, reg);
+               if (addr != OF_BAD_ADDR) {
+                       snprintf(name, BUS_ID_SIZE,
+                                "%llx.%s", (unsigned long long)addr,
+                                node->name);
+                       return;
+               }
+       }
+
+       /*
+        * No BusID, use the node name and add a globally incremented
+        * counter (and pray...)
+        */
+       magic = atomic_add_return(1, &bus_no_reg_magic);
+       snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+}
+
+struct of_device* of_platform_device_create(struct device_node *np,
+                                           const char *bus_id,
+                                           struct device *parent)
+{
+       struct of_device *dev;
+
+       dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+       memset(dev, 0, sizeof(*dev));
+
+       dev->node = of_node_get(np);
+       dev->dma_mask = 0xffffffffUL;
+       dev->dev.dma_mask = &dev->dma_mask;
+       dev->dev.parent = parent;
+       dev->dev.bus = &of_platform_bus_type;
+       dev->dev.release = of_release_dev;
+       dev->dev.archdata.of_node = np;
+       dev->dev.archdata.numa_node = of_node_to_nid(np);
+
+       /* We do not fill in the DMA ops for platform devices by default.
+        * Doing so is currently the responsibility of the platform code,
+        * possibly via a device notifier.
+        */
+
+       if (bus_id)
+               strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+       else
+               of_platform_make_bus_id(dev);
+
+       if (of_device_register(dev) != 0) {
+               kfree(dev);
+               return NULL;
+       }
+
+       return dev;
+}
+EXPORT_SYMBOL(of_platform_device_create);
+
+
+
+/**
+ * of_platform_bus_create - Create an OF device for a bus node and all its
+ * children. Optionally instantiate matching buses recursively.
+ * @bus: device node of the bus to instantiate
+ * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to
+ * disallow recursive creation of child buses
+ */
+static int of_platform_bus_create(struct device_node *bus,
+                                 struct of_device_id *matches,
+                                 struct device *parent)
+{
+       struct device_node *child;
+       struct of_device *dev;
+       int rc = 0;
+
+       for (child = NULL; (child = of_get_next_child(bus, child)); ) {
+               pr_debug("   create child: %s\n", child->full_name);
+               dev = of_platform_device_create(child, NULL, parent);
+               if (dev == NULL)
+                       rc = -ENOMEM;
+               else if (!of_match_node(matches, child))
+                       continue;
+               if (rc == 0) {
+                       pr_debug("   and sub busses\n");
+                       rc = of_platform_bus_create(child, matches, &dev->dev);
+               } if (rc) {
+                       of_node_put(child);
+                       break;
+               }
+       }
+       return rc;
+}
+
+/**
+ * of_platform_bus_probe - Probe the device-tree for platform buses
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Note that children of the provided root are not instantiated as devices
+ * unless the specified root itself matches the bus list and is not NULL.
+ */
+
+int of_platform_bus_probe(struct device_node *root,
+                         struct of_device_id *matches,
+                         struct device *parent)
+{
+       struct device_node *child;
+       struct of_device *dev;
+       int rc = 0;
+
+       if (matches == NULL)
+               matches = of_default_bus_ids;
+       if (matches == OF_NO_DEEP_PROBE)
+               return -EINVAL;
+       if (root == NULL)
+               root = of_find_node_by_path("/");
+       else
+               of_node_get(root);
+
+       pr_debug("of_platform_bus_probe()\n");
+       pr_debug(" starting at: %s\n", root->full_name);
+
+       /* Do a self check of the bus type; if there's a match, create
+        * children
+        */
+       if (of_match_node(matches, root)) {
+               pr_debug(" root match, create all sub devices\n");
+               dev = of_platform_device_create(root, NULL, parent);
+               if (dev == NULL) {
+                       rc = -ENOMEM;
+                       goto bail;
+               }
+               pr_debug(" create all sub busses\n");
+               rc = of_platform_bus_create(root, matches, &dev->dev);
+               goto bail;
+       }
+       for (child = NULL; (child = of_get_next_child(root, child)); ) {
+               if (!of_match_node(matches, child))
+                       continue;
+
+               pr_debug("  match: %s\n", child->full_name);
+               dev = of_platform_device_create(child, NULL, parent);
+               if (dev == NULL)
+                       rc = -ENOMEM;
+               else
+                       rc = of_platform_bus_create(child, matches, &dev->dev);
+               if (rc) {
+                       of_node_put(child);
+                       break;
+               }
+       }
+ bail:
+       of_node_put(root);
+       return rc;
+}
+EXPORT_SYMBOL(of_platform_bus_probe);
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+       return to_of_device(dev)->node == data;
+}
+
+struct of_device *of_find_device_by_node(struct device_node *np)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&of_platform_bus_type,
+                             NULL, np, of_dev_node_match);
+       if (dev)
+               return to_of_device(dev);
+       return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+static int of_dev_phandle_match(struct device *dev, void *data)
+{
+       phandle *ph = data;
+       return to_of_device(dev)->node->linux_phandle == *ph;
+}
+
+struct of_device *of_find_device_by_phandle(phandle ph)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&of_platform_bus_type,
+                             NULL, &ph, of_dev_phandle_match);
+       if (dev)
+               return to_of_device(dev);
+       return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_phandle);
+
+
+#ifdef CONFIG_PPC_OF_PLATFORM_PCI
+
+/* The probing of PCI controllers from of_platform is currently
+ * 64-bit only, mostly due to gratuitous differences between the
+ * 32-bit and 64-bit PCI code on PowerPC, and to the 32-bit code
+ * lacking some pieces needed here.
+ */
+
+static int __devinit of_pci_phb_probe(struct of_device *dev,
+                                     const struct of_device_id *match)
+{
+       struct pci_controller *phb;
+
+       /* Check if we can do that ... */
+       if (ppc_md.pci_setup_phb == NULL)
+               return -ENODEV;
+
+       printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name);
+
+       /* Alloc and setup PHB data structure */
+       phb = pcibios_alloc_controller(dev->node);
+       if (!phb)
+               return -ENODEV;
+
+       /* Setup parent in sysfs */
+       phb->parent = &dev->dev;
+
+       /* Setup the PHB using arch provided callback */
+       if (ppc_md.pci_setup_phb(phb)) {
+               pcibios_free_controller(phb);
+               return -ENODEV;
+       }
+
+       /* Process "ranges" property */
+       pci_process_bridge_OF_ranges(phb, dev->node, 0);
+
+       /* Setup IO space.
+        * This will not work properly for ISA IOs; something needs to be done
+        * about it if we ever generalize that way of probing PCI bridges.
+        */
+       pci_setup_phb_io_dynamic(phb, 0);
+
+       /* Init pci_dn data structures */
+       pci_devs_phb_init_dynamic(phb);
+
+       /* Register devices with EEH */
+#ifdef CONFIG_EEH
+       if (dev->node->child)
+               eeh_add_device_tree_early(dev->node);
+#endif /* CONFIG_EEH */
+
+       /* Scan the bus */
+       scan_phb(phb);
+
+       /* Claim resources. This might need some rework as well, depending
+        * on whether we are doing probe-only or not, like assigning
+        * unassigned resources, etc.
+        */
+       pcibios_claim_one_bus(phb->bus);
+
+       /* Finish EEH setup */
+#ifdef CONFIG_EEH
+       eeh_add_device_tree_late(phb->bus);
+#endif
+
+       /* Add probed PCI devices to the device model */
+       pci_bus_add_devices(phb->bus);
+
+       return 0;
+}
+
+static struct of_device_id of_pci_phb_ids[] = {
+       { .type = "pci", },
+       { .type = "pcix", },
+       { .type = "pcie", },
+       { .type = "pciex", },
+       { .type = "ht", },
+       {}
+};
+
+static struct of_platform_driver of_pci_phb_driver = {
+       .name = "of-pci",
+       .match_table = of_pci_phb_ids,
+       .probe = of_pci_phb_probe,
+       .driver = {
+              .multithread_probe = 1,
+       },
+};
+
+static __init int of_pci_phb_init(void)
+{
+       return of_register_platform_driver(&of_pci_phb_driver);
+}
+
+device_initcall(of_pci_phb_init);
+
+#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
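
To show how the interfaces added in this new file fit together, here is a minimal sketch of platform code that probes the tree for buses and registers an of_platform driver. Every "acme" name (bus compatible, driver, device) is invented purely for illustration; the real users live in the platform directories.

#include <linux/init.h>
#include <asm/of_device.h>
#include <asm/of_platform.h>

/* bus list handed to of_platform_bus_probe(); NULL would select the
 * default list defined above */
static struct of_device_id acme_bus_ids[] = {
	{ .type = "soc", },
	{ .compatible = "acme,local-bus", },
	{},
};

static int acme_uart_probe(struct of_device *ofdev,
			   const struct of_device_id *match)
{
	/* a real driver would map registers and request its IRQ here */
	return 0;
}

static int acme_uart_remove(struct of_device *ofdev)
{
	return 0;
}

static struct of_device_id acme_uart_match[] = {
	{ .compatible = "acme,uart", },
	{},
};

static struct of_platform_driver acme_uart_driver = {
	.name		= "acme-uart",
	.match_table	= acme_uart_match,
	.probe		= acme_uart_probe,
	.remove		= acme_uart_remove,
};

static int __init acme_setup(void)
{
	/* create of_platform devices for everything under matching buses,
	 * then register a driver that binds to "acme,uart" nodes */
	of_platform_bus_probe(NULL, acme_bus_ids, NULL);
	return of_register_platform_driver(&acme_uart_driver);
}
device_initcall(acme_setup);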
index 0d9ff72e28526a7d2f2990388354a5fd37be13ef..2f54cd81dea571ce82468d82361ae0a67e397f3e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/errno.h>
 #include <linux/bootmem.h>
 #include <linux/irq.h>
+#include <linux/list.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -99,7 +100,7 @@ pcibios_fixup_resources(struct pci_dev *dev)
                        continue;
                if (res->end == 0xffffffff) {
                        DBG("PCI:%s Resource %d [%016llx-%016llx] is unassigned\n",
-                           pci_name(dev), i, res->start, res->end);
+                           pci_name(dev), i, (u64)res->start, (u64)res->end);
                        res->end -= res->start;
                        res->start = 0;
                        res->flags |= IORESOURCE_UNSET;
@@ -115,11 +116,9 @@ pcibios_fixup_resources(struct pci_dev *dev)
                if (offset != 0) {
                        res->start += offset;
                        res->end += offset;
-#ifdef DEBUG
-                       printk("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
-                              i, res->flags, pci_name(dev),
-                              res->start - offset, res->start);
-#endif
+                       DBG("Fixup res %d (%lx) of dev %s: %llx -> %llx\n",
+                           i, res->flags, pci_name(dev),
+                           (u64)res->start - offset, (u64)res->start);
                }
        }
 
@@ -255,7 +254,7 @@ pcibios_allocate_bus_resources(struct list_head *bus_list)
                        }
 
                        DBG("PCI: bridge rsrc %llx..%llx (%lx), parent %p\n",
-                               res->start, res->end, res->flags, pr);
+                           (u64)res->start, (u64)res->end, res->flags, pr);
                        if (pr) {
                                if (request_resource(pr, res) == 0)
                                        continue;
@@ -306,7 +305,7 @@ reparent_resources(struct resource *parent, struct resource *res)
        for (p = res->child; p != NULL; p = p->sibling) {
                p->parent = res;
                DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
-                   p->name, p->start, p->end, res->name);
+                   p->name, (u64)p->start, (u64)p->end, res->name);
        }
        return 0;
 }
@@ -362,7 +361,7 @@ pci_relocate_bridge_resource(struct pci_bus *bus, int i)
        }
        if (request_resource(pr, res)) {
                DBG(KERN_ERR "PCI: huh? couldn't move to %llx..%llx\n",
-                   res->start, res->end);
+                   (u64)res->start, (u64)res->end);
                return -1;              /* "can't happen" */
        }
        update_bridge_base(bus, i);
@@ -480,14 +479,14 @@ static inline void alloc_resource(struct pci_dev *dev, int idx)
        struct resource *pr, *r = &dev->resource[idx];
 
        DBG("PCI:%s: Resource %d: %016llx-%016llx (f=%lx)\n",
-           pci_name(dev), idx, r->start, r->end, r->flags);
+           pci_name(dev), idx, (u64)r->start, (u64)r->end, r->flags);
        pr = pci_find_parent_resource(dev, r);
        if (!pr || request_resource(pr, r) < 0) {
                printk(KERN_ERR "PCI: Cannot allocate resource region %d"
                       " of device %s\n", idx, pci_name(dev));
                if (pr)
                        DBG("PCI:  parent is %p: %016llx-%016llx (f=%lx)\n",
-                           pr, pr->start, pr->end, pr->flags);
+                           pr, (u64)pr->start, (u64)pr->end, pr->flags);
                /* We'll assign a new address later */
                r->flags |= IORESOURCE_UNSET;
                r->end -= r->start;
@@ -960,7 +959,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
                        res->flags = IORESOURCE_IO;
                        res->start = ranges[2];
                        DBG("PCI: IO 0x%llx -> 0x%llx\n",
-                                   res->start, res->start + size - 1);
+                           (u64)res->start, (u64)res->start + size - 1);
                        break;
                case 2:         /* memory space */
                        memno = 0;
@@ -982,7 +981,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
                                        res->flags |= IORESOURCE_PREFETCH;
                                res->start = ranges[na+2];
                                DBG("PCI: MEM[%d] 0x%llx -> 0x%llx\n", memno,
-                                           res->start, res->start + size - 1);
+                                   (u64)res->start, (u64)res->start + size - 1);
                        }
                        break;
                }
@@ -1268,7 +1267,10 @@ pcibios_init(void)
                if (pci_assign_all_buses)
                        hose->first_busno = next_busno;
                hose->last_busno = 0xff;
-               bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
+               bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
+                                           hose->ops, hose);
+               if (bus)
+                       pci_bus_add_devices(bus);
                hose->last_busno = bus->subordinate;
                if (pci_assign_all_buses || next_busno <= hose->last_busno)
                        next_busno = hose->last_busno + pcibios_assign_bus_offset;
@@ -1282,10 +1284,6 @@ pcibios_init(void)
        if (pci_assign_all_buses && have_of)
                pcibios_make_OF_bus_map();
 
-       /* Do machine dependent PCI interrupt routing */
-       if (ppc_md.pci_swizzle && ppc_md.pci_map_irq)
-               pci_fixup_irqs(ppc_md.pci_swizzle, ppc_md.pci_map_irq);
-
        /* Call machine dependent fixup */
        if (ppc_md.pcibios_fixup)
                ppc_md.pcibios_fixup();
@@ -1308,25 +1306,6 @@ pcibios_init(void)
 
 subsys_initcall(pcibios_init);
 
-unsigned char __init
-common_swizzle(struct pci_dev *dev, unsigned char *pinp)
-{
-       struct pci_controller *hose = dev->sysdata;
-
-       if (dev->bus->number != hose->first_busno) {
-               u8 pin = *pinp;
-               do {
-                       pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
-                       /* Move up the chain of bridges. */
-                       dev = dev->bus->self;
-               } while (dev->bus->self);
-               *pinp = pin;
-
-               /* The slot is the idsel of the last bridge. */
-       }
-       return PCI_SLOT(dev->devfn);
-}
-
 unsigned long resource_fixup(struct pci_dev * dev, struct resource * res,
                             unsigned long start, unsigned long size)
 {
@@ -1338,6 +1317,7 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
        struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
        unsigned long io_offset;
        struct resource *res;
+       struct pci_dev *dev;
        int i;
 
        io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -1390,8 +1370,16 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
                }
        }
 
+       /* Platform specific bus fixups */
        if (ppc_md.pcibios_fixup_bus)
                ppc_md.pcibios_fixup_bus(bus);
+
+       /* Read default IRQs and fixup if necessary */
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               pci_read_irq_line(dev);
+               if (ppc_md.pci_irq_fixup)
+                       ppc_md.pci_irq_fixup(dev);
+       }
 }
 
 char __init *pcibios_setup(char *str)
@@ -1571,7 +1559,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
                *offset += hose->pci_mem_offset;
                res_bit = IORESOURCE_MEM;
        } else {
-               io_offset = hose->io_base_virt - ___IO_BASE;
+               io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
                *offset += io_offset;
                res_bit = IORESOURCE_IO;
        }
@@ -1826,7 +1814,8 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
                return;
 
        if (rsrc->flags & IORESOURCE_IO)
-               offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
+               offset = (void __iomem *)_IO_BASE - hose->io_base_virt
+                       + hose->io_base_phys;
 
        *start = rsrc->start + offset;
        *end = rsrc->end + offset;
@@ -1845,35 +1834,6 @@ pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
        res->child = NULL;
 }
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
-{
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len)
-               return NULL;
-       if (max && len > max)
-               len = max;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM)
-               /* Not checking IORESOURCE_CACHEABLE because PPC does
-                * not currently distinguish between ioremap and
-                * ioremap_nocache.
-                */
-               return ioremap(start, len);
-       /* What? */
-       return NULL;
-}
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-       /* Nothing to do */
-}
-EXPORT_SYMBOL(pci_iomap);
-EXPORT_SYMBOL(pci_iounmap);
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
        struct pci_controller* hose = hose_head;
index 9bae8a5bf671344a6c5dce261272a6159a64a181..6fa9a0a5c8dbb7f0a700c0e88048e7037caaa63f 100644 (file)
 unsigned long pci_probe_only = 1;
 int pci_assign_all_buses = 0;
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 static void fixup_resource(struct resource *res, struct pci_dev *dev);
 static void do_bus_setup(struct pci_bus *bus);
 static void phbs_remap_io(void);
-#endif
 
 /* pci_io_base -- the base address from which io bars are offsets.
  * This is the lowest I/O base address (so bar values are always positive),
@@ -63,7 +61,7 @@ void iSeries_pcibios_init(void);
 
 LIST_HEAD(hose_list);
 
-struct dma_mapping_ops pci_dma_ops;
+struct dma_mapping_ops *pci_dma_ops;
 EXPORT_SYMBOL(pci_dma_ops);
 
 int global_phb_number;         /* Global phb counter */
@@ -212,6 +210,10 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
 
 void pcibios_free_controller(struct pci_controller *phb)
 {
+       spin_lock(&hose_spinlock);
+       list_del(&phb->list_node);
+       spin_unlock(&hose_spinlock);
+
        if (phb->is_dynamic)
                kfree(phb);
 }
@@ -251,7 +253,6 @@ static void __init pcibios_claim_of_setup(void)
                pcibios_claim_one_bus(b);
 }
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
 {
        const u32 *prop;
@@ -329,7 +330,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
        struct pci_dev *dev;
        const char *type;
 
-       dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
+       dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
        if (!dev)
                return NULL;
        type = get_property(node, "device_type", NULL);
@@ -338,7 +339,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 
        DBG("    create device, devfn: %x, type: %s\n", devfn, type);
 
-       memset(dev, 0, sizeof(struct pci_dev));
        dev->bus = bus;
        dev->sysdata = node;
        dev->dev.parent = bus->bridge;
@@ -506,7 +506,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
                pci_scan_child_bus(bus);
 }
 EXPORT_SYMBOL(of_scan_pci_bridge);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
 
 void __devinit scan_phb(struct pci_controller *hose)
 {
@@ -517,7 +516,7 @@ void __devinit scan_phb(struct pci_controller *hose)
 
        DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
 
-       bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
+       bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
        if (bus == NULL) {
                printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
                       hose->global_number);
@@ -540,7 +539,7 @@ void __devinit scan_phb(struct pci_controller *hose)
        }
 
        mode = PCI_PROBE_NORMAL;
-#ifdef CONFIG_PPC_MULTIPLATFORM
+
        if (node && ppc_md.pci_probe_mode)
                mode = ppc_md.pci_probe_mode(bus);
        DBG("    probe mode: %d\n", mode);
@@ -548,7 +547,7 @@ void __devinit scan_phb(struct pci_controller *hose)
                bus->subordinate = hose->last_busno;
                of_scan_bus(node, bus);
        }
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+
        if (mode == PCI_PROBE_NORMAL)
                hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
 }
@@ -592,11 +591,9 @@ static int __init pcibios_init(void)
        if (ppc64_isabridge_dev != NULL)
                printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                /* map in PCI I/O space */
                phbs_remap_io();
-#endif
 
        printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
 
@@ -873,8 +870,6 @@ void pcibios_add_platform_entries(struct pci_dev *pdev)
        device_create_file(&pdev->dev, &dev_attr_devspec);
 }
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
 #define ISA_SPACE_MASK 0x1
 #define ISA_SPACE_IO 0x1
 
@@ -975,11 +970,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
                res = NULL;
                pci_space = ranges[0];
                pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
-
-               cpu_phys_addr = ranges[3];
-               if (na >= 2)
-                       cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
-
+               cpu_phys_addr = of_translate_address(dev, &ranges[3]);
                size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
                ranges += np;
                if (size == 0)
@@ -1145,7 +1136,7 @@ int unmap_bus_range(struct pci_bus *bus)
        
        if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
                return 1;
-       if (iounmap_explicit((void __iomem *) start_virt, size))
+       if (__iounmap_explicit((void __iomem *) start_virt, size))
                return 1;
 
        return 0;
@@ -1213,23 +1204,52 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
 }
 EXPORT_SYMBOL(pcibios_fixup_device_resources);
 
+void __devinit pcibios_setup_new_device(struct pci_dev *dev)
+{
+       struct dev_archdata *sd = &dev->dev.archdata;
+
+       sd->of_node = pci_device_to_OF_node(dev);
+
+       DBG("PCI device %s OF node: %s\n", pci_name(dev),
+           sd->of_node ? sd->of_node->full_name : "<none>");
+
+       sd->dma_ops = pci_dma_ops;
+#ifdef CONFIG_NUMA
+       sd->numa_node = pcibus_to_node(dev->bus);
+#else
+       sd->numa_node = -1;
+#endif
+       if (ppc_md.pci_dma_dev_setup)
+               ppc_md.pci_dma_dev_setup(dev);
+}
+EXPORT_SYMBOL(pcibios_setup_new_device);
 
 static void __devinit do_bus_setup(struct pci_bus *bus)
 {
        struct pci_dev *dev;
 
-       ppc_md.iommu_bus_setup(bus);
+       if (ppc_md.pci_dma_bus_setup)
+               ppc_md.pci_dma_bus_setup(bus);
 
        list_for_each_entry(dev, &bus->devices, bus_list)
-               ppc_md.iommu_dev_setup(dev);
+               pcibios_setup_new_device(dev);
 
-       if (ppc_md.irq_bus_setup)
-               ppc_md.irq_bus_setup(bus);
+       /* Read default IRQs and fixup if necessary */
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               pci_read_irq_line(dev);
+               if (ppc_md.pci_irq_fixup)
+                       ppc_md.pci_irq_fixup(dev);
+       }
 }
 
 void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 {
        struct pci_dev *dev = bus->self;
+       struct device_node *np;
+
+       np = pci_bus_to_OF_node(bus);
+
+       DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");
 
        if (dev && pci_probe_only &&
            (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
@@ -1343,8 +1363,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
        return NULL;
 }
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 unsigned long pci_address_to_pio(phys_addr_t address)
 {
        struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/kernel/pci_direct_iommu.c b/arch/powerpc/kernel/pci_direct_iommu.c
deleted file mode 100644 (file)
index 72ce082..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Support for DMA from PCI devices to main memory on
- * machines without an iommu or with directly addressable
- * RAM (typically a pmac with 2Gb of RAM or less)
- *
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/sections.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/ppc-pci.h>
-
-static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
-                                  dma_addr_t *dma_handle, gfp_t flag)
-{
-       void *ret;
-
-       ret = (void *)__get_free_pages(flag, get_order(size));
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_abs(ret);
-       }
-       return ret;
-}
-
-static void pci_direct_free_coherent(struct device *hwdev, size_t size,
-                                void *vaddr, dma_addr_t dma_handle)
-{
-       free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t pci_direct_map_single(struct device *hwdev, void *ptr,
-               size_t size, enum dma_data_direction direction)
-{
-       return virt_to_abs(ptr);
-}
-
-static void pci_direct_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-               size_t size, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_map_sg(struct device *hwdev, struct scatterlist *sg,
-               int nents, enum dma_data_direction direction)
-{
-       int i;
-
-       for (i = 0; i < nents; i++, sg++) {
-               sg->dma_address = page_to_phys(sg->page) + sg->offset;
-               sg->dma_length = sg->length;
-       }
-
-       return nents;
-}
-
-static void pci_direct_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-               int nents, enum dma_data_direction direction)
-{
-}
-
-static int pci_direct_dma_supported(struct device *dev, u64 mask)
-{
-       return mask < 0x100000000ull;
-}
-
-static struct dma_mapping_ops pci_direct_ops = {
-       .alloc_coherent = pci_direct_alloc_coherent,
-       .free_coherent = pci_direct_free_coherent,
-       .map_single = pci_direct_map_single,
-       .unmap_single = pci_direct_unmap_single,
-       .map_sg = pci_direct_map_sg,
-       .unmap_sg = pci_direct_unmap_sg,
-       .dma_supported = pci_direct_dma_supported,
-};
-
-void __init pci_direct_iommu_init(void)
-{
-       pci_dma_ops = pci_direct_ops;
-}
diff --git a/arch/powerpc/kernel/pci_iommu.c b/arch/powerpc/kernel/pci_iommu.c
deleted file mode 100644 (file)
index 0688b25..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- *
- * Rewrite, cleanup, new allocation schemes:
- * Copyright (C) 2004 Olof Johansson, IBM Corporation
- *
- * Dynamic DMA mapping support, platform-independent parts.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/pci-bridge.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-
-/*
- * We can use ->sysdata directly and avoid the extra work in
- * pci_device_to_OF_node since ->sysdata will have been initialised
- * in the iommu init code for all devices.
- */
-#define PCI_GET_DN(dev) ((struct device_node *)((dev)->sysdata))
-
-static inline struct iommu_table *device_to_table(struct device *hwdev)
-{
-       struct pci_dev *pdev;
-
-       if (!hwdev) {
-               pdev = ppc64_isabridge_dev;
-               if (!pdev)
-                       return NULL;
-       } else
-               pdev = to_pci_dev(hwdev);
-
-       return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
-}
-
-
-static inline unsigned long device_to_mask(struct device *hwdev)
-{
-       struct pci_dev *pdev;
-
-       if (!hwdev) {
-               pdev = ppc64_isabridge_dev;
-               if (!pdev) /* This is the best guess we can do */
-                       return 0xfffffffful;
-       } else
-               pdev = to_pci_dev(hwdev);
-
-       if (pdev->dma_mask)
-               return pdev->dma_mask;
-
-       /* Assume devices without mask can take 32 bit addresses */
-       return 0xfffffffful;
-}
-
-
-/* Allocates a contiguous real buffer and creates mappings over it.
- * Returns the virtual address of the buffer and sets dma_handle
- * to the dma address (mapping) of the first page.
- */
-static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag)
-{
-       return iommu_alloc_coherent(device_to_table(hwdev), size, dma_handle,
-                       device_to_mask(hwdev), flag,
-                       pcibus_to_node(to_pci_dev(hwdev)->bus));
-}
-
-static void pci_iommu_free_coherent(struct device *hwdev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
-{
-       iommu_free_coherent(device_to_table(hwdev), size, vaddr, dma_handle);
-}
-
-/* Creates TCEs for a user provided buffer.  The user buffer must be 
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
- */
-static dma_addr_t pci_iommu_map_single(struct device *hwdev, void *vaddr,
-               size_t size, enum dma_data_direction direction)
-{
-       return iommu_map_single(device_to_table(hwdev), vaddr, size,
-                               device_to_mask(hwdev), direction);
-}
-
-
-static void pci_iommu_unmap_single(struct device *hwdev, dma_addr_t dma_handle,
-               size_t size, enum dma_data_direction direction)
-{
-       iommu_unmap_single(device_to_table(hwdev), dma_handle, size, direction);
-}
-
-
-static int pci_iommu_map_sg(struct device *pdev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       return iommu_map_sg(pdev, device_to_table(pdev), sglist,
-                       nelems, device_to_mask(pdev), direction);
-}
-
-static void pci_iommu_unmap_sg(struct device *pdev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       iommu_unmap_sg(device_to_table(pdev), sglist, nelems, direction);
-}
-
-/* We support DMA to/from any memory page via the iommu */
-static int pci_iommu_dma_supported(struct device *dev, u64 mask)
-{
-       struct iommu_table *tbl = device_to_table(dev);
-
-       if (!tbl || tbl->it_offset > mask) {
-               printk(KERN_INFO "Warning: IOMMU table offset too big for device mask\n");
-               if (tbl)
-                       printk(KERN_INFO "mask: 0x%08lx, table offset: 0x%08lx\n",
-                               mask, tbl->it_offset);
-               else
-                       printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
-                               mask);
-               return 0;
-       } else
-               return 1;
-}
-
-struct dma_mapping_ops pci_iommu_ops = {
-       .alloc_coherent = pci_iommu_alloc_coherent,
-       .free_coherent = pci_iommu_free_coherent,
-       .map_single = pci_iommu_map_single,
-       .unmap_single = pci_iommu_unmap_single,
-       .map_sg = pci_iommu_map_sg,
-       .unmap_sg = pci_iommu_unmap_sg,
-       .dma_supported = pci_iommu_dma_supported,
-};
-
-void pci_iommu_init(void)
-{
-       pci_dma_ops = pci_iommu_ops;
-}
index 807193a3c784958e399e2fc2af3d5e51ee71cd7d..9179f0739ea2c3afb952fad59fa21017bd1fc403 100644 (file)
 #include <asm/commproc.h>
 #endif
 
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(local_irq_restore);
+#endif
+
 #ifdef CONFIG_PPC32
 extern void transfer_to_handler(void);
 extern void do_IRQ(struct pt_regs *regs);
index bdb412d4b74832fb62c4c530dffa6f7b8c635d25..c18dbe77fdc29863eb5a4f3e57c2f5f62e0f93dc 100644 (file)
@@ -538,35 +538,31 @@ static struct ibm_pa_feature {
        {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
 };
 
-static void __init check_cpu_pa_features(unsigned long node)
+static void __init scan_features(unsigned long node, unsigned char *ftrs,
+                                unsigned long tablelen,
+                                struct ibm_pa_feature *fp,
+                                unsigned long ft_size)
 {
-       unsigned char *pa_ftrs;
-       unsigned long len, tablelen, i, bit;
-
-       pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
-       if (pa_ftrs == NULL)
-               return;
+       unsigned long i, len, bit;
 
        /* find descriptor with type == 0 */
        for (;;) {
                if (tablelen < 3)
                        return;
-               len = 2 + pa_ftrs[0];
+               len = 2 + ftrs[0];
                if (tablelen < len)
                        return;         /* descriptor 0 not found */
-               if (pa_ftrs[1] == 0)
+               if (ftrs[1] == 0)
                        break;
                tablelen -= len;
-               pa_ftrs += len;
+               ftrs += len;
        }
 
        /* loop over bits we know about */
-       for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
-               struct ibm_pa_feature *fp = &ibm_pa_features[i];
-
-               if (fp->pabyte >= pa_ftrs[0])
+       for (i = 0; i < ft_size; ++i, ++fp) {
+               if (fp->pabyte >= ftrs[0])
                        continue;
-               bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
+               bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
                if (bit ^ fp->invert) {
                        cur_cpu_spec->cpu_features |= fp->cpu_features;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
@@ -577,16 +573,59 @@ static void __init check_cpu_pa_features(unsigned long node)
        }
 }
 
+static void __init check_cpu_pa_features(unsigned long node)
+{
+       unsigned char *pa_ftrs;
+       unsigned long tablelen;
+
+       pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
+       if (pa_ftrs == NULL)
+               return;
+
+       scan_features(node, pa_ftrs, tablelen,
+                     ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
+}
+
+static struct feature_property {
+       const char *name;
+       u32 min_value;
+       unsigned long cpu_feature;
+       unsigned long cpu_user_ftr;
+} feature_properties[] __initdata = {
+#ifdef CONFIG_ALTIVEC
+       {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+       {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_PPC64
+       {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
+       {"ibm,purr", 1, CPU_FTR_PURR, 0},
+       {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
+#endif /* CONFIG_PPC64 */
+};
+
+static void __init check_cpu_feature_properties(unsigned long node)
+{
+       unsigned long i;
+       struct feature_property *fp = feature_properties;
+       const u32 *prop;
+
+       for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
+               prop = of_get_flat_dt_prop(node, fp->name, NULL);
+               if (prop && *prop >= fp->min_value) {
+                       cur_cpu_spec->cpu_features |= fp->cpu_feature;
+                       cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
+               }
+       }
+}
+
 static int __init early_init_dt_scan_cpus(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
 {
        static int logical_cpuid = 0;
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-#ifdef CONFIG_ALTIVEC
-       u32 *prop;
-#endif
-       u32 *intserv;
+       const u32 *prop;
+       const u32 *intserv;
        int i, nthreads;
        unsigned long len;
        int found = 0;
@@ -643,24 +682,27 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
                        intserv[i]);
                boot_cpuid = logical_cpuid;
                set_hard_smp_processor_id(boot_cpuid, intserv[i]);
-       }
 
-#ifdef CONFIG_ALTIVEC
-       /* Check if we have a VMX and eventually update CPU features */
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
-       if (prop && (*prop) > 0) {
-               cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
-               cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
-       }
-
-       /* Same goes for Apple's "altivec" property */
-       prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
-       if (prop) {
-               cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
-               cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+               /*
+                * PAPR defines "logical" PVR values for cpus that
+                * meet various levels of the architecture:
+                * 0x0f000001   Architecture version 2.04
+                * 0x0f000002   Architecture version 2.05
+                * If the cpu-version property in the cpu node contains
+                * such a value, we call identify_cpu again with the
+                * logical PVR value in order to use the cpu feature
+                * bits appropriate for the architecture level.
+                *
+                * A POWER6 partition in "POWER6 architected" mode
+                * uses the 0x0f000002 PVR value; in POWER5+ mode
+                * it uses 0x0f000001.
+                */
+               prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+               if (prop && (*prop & 0xff000000) == 0x0f000000)
+                       identify_cpu(0, *prop);
        }
-#endif /* CONFIG_ALTIVEC */
 
+       check_cpu_feature_properties(node);
        check_cpu_pa_features(node);
 
 #ifdef CONFIG_PPC_PSERIES
@@ -1674,6 +1716,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
        }
        return NULL;
 }
+EXPORT_SYMBOL(of_get_cpu_node);
 
 #ifdef DEBUG
 static struct debugfs_blob_wrapper flat_dt_blob;
index b91761639d96d33c96727958af19eb7c4a29a177..46cf32670ddb67cab23dfe986e686d7bea0ffbb7 100644 (file)
@@ -173,8 +173,8 @@ static unsigned long __initdata dt_string_start, dt_string_end;
 static unsigned long __initdata prom_initrd_start, prom_initrd_end;
 
 #ifdef CONFIG_PPC64
-static int __initdata iommu_force_on;
-static int __initdata ppc64_iommu_off;
+static int __initdata prom_iommu_force_on;
+static int __initdata prom_iommu_off;
 static unsigned long __initdata prom_tce_alloc_start;
 static unsigned long __initdata prom_tce_alloc_end;
 #endif
@@ -582,9 +582,9 @@ static void __init early_cmdline_parse(void)
                while (*opt && *opt == ' ')
                        opt++;
                if (!strncmp(opt, RELOC("off"), 3))
-                       RELOC(ppc64_iommu_off) = 1;
+                       RELOC(prom_iommu_off) = 1;
                else if (!strncmp(opt, RELOC("force"), 5))
-                       RELOC(iommu_force_on) = 1;
+                       RELOC(prom_iommu_force_on) = 1;
        }
 #endif
 }
@@ -627,6 +627,7 @@ static void __init early_cmdline_parse(void)
 /* Option vector 3: processor options supported */
 #define OV3_FP                 0x80    /* floating point */
 #define OV3_VMX                        0x40    /* VMX/Altivec */
+#define OV3_DFP                        0x20    /* decimal FP */
 
 /* Option vector 5: PAPR/OF options supported */
 #define OV5_LPAR               0x80    /* logical partitioning supported */
@@ -642,6 +643,7 @@ static void __init early_cmdline_parse(void)
 static unsigned char ibm_architecture_vec[] = {
        W(0xfffe0000), W(0x003a0000),   /* POWER5/POWER5+ */
        W(0xffff0000), W(0x003e0000),   /* POWER6 */
+       W(0xffffffff), W(0x0f000002),   /* all 2.05-compliant */
        W(0xfffffffe), W(0x0f000001),   /* all 2.04-compliant and earlier */
        5 - 1,                          /* 5 option vectors */
 
@@ -668,7 +670,7 @@ static unsigned char ibm_architecture_vec[] = {
        /* option vector 3: processor options supported */
        3 - 2,                          /* length */
        0,                              /* don't ignore, don't halt */
-       OV3_FP | OV3_VMX,
+       OV3_FP | OV3_VMX | OV3_DFP,
 
        /* option vector 4: IBM PAPR implementation */
        2 - 2,                          /* length */
@@ -1167,7 +1169,7 @@ static void __init prom_initialize_tce_table(void)
        u64 local_alloc_top, local_alloc_bottom;
        u64 i;
 
-       if (RELOC(ppc64_iommu_off))
+       if (RELOC(prom_iommu_off))
                return;
 
        prom_debug("starting prom_initialize_tce_table\n");
@@ -2283,11 +2285,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         * Fill in some infos for use by the kernel later on
         */
 #ifdef CONFIG_PPC64
-       if (RELOC(ppc64_iommu_off))
+       if (RELOC(prom_iommu_off))
                prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
                             NULL, 0);
 
-       if (RELOC(iommu_force_on))
+       if (RELOC(prom_iommu_force_on))
                prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
                             NULL, 0);
 
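
Option vector 3 in ibm_architecture_vec is a single byte of capability bits; the hunk above adds OV3_DFP (decimal floating point) alongside FP and VMX. A minimal standalone decoding of that byte, using only the three defines visible above:

#include <stdio.h>
#include <stdint.h>

/* The three option-vector-3 bits visible in the hunk above. */
#define OV3_FP	0x80	/* floating point */
#define OV3_VMX	0x40	/* VMX/Altivec */
#define OV3_DFP	0x20	/* decimal FP */

int main(void)
{
	uint8_t ov3 = OV3_FP | OV3_VMX | OV3_DFP;	/* the byte now advertised */

	printf("OV3 byte 0x%02x: FP=%d VMX=%d DFP=%d\n", ov3,
	       !!(ov3 & OV3_FP), !!(ov3 & OV3_VMX), !!(ov3 & OV3_DFP));
	return 0;
}
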
index 603dff3ad62adaa18ff6a24ac63c0ee7cfb30e7d..0dfbe1cd28eb2644cd59927eae4938fa8fdf088a 100644 (file)
 #define OF_CHECK_COUNTS(na, ns)        ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
                        (ns) > 0)
 
+static struct of_bus *of_match_bus(struct device_node *np);
+static int __of_address_to_resource(struct device_node *dev,
+               const u32 *addrp, u64 size, unsigned int flags,
+               struct resource *r);
+
+
 /* Debug utility */
 #ifdef DEBUG
 static void of_dump_addr(const char *s, const u32 *addr, int na)
@@ -101,6 +107,7 @@ static unsigned int of_bus_default_get_flags(const u32 *addr)
 }
 
 
+#ifdef CONFIG_PCI
 /*
  * PCI bus specific translator
  */
@@ -153,15 +160,156 @@ static unsigned int of_bus_pci_get_flags(const u32 *addr)
        switch((w >> 24) & 0x03) {
        case 0x01:
                flags |= IORESOURCE_IO;
+               break;
        case 0x02: /* 32 bits */
        case 0x03: /* 64 bits */
                flags |= IORESOURCE_MEM;
+               break;
        }
        if (w & 0x40000000)
                flags |= IORESOURCE_PREFETCH;
        return flags;
 }
 
+const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+                       unsigned int *flags)
+{
+       const u32 *prop;
+       unsigned int psize;
+       struct device_node *parent;
+       struct of_bus *bus;
+       int onesize, i, na, ns;
+
+       /* Get parent & match bus type */
+       parent = of_get_parent(dev);
+       if (parent == NULL)
+               return NULL;
+       bus = of_match_bus(parent);
+       if (strcmp(bus->name, "pci")) {
+               of_node_put(parent);
+               return NULL;
+       }
+       bus->count_cells(dev, &na, &ns);
+       of_node_put(parent);
+       if (!OF_CHECK_COUNTS(na, ns))
+               return NULL;
+
+       /* Get "reg" or "assigned-addresses" property */
+       prop = get_property(dev, bus->addresses, &psize);
+       if (prop == NULL)
+               return NULL;
+       psize /= 4;
+
+       onesize = na + ns;
+       for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+               if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+                       if (size)
+                               *size = of_read_number(prop + na, ns);
+                       if (flags)
+                               *flags = bus->get_flags(prop);
+                       return prop;
+               }
+       return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+                              struct resource *r)
+{
+       const u32       *addrp;
+       u64             size;
+       unsigned int    flags;
+
+       addrp = of_get_pci_address(dev, bar, &size, &flags);
+       if (addrp == NULL)
+               return -EINVAL;
+       return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
+{
+       return (((pin - 1) + slot) % 4) + 1;
+}
+
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+       struct device_node *dn, *ppnode;
+       struct pci_dev *ppdev;
+       u32 lspec;
+       u32 laddr[3];
+       u8 pin;
+       int rc;
+
+       /* Check if we have a device node, if yes, fallback to standard OF
+        * parsing
+        */
+       dn = pci_device_to_OF_node(pdev);
+       if (dn)
+               return of_irq_map_one(dn, 0, out_irq);
+
+       /* Ok, we don't, time to have fun. Let's start by building up an
+        * interrupt spec.  we assume #interrupt-cells is 1, which is standard
+        * for PCI. If you do different, then don't use that routine.
+        */
+       rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+       if (rc != 0)
+               return rc;
+       /* No pin, exit */
+       if (pin == 0)
+               return -ENODEV;
+
+       /* Now we walk up the PCI tree */
+       lspec = pin;
+       for (;;) {
+               /* Get the pci_dev of our parent */
+               ppdev = pdev->bus->self;
+
+               /* Ouch, it's a host bridge... */
+               if (ppdev == NULL) {
+#ifdef CONFIG_PPC64
+                       ppnode = pci_bus_to_OF_node(pdev->bus);
+#else
+                       struct pci_controller *host;
+                       host = pci_bus_to_host(pdev->bus);
+                       ppnode = host ? host->arch_data : NULL;
+#endif
+                       /* No node for host bridge ? give up */
+                       if (ppnode == NULL)
+                               return -EINVAL;
+               } else
+                       /* We found a P2P bridge, check if it has a node */
+                       ppnode = pci_device_to_OF_node(ppdev);
+
+               /* Ok, we have found a parent with a device-node, hand over to
+                * the OF parsing code.
+                * We build a unit address from the linux device to be used for
+                * resolution. Note that we use the linux bus number which may
+                * not match your firmware bus numbering.
+                * Fortunately, in most cases, interrupt-map-mask doesn't include
+                * the bus number as part of the matching.
+                * You should still be careful about that though if you intend
+                * to rely on this function (you ship  a firmware that doesn't
+                * create device nodes for all PCI devices).
+                */
+               if (ppnode)
+                       break;
+
+               /* We can only get here if we hit a P2P bridge with no node,
+                * let's do standard swizzling and try again
+                */
+               lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+               pdev = ppdev;
+       }
+
+       laddr[0] = (pdev->bus->number << 16)
+               | (pdev->devfn << 8);
+       laddr[1]  = laddr[2] = 0;
+       return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
+#endif /* CONFIG_PCI */
+
 /*
  * ISA bus specific translator
  */
@@ -223,6 +371,7 @@ static unsigned int of_bus_isa_get_flags(const u32 *addr)
  */
 
 static struct of_bus of_busses[] = {
+#ifdef CONFIG_PCI
        /* PCI */
        {
                .name = "pci",
@@ -233,6 +382,7 @@ static struct of_bus of_busses[] = {
                .translate = of_bus_pci_translate,
                .get_flags = of_bus_pci_get_flags,
        },
+#endif /* CONFIG_PCI */
        /* ISA */
        {
                .name = "isa",
@@ -445,48 +595,6 @@ const u32 *of_get_address(struct device_node *dev, int index, u64 *size,
 }
 EXPORT_SYMBOL(of_get_address);
 
-const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
-                       unsigned int *flags)
-{
-       const u32 *prop;
-       unsigned int psize;
-       struct device_node *parent;
-       struct of_bus *bus;
-       int onesize, i, na, ns;
-
-       /* Get parent & match bus type */
-       parent = of_get_parent(dev);
-       if (parent == NULL)
-               return NULL;
-       bus = of_match_bus(parent);
-       if (strcmp(bus->name, "pci")) {
-               of_node_put(parent);
-               return NULL;
-       }
-       bus->count_cells(dev, &na, &ns);
-       of_node_put(parent);
-       if (!OF_CHECK_COUNTS(na, ns))
-               return NULL;
-
-       /* Get "reg" or "assigned-addresses" property */
-       prop = get_property(dev, bus->addresses, &psize);
-       if (prop == NULL)
-               return NULL;
-       psize /= 4;
-
-       onesize = na + ns;
-       for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
-               if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
-                       if (size)
-                               *size = of_read_number(prop + na, ns);
-                       if (flags)
-                               *flags = bus->get_flags(prop);
-                       return prop;
-               }
-       return NULL;
-}
-EXPORT_SYMBOL(of_get_pci_address);
-
 static int __of_address_to_resource(struct device_node *dev, const u32 *addrp,
                                    u64 size, unsigned int flags,
                                    struct resource *r)
@@ -529,20 +637,6 @@ int of_address_to_resource(struct device_node *dev, int index,
 }
 EXPORT_SYMBOL_GPL(of_address_to_resource);
 
-int of_pci_address_to_resource(struct device_node *dev, int bar,
-                              struct resource *r)
-{
-       const u32       *addrp;
-       u64             size;
-       unsigned int    flags;
-
-       addrp = of_get_pci_address(dev, bar, &size, &flags);
-       if (addrp == NULL)
-               return -EINVAL;
-       return __of_address_to_resource(dev, addrp, size, flags, r);
-}
-EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-
 void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
                unsigned long *busno, unsigned long *phys, unsigned long *size)
 {
@@ -898,87 +992,3 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
        return res;
 }
 EXPORT_SYMBOL_GPL(of_irq_map_one);
-
-#ifdef CONFIG_PCI
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
-       return (((pin - 1) + slot) % 4) + 1;
-}
-
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
-{
-       struct device_node *dn, *ppnode;
-       struct pci_dev *ppdev;
-       u32 lspec;
-       u32 laddr[3];
-       u8 pin;
-       int rc;
-
-       /* Check if we have a device node, if yes, fallback to standard OF
-        * parsing
-        */
-       dn = pci_device_to_OF_node(pdev);
-       if (dn)
-               return of_irq_map_one(dn, 0, out_irq);
-
-       /* Ok, we don't, time to have fun. Let's start by building up an
-        * interrupt spec.  we assume #interrupt-cells is 1, which is standard
-        * for PCI. If you do different, then don't use that routine.
-        */
-       rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
-       if (rc != 0)
-               return rc;
-       /* No pin, exit */
-       if (pin == 0)
-               return -ENODEV;
-
-       /* Now we walk up the PCI tree */
-       lspec = pin;
-       for (;;) {
-               /* Get the pci_dev of our parent */
-               ppdev = pdev->bus->self;
-
-               /* Ouch, it's a host bridge... */
-               if (ppdev == NULL) {
-#ifdef CONFIG_PPC64
-                       ppnode = pci_bus_to_OF_node(pdev->bus);
-#else
-                       struct pci_controller *host;
-                       host = pci_bus_to_host(pdev->bus);
-                       ppnode = host ? host->arch_data : NULL;
-#endif
-                       /* No node for host bridge ? give up */
-                       if (ppnode == NULL)
-                               return -EINVAL;
-               } else
-                       /* We found a P2P bridge, check if it has a node */
-                       ppnode = pci_device_to_OF_node(ppdev);
-
-               /* Ok, we have found a parent with a device-node, hand over to
-                * the OF parsing code.
-                * We build a unit address from the linux device to be used for
-                * resolution. Note that we use the linux bus number which may
-                * not match your firmware bus numbering.
-                * Fortunately, in most cases, interrupt-map-mask doesn't include
-                * the bus number as part of the matching.
-                * You should still be careful about that though if you intend
-                * to rely on this function (you ship  a firmware that doesn't
-                * create device nodes for all PCI devices).
-                */
-               if (ppnode)
-                       break;
-
-               /* We can only get here if we hit a P2P bridge with no node,
-                * let's do standard swizzling and try again
-                */
-               lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
-               pdev = ppdev;
-       }
-
-       laddr[0] = (pdev->bus->number << 16)
-               | (pdev->devfn << 8);
-       laddr[1]  = laddr[2] = 0;
-       return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq);
-}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
-#endif /* CONFIG_PCI */
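
of_irq_map_pci() above walks up the PCI tree and, for each node-less P2P bridge, applies the standard INTx swizzle implemented by of_irq_pci_swizzle(). The sketch below is a standalone rendering of that arithmetic; the slot and pin values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as of_irq_pci_swizzle() above: at each transparent P2P
 * bridge, the INTx pin (1 = INTA .. 4 = INTD) is rotated by the device's
 * slot number. */
static uint8_t pci_swizzle(uint8_t slot, uint8_t pin)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* Illustrative only: a device on INTB (pin 2) behind two node-less
	 * bridges plugged into slots 3 and 5 of their parent buses. */
	uint8_t pin = 2;
	uint8_t slots[] = { 3, 5 };
	unsigned int i;

	for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
		pin = pci_swizzle(slots[i], pin);
		printf("after bridge in slot %u: INT%c\n",
		       (unsigned int)slots[i], 'A' + pin - 1);
	}
	return 0;
}
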
index 6ef80d4e38d3c08cdfe9e18aa0dd4634b43fe76a..387ed0d9ad618b3597a7f0931af169993ba103ee 100644 (file)
@@ -810,9 +810,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
        return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 /* This version can't take the spinlock, because it never returns */
-
-struct rtas_args rtas_stop_self_args = {
+static struct rtas_args rtas_stop_self_args = {
        /* The token is initialized for real in setup_system() */
        .token = RTAS_UNKNOWN_SERVICE,
        .nargs = 0,
@@ -834,6 +834,7 @@ void rtas_stop_self(void)
 
        panic("Alas, I survived.\n");
 }
+#endif
 
 /*
  * Call early during boot, before mem init or bootmem, to retrieve the RTAS
index 6f6fc977cb399a189c0d530dc44badf1c4a021ce..7d0f13fecc0eb8fcce93470ed07ce9d79d660708 100644 (file)
@@ -101,7 +101,7 @@ struct flash_block_list_header { /* just the header of flash_block_list */
 static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
 
 /* Use slab cache to guarantee 4k alignment */
-static kmem_cache_t *flash_block_cache = NULL;
+static struct kmem_cache *flash_block_cache = NULL;
 
 #define FLASH_BLOCK_LIST_VERSION (1UL)
 
@@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
 }
 
 /* constructor for flash_block_cache */
-void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
 {
        memset(ptr, 0, RTAS_BLK_SIZE);
 }
@@ -681,14 +681,12 @@ static int initialize_flash_pde_data(const char *rtas_call_name,
        int *status;
        int token;
 
-       dp->data = kmalloc(buf_size, GFP_KERNEL);
+       dp->data = kzalloc(buf_size, GFP_KERNEL);
        if (dp->data == NULL) {
                remove_flash_pde(dp);
                return -ENOMEM;
        }
 
-       memset(dp->data, 0, buf_size);
-
        /*
         * This code assumes that the status int is the first member of the
         * struct 
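
The hunk above folds a kmalloc() plus memset() pair into a single kzalloc() call. As a runnable analogy only (userspace C, not kernel code), the same simplification looks like replacing malloc()+memset() with calloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t buf_size = 64;

	/* Old shape from the hunk: allocate, then zero by hand. */
	unsigned char *a = malloc(buf_size);
	if (a == NULL)
		return 1;
	memset(a, 0, buf_size);

	/* New shape: one call that hands back zeroed memory (kzalloc() in
	 * the kernel, calloc() here). */
	unsigned char *b = calloc(1, buf_size);
	if (b == NULL) {
		free(a);
		return 1;
	}

	printf("a[0]=%u b[0]=%u, both zero-initialized\n", a[0], b[0]);
	free(a);
	free(b);
	return 0;
}
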
index b4a0de79c0600e306aaae78aae5a84bf3c94cf00..ace9f4c86e670f7e8a66cec053b553e27317b506 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/rtas.h>
 #include <asm/mpic.h>
 #include <asm/ppc-pci.h>
+#include <asm/eeh.h>
 
 /* RTAS tokens */
 static int read_pci_config;
@@ -231,32 +232,13 @@ void __init init_pci_config_tokens (void)
 
 unsigned long __devinit get_phb_buid (struct device_node *phb)
 {
-       int addr_cells;
-       const unsigned int *buid_vals;
-       unsigned int len;
-       unsigned long buid;
-
-       if (ibm_read_pci_config == -1) return 0;
+       struct resource r;
 
-       /* PHB's will always be children of the root node,
-        * or so it is promised by the current firmware. */
-       if (phb->parent == NULL)
+       if (ibm_read_pci_config == -1)
                return 0;
-       if (phb->parent->parent)
-               return 0;
-
-       buid_vals = get_property(phb, "reg", &len);
-       if (buid_vals == NULL)
+       if (of_address_to_resource(phb, 0, &r))
                return 0;
-
-       addr_cells = prom_n_addr_cells(phb);
-       if (addr_cells == 1) {
-               buid = (unsigned long) buid_vals[0];
-       } else {
-               buid = (((unsigned long)buid_vals[0]) << 32UL) |
-                       (((unsigned long)buid_vals[1]) & 0xffffffff);
-       }
-       return buid;
+       return r.start;
 }
 
 static int phb_set_bus_ranges(struct device_node *dev,
@@ -276,8 +258,10 @@ static int phb_set_bus_ranges(struct device_node *dev,
        return 0;
 }
 
-int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb)
+int __devinit rtas_setup_phb(struct pci_controller *phb)
 {
+       struct device_node *dev = phb->arch_data;
+
        if (is_python(dev))
                python_countermeasures(dev);
 
@@ -309,7 +293,7 @@ unsigned long __init find_and_init_phbs(void)
                phb = pcibios_alloc_controller(node);
                if (!phb)
                        continue;
-               setup_phb(node, phb);
+               rtas_setup_phb(phb);
                pci_process_bridge_OF_ranges(phb, node, 0);
                pci_setup_phb_io(phb, index == 0);
                index++;
@@ -381,7 +365,6 @@ int pcibios_remove_root_bus(struct pci_controller *phb)
                }
        }
 
-       list_del(&phb->list_node);
        pcibios_free_controller(phb);
 
        return 0;
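
get_phb_buid() above now simply takes the start of the PHB's first address resource instead of reassembling the BUID from the "reg" cells by hand. The standalone helper below reproduces the removed cell-combining arithmetic so the simplification is visible; the cell values are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Reproduction of the arithmetic removed above: combine one or two 32-bit
 * "reg" cells into a 64-bit BUID, depending on #address-cells. */
static uint64_t buid_from_cells(const uint32_t *cells, int addr_cells)
{
	if (addr_cells == 1)
		return cells[0];
	return ((uint64_t)cells[0] << 32) | (cells[1] & 0xffffffffULL);
}

int main(void)
{
	uint32_t one_cell[]  = { 0x10000000 };
	uint32_t two_cells[] = { 0x00000080, 0x30000000 };

	printf("1 cell : buid = 0x%llx\n",
	       (unsigned long long)buid_from_cells(one_cell, 1));
	printf("2 cells: buid = 0x%llx\n",
	       (unsigned long long)buid_from_cells(two_cells, 2));
	return 0;
}
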
index a4c2964a3ca6b325f86a52eb1c7880c534ff43d7..61c65d19ef0618409feeedc778077765acc63b0f 100644 (file)
@@ -63,10 +63,6 @@ unsigned int DMA_MODE_WRITE;
 
 int have_of = 1;
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-dev_t boot_dev;
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 #ifdef CONFIG_VGA_CONSOLE
 unsigned long vgacon_remap_base;
 #endif
@@ -101,7 +97,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
         * Identify the CPU type and fix up code sections
         * that depend on which cpu we have.
         */
-       spec = identify_cpu(offset);
+       spec = identify_cpu(offset, mfspr(SPRN_PVR));
 
        do_feature_fixups(spec->cpu_features,
                          PTRRELOC(&__start___ftr_fixup),
index 16278968dab68e2153a284709e2b4dfdd067adb4..3733de30e84dc07f6a52df31bf3f4506d31ca3ca 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
 #include <linux/bootmem.h>
+#include <linux/pci.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
@@ -71,7 +72,6 @@
 
 int have_of = 1;
 int boot_cpuid = 0;
-dev_t boot_dev;
 u64 ppc64_pft_size;
 
 /* Pick defaults since we might want to patch instructions
@@ -171,7 +171,7 @@ void __init setup_paca(int cpu)
 void __init early_setup(unsigned long dt_ptr)
 {
        /* Identify CPU type */
-       identify_cpu(0);
+       identify_cpu(0, mfspr(SPRN_PVR));
 
        /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
        setup_paca(0);
@@ -226,8 +226,8 @@ void early_setup_secondary(void)
 {
        struct paca_struct *lpaca = get_paca();
 
-       /* Mark enabled in PACA */
-       lpaca->proc_enabled = 0;
+       /* Mark interrupts soft-disabled in PACA */
+       lpaca->soft_enabled = 0;
 
        /* Initialize hash table for that CPU */
        htab_initialize_secondary();
@@ -392,7 +392,8 @@ void __init setup_system(void)
         * setting up the hash table pointers. It also sets up some interrupt-mapping
         * related options that will be used by finish_device_tree()
         */
-       ppc_md.init_early();
+       if (ppc_md.init_early)
+               ppc_md.init_early();
 
        /*
         * We can discover serial ports now since the above did setup the
@@ -598,3 +599,10 @@ void __init setup_per_cpu_areas(void)
        }
 }
 #endif
+
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+struct ppc_pci_io ppc_pci_io;
+EXPORT_SYMBOL(ppc_pci_io);
+#endif /* CONFIG_PPC_INDIRECT_IO */
+
index 320353f0926f99334aa3821d30cca981f28b4ff5..e4ebe1a6228e5e9a9ad32da2dd7f601f5f6afc61 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/stddef.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #endif
 
 #include <asm/uaccess.h>
index de59c6c31a5b81f1cc3f28ff12af611d4b558df5..bc892e69b4f732cd11c46a5ccb79be51eaf34fae 100644 (file)
@@ -78,7 +78,7 @@ static int __devinit start_contest(int cmd, long offset, int num)
 {
        int i, score=0;
        u64 tb;
-       long mark;
+       u64 mark;
 
        tbsync->cmd = cmd;
 
@@ -116,8 +116,7 @@ void __devinit smp_generic_give_timebase(void)
        printk("Synchronizing timebase\n");
 
        /* if this fails then this kernel won't work anyway... */
-       tbsync = kmalloc( sizeof(*tbsync), GFP_KERNEL );
-       memset( tbsync, 0, sizeof(*tbsync) );
+       tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
        mb();
        running = 1;
 
index 35c6309bdb76a4dbb97616dbb08f94c2bccb8168..9b28c238b6c0f4b2025dfb8ac5be28857cf3136e 100644 (file)
@@ -65,6 +65,7 @@ cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
+EXPORT_SYMBOL(cpu_sibling_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
index d15c33e95959998dde17fdbd053244f0ecfa80f7..03a2a2f30d66dfcf555881fd05bed1228de6dbd7 100644 (file)
@@ -51,6 +51,7 @@
 #include <asm/time.h>
 #include <asm/mmu_context.h>
 #include <asm/ppc-pci.h>
+#include <asm/syscalls.h>
 
 /* readdir & getdents */
 #define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
index d45a168bdacaaff0dc7092e50772102046566eb1..63ed265b7f0936765a1cc7bc9356404acf679e68 100644 (file)
@@ -200,10 +200,9 @@ static void register_cpu_online(unsigned int cpu)
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
 
-#ifndef CONFIG_PPC_ISERIES
-       if (cpu_has_feature(CPU_FTR_SMT))
+       if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+                       cpu_has_feature(CPU_FTR_SMT))
                sysdev_create_file(s, &attr_smt_snooze_delay);
-#endif
 
        /* PMC stuff */
 
@@ -240,12 +239,11 @@ static void unregister_cpu_online(unsigned int cpu)
        struct cpu *c = &per_cpu(cpu_devices, cpu);
        struct sys_device *s = &c->sysdev;
 
-       BUG_ON(c->no_control);
+       BUG_ON(!c->hotpluggable);
 
-#ifndef CONFIG_PPC_ISERIES
-       if (cpu_has_feature(CPU_FTR_SMT))
+       if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
+                       cpu_has_feature(CPU_FTR_SMT))
                sysdev_remove_file(s, &attr_smt_snooze_delay);
-#endif
 
        /* PMC stuff */
 
@@ -299,6 +297,72 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
        .notifier_call  = sysfs_cpu_notify,
 };
 
+static DEFINE_MUTEX(cpu_mutex);
+
+int cpu_add_sysdev_attr(struct sysdev_attribute *attr)
+{
+       int cpu;
+
+       mutex_lock(&cpu_mutex);
+
+       for_each_possible_cpu(cpu) {
+               sysdev_create_file(get_cpu_sysdev(cpu), attr);
+       }
+
+       mutex_unlock(&cpu_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr);
+
+int cpu_add_sysdev_attr_group(struct attribute_group *attrs)
+{
+       int cpu;
+       struct sys_device *sysdev;
+
+       mutex_lock(&cpu_mutex);
+
+       for_each_possible_cpu(cpu) {
+               sysdev = get_cpu_sysdev(cpu);
+               sysfs_create_group(&sysdev->kobj, attrs);
+       }
+
+       mutex_unlock(&cpu_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group);
+
+
+void cpu_remove_sysdev_attr(struct sysdev_attribute *attr)
+{
+       int cpu;
+
+       mutex_lock(&cpu_mutex);
+
+       for_each_possible_cpu(cpu) {
+               sysdev_remove_file(get_cpu_sysdev(cpu), attr);
+       }
+
+       mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr);
+
+void cpu_remove_sysdev_attr_group(struct attribute_group *attrs)
+{
+       int cpu;
+       struct sys_device *sysdev;
+
+       mutex_lock(&cpu_mutex);
+
+       for_each_possible_cpu(cpu) {
+               sysdev = get_cpu_sysdev(cpu);
+               sysfs_remove_group(&sysdev->kobj, attrs);
+       }
+
+       mutex_unlock(&cpu_mutex);
+}
+EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group);
+
+
 /* NUMA stuff */
 
 #ifdef CONFIG_NUMA
@@ -360,10 +424,10 @@ static int __init topology_init(void)
                 * CPU.  For instance, the boot cpu might never be valid
                 * for hotplugging.
                 */
-               if (!ppc_md.cpu_die)
-                       c->no_control = 1;
+               if (ppc_md.cpu_die)
+                       c->hotpluggable = 1;
 
-               if (cpu_online(cpu) || (c->no_control == 0)) {
+               if (cpu_online(cpu) || c->hotpluggable) {
                        register_cpu(c, cpu);
 
                        sysdev_create_file(&c->sysdev, &attr_physical_id);
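
The new cpu_add_sysdev_attr()/..._group() helpers above all share one shape: take a mutex, apply the same operation to every possible CPU, release the mutex. Below is a userspace analogy of that pattern (a pthread mutex stands in for the kernel mutex and a printf for sysdev_create_file(); all names are invented for the demo).

/* Build with: cc demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

#define NR_FAKE_CPUS 4

static pthread_mutex_t cpu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for cpu_add_sysdev_attr(): one operation per possible CPU,
 * serialized against concurrent add/remove callers by a mutex. */
static void add_attr_for_each_cpu(const char *attr_name)
{
	int cpu;

	pthread_mutex_lock(&cpu_mutex);
	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		printf("cpu%d: create file '%s'\n", cpu, attr_name);
	pthread_mutex_unlock(&cpu_mutex);
}

int main(void)
{
	add_attr_for_each_cpu("smt_snooze_delay");
	return 0;
}
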
index 46a24de36fec4bd93acd1fd09983380cf57e5abc..f6f0c6b07c4cf335e41357def28b7388bf07d793 100644 (file)
@@ -631,7 +631,8 @@ void timer_interrupt(struct pt_regs * regs)
        calculate_steal_time();
 
 #ifdef CONFIG_PPC_ISERIES
-       get_lppaca()->int_dword.fields.decr_int = 0;
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
        while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -674,7 +675,7 @@ void timer_interrupt(struct pt_regs * regs)
        set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
-       if (hvlpevent_is_pending())
+       if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
 #endif
 
@@ -774,7 +775,7 @@ int do_settimeofday(struct timespec *tv)
         * settimeofday to perform this operation.
         */
 #ifdef CONFIG_PPC_ISERIES
-       if (first_settimeofday) {
+       if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) {
                iSeries_tb_recal();
                first_settimeofday = 0;
        }
index c66b4771ef445e264194bbc8ac039ce9297b31e2..0d4e203fa7a05d90751ff0e7e60cb891f413d6af 100644 (file)
 #endif
 #include <asm/kexec.h>
 
-#ifdef CONFIG_PPC64    /* XXX */
-#define _IO_BASE       pci_io_base
-#endif
-
 #ifdef CONFIG_DEBUGGER
 int (*__debugger)(struct pt_regs *regs);
 int (*__debugger_ipi)(struct pt_regs *regs);
@@ -241,7 +237,7 @@ void system_reset_exception(struct pt_regs *regs)
  */
 static inline int check_io_access(struct pt_regs *regs)
 {
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
+#ifdef CONFIG_PPC32
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;
@@ -274,7 +270,7 @@ static inline int check_io_access(struct pt_regs *regs)
                        return 1;
                }
        }
-#endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
+#endif /* CONFIG_PPC32 */
        return 0;
 }
 
index c913ad5cad2918e3daebe1251de37a18292f67c9..a4b28c73bba067e3fc7734b1c87dc46b67f1ceea 100644 (file)
@@ -264,7 +264,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 
 
        /* Allocate a VMA structure and fill it up */
-       vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma == NULL) {
                rc = -ENOMEM;
                goto fail_mmapsem;
index ed007878d1bf977f311f941d0c3351c96968da63..a80f8f1d2e5d9e1062562614dd87b6fe7e5614f7 100644 (file)
@@ -81,15 +81,15 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
                struct iommu_table *tbl;
                unsigned long offset, size;
 
-               dma_window = get_property(dev->dev.platform_data,
-                               "ibm,my-dma-window", NULL);
+               dma_window = get_property(dev->dev.archdata.of_node,
+                                         "ibm,my-dma-window", NULL);
                if (!dma_window)
                        return NULL;
 
                tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
 
-               of_parse_dma_window(dev->dev.platform_data, dma_window,
-                               &tbl->it_index, &offset, &size);
+               of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
+                                   &tbl->it_index, &offset, &size);
 
                /* TCE table size - measured in tce entries */
                tbl->it_size = size >> IOMMU_PAGE_SHIFT;
@@ -117,7 +117,8 @@ static const struct vio_device_id *vio_match_device(
 {
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
-                   device_is_compatible(dev->dev.platform_data, ids->compat))
+                   device_is_compatible(dev->dev.archdata.of_node,
+                                        ids->compat))
                        return ids;
                ids++;
        }
@@ -198,9 +199,9 @@ EXPORT_SYMBOL(vio_unregister_driver);
 /* vio_dev refcount hit 0 */
 static void __devinit vio_dev_release(struct device *dev)
 {
-       if (dev->platform_data) {
-               /* XXX free TCE table */
-               of_node_put(dev->platform_data);
+       if (dev->archdata.of_node) {
+               /* XXX should free TCE table */
+               of_node_put(dev->archdata.of_node);
        }
        kfree(to_vio_dev(dev));
 }
@@ -210,7 +211,7 @@ static void __devinit vio_dev_release(struct device *dev)
  * @of_node:   The OF node for this device.
  *
  * Creates and initializes a vio_dev structure from the data in
- * of_node (dev.platform_data) and adds it to the list of virtual devices.
+ * of_node and adds it to the list of virtual devices.
  * Returns a pointer to the created vio_dev or NULL if node has
  * NULL device_type or compatible fields.
  */
@@ -240,8 +241,6 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
        if (viodev == NULL)
                return NULL;
 
-       viodev->dev.platform_data = of_node_get(of_node);
-
        viodev->irq = irq_of_parse_and_map(of_node, 0);
 
        snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
@@ -254,7 +253,10 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
                if (unit_address != NULL)
                        viodev->unit_address = *unit_address;
        }
-       viodev->iommu_table = vio_build_iommu_table(viodev);
+       viodev->dev.archdata.of_node = of_node_get(of_node);
+       viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+       viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
+       viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
 
        /* init generic 'struct device' fields: */
        viodev->dev.parent = &vio_bus_device.dev;
@@ -285,10 +287,11 @@ static int __init vio_bus_init(void)
 #ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                iommu_vio_init();
-               vio_bus_device.iommu_table = &vio_iommu_table;
+               vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
+               vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
                iSeries_vio_dev = &vio_bus_device.dev;
        }
-#endif
+#endif /* CONFIG_PPC_ISERIES */
 
        err = bus_register(&vio_bus_type);
        if (err) {
@@ -336,7 +339,7 @@ static ssize_t name_show(struct device *dev,
 static ssize_t devspec_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
-       struct device_node *of_node = dev->platform_data;
+       struct device_node *of_node = dev->archdata.of_node;
 
        return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
 }
@@ -353,62 +356,6 @@ void __devinit vio_unregister_device(struct vio_dev *viodev)
 }
 EXPORT_SYMBOL(vio_unregister_device);
 
-static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
-                         size_t size, enum dma_data_direction direction)
-{
-       return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
-                       ~0ul, direction);
-}
-
-static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
-                     size_t size, enum dma_data_direction direction)
-{
-       iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
-                       direction);
-}
-
-static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
-                       nelems, ~0ul, direction);
-}
-
-static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction)
-{
-       iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
-}
-
-static void *vio_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag)
-{
-       return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
-                       dma_handle, ~0ul, flag, -1);
-}
-
-static void vio_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
-{
-       iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
-                       dma_handle);
-}
-
-static int vio_dma_supported(struct device *dev, u64 mask)
-{
-       return 1;
-}
-
-struct dma_mapping_ops vio_dma_ops = {
-       .alloc_coherent = vio_alloc_coherent,
-       .free_coherent = vio_free_coherent,
-       .map_single = vio_map_single,
-       .unmap_single = vio_unmap_single,
-       .map_sg = vio_map_sg,
-       .unmap_sg = vio_unmap_sg,
-       .dma_supported = vio_dma_supported,
-};
-
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
        const struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -422,13 +369,14 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp,
                        char *buffer, int buffer_size)
 {
        const struct vio_dev *vio_dev = to_vio_dev(dev);
-       struct device_node *dn = dev->platform_data;
+       struct device_node *dn;
        const char *cp;
        int length;
 
        if (!num_envp)
                return -ENOMEM;
 
+       dn = dev->archdata.of_node;
        if (!dn)
                return -ENODEV;
        cp = get_property(dn, "compatible", &length);
@@ -465,7 +413,7 @@ struct bus_type vio_bus_type = {
 */
 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
 {
-       return get_property(vdev->dev.platform_data, which, length);
+       return get_property(vdev->dev.archdata.of_node, which, length);
 }
 EXPORT_SYMBOL(vio_get_attribute);
 
index e8342d86753675d0ad20b3866bcdb12165d30d49..04b98671a0608d6ced7470712291bde24d3e01ac 100644 (file)
@@ -33,6 +33,7 @@ SECTIONS
 
        /* Text and gots */
        .text : {
+               _text = .;
                *(.text .text.*)
                SCHED_TEXT
                LOCK_TEXT
index 93441e7a2921457cb50623b89e97833cecb1f0b6..38a81967ca0702a1f0f04b4de7aa8712fe79627d 100644 (file)
@@ -8,7 +8,7 @@ endif
 
 obj-y                          := fault.o mem.o lmb.o
 obj-$(CONFIG_PPC32)            += init_32.o pgtable_32.o mmu_context_32.o
-hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
+hash-$(CONFIG_PPC_NATIVE)      := hash_native_64.o
 obj-$(CONFIG_PPC64)            += init_64.o pgtable_64.o mmu_context_64.o \
                                   hash_utils_64.o hash_low_64.o tlb_64.o \
                                   slb_low.o slb.o stab.o mmap.o imalloc.o \
index e8fa50624b70dae41c27ce669e10f472b18970a8..03aeb3a460772528b009e3f240e735ffd463047d 100644 (file)
@@ -426,18 +426,21 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 
        /* kernel has accessed a bad area */
 
-       printk(KERN_ALERT "Unable to handle kernel paging request for ");
        switch (regs->trap) {
-               case 0x300:
-               case 0x380:
-                       printk("data at address 0x%08lx\n", regs->dar);
-                       break;
-               case 0x400:
-               case 0x480:
-                       printk("instruction fetch\n");
-                       break;
-               default:
-                       printk("unknown fault\n");
+       case 0x300:
+       case 0x380:
+               printk(KERN_ALERT "Unable to handle kernel paging request for "
+                       "data at address 0x%08lx\n", regs->dar);
+               break;
+       case 0x400:
+       case 0x480:
+               printk(KERN_ALERT "Unable to handle kernel paging request for "
+                       "instruction fetch\n");
+               break;
+       default:
+               printk(KERN_ALERT "Unable to handle kernel paging request for "
+                       "unknown fault\n");
+               break;
        }
        printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
                regs->nip);
index c90f124f3c71caed159f9eb04b6ab5b85d6194ed..6f1016acdbf691a9c4dbed2b65a111cdc1dff4d2 100644 (file)
@@ -123,7 +123,7 @@ static inline void native_unlock_hpte(hpte_t *hptep)
        clear_bit(HPTE_LOCK_BIT, word);
 }
 
-long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
                        unsigned long pa, unsigned long rflags,
                        unsigned long vflags, int psize)
 {
index 1915661c2c817b65944877d4943b688cc273cc1d..c0d2a694fa3031e5d7ab8d6cce1a371f3a71d044 100644 (file)
@@ -277,7 +277,7 @@ static void __init htab_init_page_sizes(void)
         * Not in the device-tree, let's fallback on known size
         * list for 16M capable GP & GR
         */
-       if (cpu_has_feature(CPU_FTR_16M_PAGE) && !machine_is(iseries))
+       if (cpu_has_feature(CPU_FTR_16M_PAGE))
                memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
                       sizeof(mmu_psize_defaults_gp));
  found:
index 506d89768d455ba8625ed480d4122c7d1c8264c9..89c836d548096a96110c020a0383f5a711bf40fa 100644 (file)
@@ -146,6 +146,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
        return hugepte_offset(hpdp, addr);
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
 static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 {
        pte_t *hugepte = hugepd_page(*hpdp);
@@ -1042,7 +1047,7 @@ repeat:
        return err;
 }
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
        memset(addr, 0, kmem_cache_size(cache));
 }
index 3ff374697e34945f15276ede561035b5aafccb3e..d12a87ec5ae99a5a5fde14e02d9e0bc64d50579d 100644 (file)
@@ -130,7 +130,7 @@ static int __init setup_kcore(void)
                /* GFP_ATOMIC to avoid might_sleep warnings during boot */
                kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
                if (!kcore_mem)
-                       panic("mem_init: kmalloc failed\n");
+                       panic("%s: kmalloc failed\n", __FUNCTION__);
 
                kclist_add(kcore_mem, __va(base), size);
        }
@@ -141,7 +141,7 @@ static int __init setup_kcore(void)
 }
 module_init(setup_kcore);
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
        memset(addr, 0, kmem_cache_size(cache));
 }
@@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
 /* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
  * can't put into the tables above, because HPAGE_SHIFT is not compile
  * time constant. */
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
 #else
-kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
 #endif
 
 void pgtable_cache_init(void)
index 8fcacb0239da8a9caedd78196159cc88307628b2..1891dbeeb8e9deb7ac88718fbb4680f54b579558 100644 (file)
@@ -141,29 +141,19 @@ void pte_free(struct page *ptepage)
        __free_page(ptepage);
 }
 
-#ifndef CONFIG_PHYS_64BIT
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
        return __ioremap(addr, size, _PAGE_NO_CACHE);
 }
-#else /* CONFIG_PHYS_64BIT */
-void __iomem *
-ioremap64(unsigned long long addr, unsigned long size)
-{
-       return __ioremap(addr, size, _PAGE_NO_CACHE);
-}
-EXPORT_SYMBOL(ioremap64);
+EXPORT_SYMBOL(ioremap);
 
 void __iomem *
-ioremap(phys_addr_t addr, unsigned long size)
+ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-       phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
-
-       return ioremap64(addr64, size);
+       return __ioremap(addr, size, flags);
 }
-#endif /* CONFIG_PHYS_64BIT */
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -264,20 +254,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-void __iomem *ioport_map(unsigned long port, unsigned int len)
-{
-       return (void __iomem *) (port + _IO_BASE);
-}
-
-void ioport_unmap(void __iomem *addr)
-{
-       /* Nothing to do */
-}
-EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
-
-int
-map_page(unsigned long va, phys_addr_t pa, int flags)
+int map_page(unsigned long va, phys_addr_t pa, int flags)
 {
        pmd_t *pd;
        pte_t *pg;
index ac64f4aaa5091b0fa48246eff049318303dc7bc2..16e4ee1c2318e726c8e2bf9df3b6a9b1ec867197 100644 (file)
@@ -113,7 +113,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
                            unsigned long ea, unsigned long size,
                            unsigned long flags)
 {
@@ -129,22 +129,12 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
        return (void __iomem *) (ea + (addr & ~PAGE_MASK));
 }
 
-
-void __iomem *
-ioremap(unsigned long addr, unsigned long size)
-{
-       return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
-}
-
-void __iomem * __ioremap(unsigned long addr, unsigned long size,
+void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                         unsigned long flags)
 {
        unsigned long pa, ea;
        void __iomem *ret;
 
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               return (void __iomem *)addr;
-
        /*
         * Choose an address to map it to.
         * Once the imalloc system is running, we use it.
@@ -178,9 +168,28 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
        return ret;
 }
 
+
+void __iomem * ioremap(phys_addr_t addr, unsigned long size)
+{
+       unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
+
+       if (ppc_md.ioremap)
+               return ppc_md.ioremap(addr, size, flags);
+       return __ioremap(addr, size, flags);
+}
+
+void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
+                            unsigned long flags)
+{
+       if (ppc_md.ioremap)
+               return ppc_md.ioremap(addr, size, flags);
+       return __ioremap(addr, size, flags);
+}
+
+
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
 
-int __ioremap_explicit(unsigned long pa, unsigned long ea,
+int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
                       unsigned long size, unsigned long flags)
 {
        struct vm_struct *area;
@@ -235,13 +244,10 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  *
  * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
-void iounmap(volatile void __iomem *token)
+void __iounmap(volatile void __iomem *token)
 {
        void *addr;
 
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               return;
-
        if (!mem_init_done)
                return;
        
@@ -250,6 +256,14 @@ void iounmap(volatile void __iomem *token)
        im_free(addr);
 }
 
+void iounmap(volatile void __iomem *token)
+{
+       if (ppc_md.iounmap)
+               ppc_md.iounmap(token);
+       else
+               __iounmap(token);
+}
+
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
 {
        struct vm_struct *area;
@@ -268,7 +282,7 @@ static int iounmap_subset_regions(unsigned long addr, unsigned long size)
        return 0;
 }
 
-int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
 {
        struct vm_struct *area;
        unsigned long addr;
@@ -303,8 +317,10 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
 }
 
 EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(__iounmap);
 
 void __iomem * reserve_phb_iospace(unsigned long size)
 {
index d3733912adb43e0c75641f66b32dc1dd2ef62dcb..224e960650a09ce3e1face7057fd8643d58a9e0d 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
 #include <linux/compiler.h>
 
 #ifdef DEBUG
@@ -193,6 +194,7 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 void slb_initialize(void)
 {
        unsigned long linear_llp, vmalloc_llp, io_llp;
+       unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
@@ -225,11 +227,12 @@ void slb_initialize(void)
 #endif
        }
 
+       get_paca()->stab_rr = SLB_NUM_BOLTED;
+
        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
-#ifndef CONFIG_PPC_ISERIES
- {
-       unsigned long lflags, vflags;
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               return;
 
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;
@@ -247,8 +250,4 @@ void slb_initialize(void)
         * elsewhere, we'll call _switch() which will bolt in the new
         * one. */
        asm volatile("isync":::"memory");
- }
-#endif /* CONFIG_PPC_ISERIES */
-
-       get_paca()->stab_rr = SLB_NUM_BOLTED;
 }
index 0b5df9c96ae0a4d1560724e4ddf9d7ca2d1b1f37..4ccef2d5530cc476e7a0aed788a9952183bb129d 100644 (file)
@@ -11,6 +11,7 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
                timer_int.o )
 
 oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
+oprofile-$(CONFIG_PPC_CELL_NATIVE) += op_model_cell.o
 oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
 oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
 oprofile-$(CONFIG_6xx) += op_model_7450.o
index 63bbef3b63f18be04701394cb17b255d1c42b20c..b6d82390b6a6a8bbf854b04d72135ba91439da97 100644 (file)
@@ -69,7 +69,10 @@ static void op_powerpc_cpu_start(void *dummy)
 
 static int op_powerpc_start(void)
 {
-       on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
+       if (model->global_start)
+               model->global_start(ctr);
+       if (model->start)
+               on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
        return 0;
 }
 
@@ -80,7 +83,10 @@ static inline void op_powerpc_cpu_stop(void *dummy)
 
 static void op_powerpc_stop(void)
 {
-       on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+       if (model->stop)
+               on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+       if (model->global_stop)
+               model->global_stop();
 }
 
 static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
@@ -141,6 +147,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
        switch (cur_cpu_spec->oprofile_type) {
 #ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_CELL_NATIVE
+               case PPC_OPROFILE_CELL:
+                       model = &op_model_cell;
+                       break;
+#endif
                case PPC_OPROFILE_RS64:
                        model = &op_model_rs64;
                        break;
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
new file mode 100644 (file)
index 0000000..2eb15f3
--- /dev/null
@@ -0,0 +1,724 @@
+/*
+ * Cell Broadband Engine OProfile Support
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author: David Erb (djerb@us.ibm.com)
+ * Modifications:
+ *         Carl Love <carll@us.ibm.com>
+ *         Maynard Johnson <maynardj@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/oprofile.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <asm/cell-pmu.h>
+#include <asm/cputable.h>
+#include <asm/firmware.h>
+#include <asm/io.h>
+#include <asm/oprofile_impl.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/rtas.h>
+#include <asm/system.h>
+
+#include "../platforms/cell/interrupt.h"
+
+#define PPU_CYCLES_EVENT_NUM 1 /*  event number for CYCLES */
+#define CBE_COUNT_ALL_CYCLES 0x42800000        /* PPU cycle event specifier */
+
+#define NUM_THREADS 2
+#define VIRT_CNTR_SW_TIME_NS 100000000 /* 0.1 seconds */
+
+struct pmc_cntrl_data {
+       unsigned long vcntr;
+       unsigned long evnts;
+       unsigned long masks;
+       unsigned long enabled;
+};
+
+/*
+ * ibm,cbe-perftools rtas parameters
+ */
+
+struct pm_signal {
+       u16 cpu;                /* Processor to modify */
+       u16 sub_unit;           /* hw subunit this applies to (if applicable) */
+       u16 signal_group;       /* Signal Group to Enable/Disable */
+       u8 bus_word;            /* Enable/Disable on this Trace/Trigger/Event
+                                * Bus Word(s) (bitmask)
+                                */
+       u8 bit;                 /* Trigger/Event bit (if applicable) */
+};
+
+/*
+ * rtas call arguments
+ */
+enum {
+       SUBFUNC_RESET = 1,
+       SUBFUNC_ACTIVATE = 2,
+       SUBFUNC_DEACTIVATE = 3,
+
+       PASSTHRU_IGNORE = 0,
+       PASSTHRU_ENABLE = 1,
+       PASSTHRU_DISABLE = 2,
+};
+
+struct pm_cntrl {
+       u16 enable;
+       u16 stop_at_max;
+       u16 trace_mode;
+       u16 freeze;
+       u16 count_mode;
+};
+
+static struct {
+       u32 group_control;
+       u32 debug_bus_control;
+       struct pm_cntrl pm_cntrl;
+       u32 pm07_cntrl[NR_PHYS_CTRS];
+} pm_regs;
+
+
+#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
+#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
+#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
+#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
+#define GET_COUNT_CYCLES(x) (x & 0x00000001)
+#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
+
+
+static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
+
+static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
+
+/* Interpretation of hdw_thread:
+ * 0 - even virtual cpus 0, 2, 4,...
+ * 1 - odd virtual cpus 1, 3, 5, ...
+ */
+static u32 hdw_thread;
+
+static u32 virt_cntr_inter_mask;
+static struct timer_list timer_virt_cntr;
+
+/* pm_signal needs to be global since it is initialized in
+ * cell_reg_setup at the time when the necessary information
+ * is available.
+ */
+static struct pm_signal pm_signal[NR_PHYS_CTRS];
+static int pm_rtas_token;
+
+static u32 reset_value[NR_PHYS_CTRS];
+static int num_counters;
+static int oprofile_running;
+static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED;
+
+static u32 ctr_enabled;
+
+static unsigned char trace_bus[4];
+static unsigned char input_bus[2];
+
+/*
+ * Firmware interface functions
+ */
+static int
+rtas_ibm_cbe_perftools(int subfunc, int passthru,
+                      void *address, unsigned long length)
+{
+       u64 paddr = __pa(address);
+
+       return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc, passthru,
+                        paddr >> 32, paddr & 0xffffffff, length);
+}
+
+static void pm_rtas_reset_signals(u32 node)
+{
+       int ret;
+       struct pm_signal pm_signal_local;
+
+       /*  The debug bus is being set to the passthru disable state.
+        *  However, the FW still expects at least one legal signal routing
+        *  entry or it will return an error on the arguments.  If we don't
+        *  supply a valid entry, we must ignore all return values.  Ignoring
+        *  all return values means we might miss an error we should be
+        *  concerned about.
+        */
+
+       /*  fw expects physical cpu #. */
+       pm_signal_local.cpu = node;
+       pm_signal_local.signal_group = 21;
+       pm_signal_local.bus_word = 1;
+       pm_signal_local.sub_unit = 0;
+       pm_signal_local.bit = 0;
+
+       ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
+                                    &pm_signal_local,
+                                    sizeof(struct pm_signal));
+
+       if (ret)
+               printk(KERN_WARNING "%s: rtas returned: %d\n",
+                      __FUNCTION__, ret);
+}
+
+static void pm_rtas_activate_signals(u32 node, u32 count)
+{
+       int ret;
+       int j;
+       struct pm_signal pm_signal_local[NR_PHYS_CTRS];
+
+       for (j = 0; j < count; j++) {
+               /* fw expects physical cpu # */
+               pm_signal_local[j].cpu = node;
+               pm_signal_local[j].signal_group = pm_signal[j].signal_group;
+               pm_signal_local[j].bus_word = pm_signal[j].bus_word;
+               pm_signal_local[j].sub_unit = pm_signal[j].sub_unit;
+               pm_signal_local[j].bit = pm_signal[j].bit;
+       }
+
+       ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
+                                    pm_signal_local,
+                                    count * sizeof(struct pm_signal));
+
+       if (ret)
+               printk(KERN_WARNING "%s: rtas returned: %d\n",
+                      __FUNCTION__, ret);
+}
+
+/*
+ * PM Signal functions
+ */
+static void set_pm_event(u32 ctr, int event, u32 unit_mask)
+{
+       struct pm_signal *p;
+       u32 signal_bit;
+       u32 bus_word, bus_type, count_cycles, polarity, input_control;
+       int j, i;
+
+       if (event == PPU_CYCLES_EVENT_NUM) {
+               /* Special Event: Count all cpu cycles */
+               pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
+               p = &(pm_signal[ctr]);
+               p->signal_group = 21;
+               p->bus_word = 1;
+               p->sub_unit = 0;
+               p->bit = 0;
+               goto out;
+       } else {
+               pm_regs.pm07_cntrl[ctr] = 0;
+       }
+
+       bus_word = GET_BUS_WORD(unit_mask);
+       bus_type = GET_BUS_TYPE(unit_mask);
+       count_cycles = GET_COUNT_CYCLES(unit_mask);
+       polarity = GET_POLARITY(unit_mask);
+       input_control = GET_INPUT_CONTROL(unit_mask);
+       signal_bit = (event % 100);
+
+       p = &(pm_signal[ctr]);
+
+       p->signal_group = event / 100;
+       p->bus_word = bus_word;
+       p->sub_unit = unit_mask & 0x0000f000;
+
+       pm_regs.pm07_cntrl[ctr] = 0;
+       pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
+       pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
+       pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
+
+       if (input_control == 0) {
+               if (signal_bit > 31) {
+                       signal_bit -= 32;
+                       if (bus_word == 0x3)
+                               bus_word = 0x2;
+                       else if (bus_word == 0xc)
+                               bus_word = 0x8;
+               }
+
+               if ((bus_type == 0) && p->signal_group >= 60)
+                       bus_type = 2;
+               if ((bus_type == 1) && p->signal_group >= 50)
+                       bus_type = 0;
+
+               pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
+       } else {
+               pm_regs.pm07_cntrl[ctr] = 0;
+               p->bit = signal_bit;
+       }
+
+       for (i = 0; i < 4; i++) {
+               if (bus_word & (1 << i)) {
+                       pm_regs.debug_bus_control |=
+                           (bus_type << (31 - (2 * i) + 1));
+
+                       for (j = 0; j < 2; j++) {
+                               if (input_bus[j] == 0xff) {
+                                       input_bus[j] = i;
+                                       pm_regs.group_control |=
+                                           (i << (31 - i));
+                                       break;
+                               }
+                       }
+               }
+       }
+out:
+       ;
+}
+
+static void write_pm_cntrl(int cpu, struct pm_cntrl *pm_cntrl)
+{
+       /* Oprofile will use 32 bit counters, set bits 7:10 to 0 */
+       u32 val = 0;
+       if (pm_cntrl->enable == 1)
+               val |= CBE_PM_ENABLE_PERF_MON;
+
+       if (pm_cntrl->stop_at_max == 1)
+               val |= CBE_PM_STOP_AT_MAX;
+
+       if (pm_cntrl->trace_mode == 1)
+               val |= CBE_PM_TRACE_MODE_SET(pm_cntrl->trace_mode);
+
+       if (pm_cntrl->freeze == 1)
+               val |= CBE_PM_FREEZE_ALL_CTRS;
+
+       /* Routine set_count_mode must be called previously to set
+        * the count mode based on the user selection of user and kernel.
+        */
+       val |= CBE_PM_COUNT_MODE_SET(pm_cntrl->count_mode);
+       cbe_write_pm(cpu, pm_control, val);
+}
+
+static inline void
+set_count_mode(u32 kernel, u32 user, struct pm_cntrl *pm_cntrl)
+{
+       /* The user must specify user and kernel if they want them. If
+        *  neither is specified, OProfile will count in hypervisor mode
+        */
+       if (kernel) {
+               if (user)
+                       pm_cntrl->count_mode = CBE_COUNT_ALL_MODES;
+               else
+                       pm_cntrl->count_mode = CBE_COUNT_SUPERVISOR_MODE;
+       } else {
+               if (user)
+                       pm_cntrl->count_mode = CBE_COUNT_PROBLEM_MODE;
+               else
+                       pm_cntrl->count_mode = CBE_COUNT_HYPERVISOR_MODE;
+       }
+}
+
+static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl)
+{
+
+       pm07_cntrl[ctr] |= PM07_CTR_ENABLE(1);
+       cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
+}
+
+/*
+ * Oprofile is expected to collect data on all CPUs simultaneously.
+ * However, there is one set of performance counters per node.  There are
+ * two hardware threads or virtual CPUs on each node.  Hence, OProfile must
+ * multiplex in time the performance counter collection on the two virtual
+ * CPUs.  The multiplexing of the performance counters is done by this
+ * virtual counter routine.
+ *
+ * The pmc_values used below is defined as 'per-cpu' but its use is
+ * more akin to 'per-node'.  We need to store two sets of counter
+ * values per node -- one for the previous run and one for the next.
+ * The per-cpu[NR_PHYS_CTRS] gives us the storage we need.  Each odd/even
+ * pair of per-cpu arrays is used for storing the previous and next
+ * pmc values for a given node.
+ * NOTE: We use the per-cpu variable to improve cache performance.
+ */
+static void cell_virtual_cntr(unsigned long data)
+{
+       /* This routine will alternate loading the virtual counters for
+        * virtual CPUs
+        */
+       int i, prev_hdw_thread, next_hdw_thread;
+       u32 cpu;
+       unsigned long flags;
+
+       /* Make sure that the interrupt handler and
+        * the virt counter are not both playing with
+        * the counters on the same node.
+        */
+
+       spin_lock_irqsave(&virt_cntr_lock, flags);
+
+       prev_hdw_thread = hdw_thread;
+
+       /* switch the cpu handling the interrupts */
+       hdw_thread = 1 ^ hdw_thread;
+       next_hdw_thread = hdw_thread;
+
+       /* The following is done only once for each node, but
+        * we need cpu #, not node #, to pass to the cbe_xxx functions.
+        */
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               /* stop counters, save counter values, restore counts
+                * for previous thread
+                */
+               cbe_disable_pm(cpu);
+               cbe_disable_pm_interrupts(cpu);
+               for (i = 0; i < num_counters; i++) {
+                       per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
+                           = cbe_read_ctr(cpu, i);
+
+                       if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
+                           == 0xFFFFFFFF)
+                               /* If the cntr value is 0xffffffff, we must
+                                * reset that to 0xfffffff0 when the current
+                                * thread is restarted.  This will generate a new
+                                * interrupt and make sure that we never restore
+                                * the counters to the max value.  If the counters
+                                * were restored to the max value, they do not
+                                * increment and no interrupts are generated.  Hence
+                                * no more samples will be collected on that cpu.
+                                */
+                               cbe_write_ctr(cpu, i, 0xFFFFFFF0);
+                       else
+                               cbe_write_ctr(cpu, i,
+                                             per_cpu(pmc_values,
+                                                     cpu +
+                                                     next_hdw_thread)[i]);
+               }
+
+               /* Switch to the other thread. Change the interrupt
+                * and control regs to be scheduled on the CPU
+                * corresponding to the thread to execute.
+                */
+               for (i = 0; i < num_counters; i++) {
+                       if (pmc_cntrl[next_hdw_thread][i].enabled) {
+                               /* There are some per thread events.
+                                * Must do the set event, enable_cntr
+                                * for each cpu.
+                                */
+                               set_pm_event(i,
+                                    pmc_cntrl[next_hdw_thread][i].evnts,
+                                    pmc_cntrl[next_hdw_thread][i].masks);
+                               enable_ctr(cpu, i,
+                                          pm_regs.pm07_cntrl);
+                       } else {
+                               cbe_write_pm07_control(cpu, i, 0);
+                       }
+               }
+
+               /* Enable interrupts on the CPU thread that is starting */
+               cbe_enable_pm_interrupts(cpu, next_hdw_thread,
+                                        virt_cntr_inter_mask);
+               cbe_enable_pm(cpu);
+       }
+
+       spin_unlock_irqrestore(&virt_cntr_lock, flags);
+
+       mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
+}
+
+static void start_virt_cntrs(void)
+{
+       init_timer(&timer_virt_cntr);
+       timer_virt_cntr.function = cell_virtual_cntr;
+       timer_virt_cntr.data = 0UL;
+       timer_virt_cntr.expires = jiffies + HZ / 10;
+       add_timer(&timer_virt_cntr);
+}
+
+/* This function is called once for all cpus combined */
+static void
+cell_reg_setup(struct op_counter_config *ctr,
+              struct op_system_config *sys, int num_ctrs)
+{
+       int i, j, cpu;
+
+       pm_rtas_token = rtas_token("ibm,cbe-perftools");
+       if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
+               printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
+                      __FUNCTION__);
+               goto out;
+       }
+
+       num_counters = num_ctrs;
+
+       pm_regs.group_control = 0;
+       pm_regs.debug_bus_control = 0;
+
+       /* setup the pm_control register */
+       memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
+       pm_regs.pm_cntrl.stop_at_max = 1;
+       pm_regs.pm_cntrl.trace_mode = 0;
+       pm_regs.pm_cntrl.freeze = 1;
+
+       set_count_mode(sys->enable_kernel, sys->enable_user,
+                      &pm_regs.pm_cntrl);
+
+       /* Setup the thread 0 events */
+       for (i = 0; i < num_ctrs; ++i) {
+
+               pmc_cntrl[0][i].evnts = ctr[i].event;
+               pmc_cntrl[0][i].masks = ctr[i].unit_mask;
+               pmc_cntrl[0][i].enabled = ctr[i].enabled;
+               pmc_cntrl[0][i].vcntr = i;
+
+               for_each_possible_cpu(j)
+                       per_cpu(pmc_values, j)[i] = 0;
+       }
+
+       /* Setup the thread 1 events, map the thread 0 event to the
+        * equivalent thread 1 event.
+        */
+       for (i = 0; i < num_ctrs; ++i) {
+               if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
+                       pmc_cntrl[1][i].evnts = ctr[i].event + 19;
+               else if (ctr[i].event == 2203)
+                       pmc_cntrl[1][i].evnts = ctr[i].event;
+               else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
+                       pmc_cntrl[1][i].evnts = ctr[i].event + 16;
+               else
+                       pmc_cntrl[1][i].evnts = ctr[i].event;
+
+               pmc_cntrl[1][i].masks = ctr[i].unit_mask;
+               pmc_cntrl[1][i].enabled = ctr[i].enabled;
+               pmc_cntrl[1][i].vcntr = i;
+       }
+
+       for (i = 0; i < 4; i++)
+               trace_bus[i] = 0xff;
+
+       for (i = 0; i < 2; i++)
+               input_bus[i] = 0xff;
+
+       /* Our counters count up, and "count" refers to
+        * how much before the next interrupt, and we interrupt
+        * on overflow.  So we calculate the starting value
+        * which will give us "count" until overflow.
+        * Then we set the events on the enabled counters.
+        */
+       for (i = 0; i < num_counters; ++i) {
+               /* start with virtual counter set 0 */
+               if (pmc_cntrl[0][i].enabled) {
+                       /* Using 32bit counters, reset max - count */
+                       reset_value[i] = 0xFFFFFFFF - ctr[i].count;
+                       set_pm_event(i,
+                                    pmc_cntrl[0][i].evnts,
+                                    pmc_cntrl[0][i].masks);
+
+                       /* global, used by cell_cpu_setup */
+                       ctr_enabled |= (1 << i);
+               }
+       }
+
+       /* initialize the previous counts for the virtual cntrs */
+       for_each_online_cpu(cpu)
+               for (i = 0; i < num_counters; ++i) {
+                       per_cpu(pmc_values, cpu)[i] = reset_value[i];
+               }
+out:
+       ;
+}
+
+/* This function is called once for each cpu */
+static void cell_cpu_setup(struct op_counter_config *cntr)
+{
+       u32 cpu = smp_processor_id();
+       u32 num_enabled = 0;
+       int i;
+
+       /* There is one performance monitor per processor chip (i.e. node),
+        * so we only need to perform this function once per node.
+        */
+       if (cbe_get_hw_thread_id(cpu))
+               goto out;
+
+       if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
+               printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
+                      __FUNCTION__);
+               goto out;
+       }
+
+       /* Stop all counters */
+       cbe_disable_pm(cpu);
+       cbe_disable_pm_interrupts(cpu);
+
+       cbe_write_pm(cpu, pm_interval, 0);
+       cbe_write_pm(cpu, pm_start_stop, 0);
+       cbe_write_pm(cpu, group_control, pm_regs.group_control);
+       cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
+       write_pm_cntrl(cpu, &pm_regs.pm_cntrl);
+
+       for (i = 0; i < num_counters; ++i) {
+               if (ctr_enabled & (1 << i)) {
+                       pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
+                       num_enabled++;
+               }
+       }
+
+       pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
+out:
+       ;
+}
+
+static void cell_global_start(struct op_counter_config *ctr)
+{
+       u32 cpu;
+       u32 interrupt_mask = 0;
+       u32 i;
+
+       /* This routine gets called once for the system.
+        * There is one performance monitor per node, so we
+        * only need to perform this function once per node.
+        */
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               interrupt_mask = 0;
+
+               for (i = 0; i < num_counters; ++i) {
+                       if (ctr_enabled & (1 << i)) {
+                               cbe_write_ctr(cpu, i, reset_value[i]);
+                               enable_ctr(cpu, i, pm_regs.pm07_cntrl);
+                               interrupt_mask |=
+                                   CBE_PM_CTR_OVERFLOW_INTR(i);
+                       } else {
+                               /* Disable counter */
+                               cbe_write_pm07_control(cpu, i, 0);
+                       }
+               }
+
+               cbe_clear_pm_interrupts(cpu);
+               cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
+               cbe_enable_pm(cpu);
+       }
+
+       virt_cntr_inter_mask = interrupt_mask;
+       oprofile_running = 1;
+       smp_wmb();
+
+       /* NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
+        * executed which manipulates the PMU.  We start the "virtual counter"
+        * here so that we do not need to synchronize access to the PMU in
+        * the above for-loop.
+        */
+       start_virt_cntrs();
+}
+
+static void cell_global_stop(void)
+{
+       int cpu;
+
+       /* This routine will be called once for the system.
+        * There is one performance monitor per node, so we
+        * only need to perform this function once per node.
+        */
+       del_timer_sync(&timer_virt_cntr);
+       oprofile_running = 0;
+       smp_wmb();
+
+       for_each_online_cpu(cpu) {
+               if (cbe_get_hw_thread_id(cpu))
+                       continue;
+
+               cbe_sync_irq(cbe_cpu_to_node(cpu));
+               /* Stop the counters */
+               cbe_disable_pm(cpu);
+
+               /* Deactivate the signals */
+               pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
+
+               /* Deactivate interrupts */
+               cbe_disable_pm_interrupts(cpu);
+       }
+}
+
+static void
+cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
+{
+       u32 cpu;
+       u64 pc;
+       int is_kernel;
+       unsigned long flags = 0;
+       u32 interrupt_mask;
+       int i;
+
+       cpu = smp_processor_id();
+
+       /* Need to make sure the interrupt handler and the virt counter
+        * routine are not running at the same time. See the
+        * cell_virtual_cntr() routine for additional comments.
+        */
+       spin_lock_irqsave(&virt_cntr_lock, flags);
+
+       /* Need to disable and reenable the performance counters
+        * to get the desired behavior from the hardware.  This
+        * is hardware specific.
+        */
+
+       cbe_disable_pm(cpu);
+
+       interrupt_mask = cbe_clear_pm_interrupts(cpu);
+
+       /* If the interrupt mask has been cleared, then the virt cntr
+        * has cleared the interrupt.  When the thread that generated
+        * the interrupt is restored, the data count will be restored to
+        * 0xfffffff0 to cause the interrupt to be regenerated.
+        */
+
+       if ((oprofile_running == 1) && (interrupt_mask != 0)) {
+               pc = regs->nip;
+               is_kernel = is_kernel_addr(pc);
+
+               for (i = 0; i < num_counters; ++i) {
+                       if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
+                           && ctr[i].enabled) {
+                               oprofile_add_pc(pc, is_kernel, i);
+                               cbe_write_ctr(cpu, i, reset_value[i]);
+                       }
+               }
+
+               /* The counters were frozen by the interrupt.
+                * Reenable the interrupt and restart the counters.
+                * If there was a race between the interrupt handler and
+                * the virtual counter routine, the virtual counter
+                * routine may have cleared the interrupts.  Hence we must
+                * use the virt_cntr_inter_mask to re-enable the interrupts.
+                */
+               cbe_enable_pm_interrupts(cpu, hdw_thread,
+                                        virt_cntr_inter_mask);
+
+               /* The writes to the various performance counters only writes
+                * to a latch.  The new values (interrupt setting bits, reset
+                * counter value etc.) are not copied to the actual registers
+                * until the performance monitor is enabled.  In order to get
+                * this to work as desired, the performance monitor needs to
+                * be disabled while writing to the latches.  This is a
+                * HW design issue.
+                */
+               cbe_enable_pm(cpu);
+       }
+       spin_unlock_irqrestore(&virt_cntr_lock, flags);
+}
+
+struct op_powerpc_model op_model_cell = {
+       .reg_setup = cell_reg_setup,
+       .cpu_setup = cell_cpu_setup,
+       .global_start = cell_global_start,
+       .global_stop = cell_global_stop,
+       .handle_interrupt = cell_handle_interrupt,
+};
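
The comment above cell_virtual_cntr() explains that the two hardware threads of a Cell node share one set of physical counters, so their values are time-multiplexed by a periodic timer. The following stand-alone sketch is illustrative only — the storage layout and helper names are invented here and are not part of this patch — but it shows the core save/restore step, including the clamp to 0xFFFFFFF0 that keeps a saturated counter generating overflow interrupts:

#include <stdint.h>

#define NUM_CTRS 4

static uint32_t hw_ctr[NUM_CTRS];      /* stand-in for the physical counters */
static uint32_t saved[2][NUM_CTRS];    /* one row per hardware thread */
static unsigned int active_thread;     /* 0 = even virtual cpus, 1 = odd */

static uint32_t hw_read_ctr(int i)              { return hw_ctr[i]; }
static void     hw_write_ctr(int i, uint32_t v) { hw_ctr[i] = v; }

/* Called from a periodic timer: swap which thread owns the counters. */
static void switch_thread(void)
{
        unsigned int prev = active_thread;
        unsigned int next = active_thread ^ 1;
        int i;

        for (i = 0; i < NUM_CTRS; i++) {
                /* save the outgoing thread's counts */
                saved[prev][i] = hw_read_ctr(i);

                /* never restore a counter at its maximum: it would stop
                 * incrementing and no further overflow interrupts would
                 * fire, so back it off a little first */
                if (saved[next][i] == 0xFFFFFFFF)
                        saved[next][i] = 0xFFFFFFF0;

                hw_write_ctr(i, saved[next][i]);
        }
        active_thread = next;
}

In the driver itself the same idea is carried out per node with per_cpu(pmc_values, cpu + thread) and the cbe_read_ctr()/cbe_write_ctr() accessors, under virt_cntr_lock.
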
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
new file mode 100644 (file)
index 0000000..a46184a
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile for 52xx based boards
+#
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-y                          += mpc52xx_pic.o mpc52xx_common.o
+endif
+
+obj-$(CONFIG_PPC_EFIKA)                += efika-setup.o efika-pci.o
+obj-$(CONFIG_PPC_LITE5200)     += lite5200.o
diff --git a/arch/powerpc/platforms/52xx/efika-pci.c b/arch/powerpc/platforms/52xx/efika-pci.c
new file mode 100644 (file)
index 0000000..62e05b2
--- /dev/null
@@ -0,0 +1,119 @@
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/sections.h>
+#include <asm/pci-bridge.h>
+#include <asm/rtas.h>
+
+#include "efika.h"
+
+#ifdef CONFIG_PCI
+/*
+ * Access functions for PCI config space using RTAS calls.
+ */
+static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+                           int len, u32 * val)
+{
+       struct pci_controller *hose = bus->sysdata;
+       unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+           | (((bus->number - hose->first_busno) & 0xff) << 16)
+           | (hose->index << 24);
+       int ret = -1;
+       int rval;
+
+       rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
+       *val = ret;
+       return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int rtas_write_config(struct pci_bus *bus, unsigned int devfn,
+                            int offset, int len, u32 val)
+{
+       struct pci_controller *hose = bus->sysdata;
+       unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
+           | (((bus->number - hose->first_busno) & 0xff) << 16)
+           | (hose->index << 24);
+       int rval;
+
+       rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
+                        addr, len, val);
+       return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops rtas_pci_ops = {
+       rtas_read_config,
+       rtas_write_config
+};
+
+void __init efika_pcisetup(void)
+{
+       const int *bus_range;
+       int len;
+       struct pci_controller *hose;
+       struct device_node *root;
+       struct device_node *pcictrl;
+
+       root = of_find_node_by_path("/");
+       if (root == NULL) {
+               printk(KERN_WARNING EFIKA_PLATFORM_NAME
+                      ": Unable to find the root node\n");
+               return;
+       }
+
+       for (pcictrl = NULL;;) {
+               pcictrl = of_get_next_child(root, pcictrl);
+               if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0))
+                       break;
+       }
+
+       of_node_put(root);
+
+       if (pcictrl == NULL) {
+               printk(KERN_WARNING EFIKA_PLATFORM_NAME
+                      ": Unable to find the PCI bridge node\n");
+               return;
+       }
+
+       bus_range = get_property(pcictrl, "bus-range", &len);
+       if (bus_range == NULL || len < 2 * sizeof(int)) {
+               printk(KERN_WARNING EFIKA_PLATFORM_NAME
+                      ": Can't get bus-range for %s\n", pcictrl->full_name);
+               return;
+       }
+
+       if (bus_range[1] == bus_range[0])
+               printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI bus %d",
+                      bus_range[0]);
+       else
+               printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d",
+                      bus_range[0], bus_range[1]);
+       printk(" controlled by %s\n", pcictrl->full_name);
+       printk("\n");
+
+       hose = pcibios_alloc_controller();
+       if (!hose) {
+               printk(KERN_WARNING EFIKA_PLATFORM_NAME
+                      ": Can't allocate PCI controller structure for %s\n",
+                      pcictrl->full_name);
+               return;
+       }
+
+       hose->arch_data = of_node_get(pcictrl);
+       hose->first_busno = bus_range[0];
+       hose->last_busno = bus_range[1];
+       hose->ops = &rtas_pci_ops;
+
+       pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+}
+
+#else
+void __init efika_pcisetup(void)
+{}
+#endif
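
For reference, the config-space address handed to the "read-pci-config" and "write-pci-config" RTAS calls above is a single word packing the register offset, devfn, bus offset and controller index. A small sketch of that packing — the helper name is made up; the field layout is the one used in rtas_read_config()/rtas_write_config():

static unsigned long efika_rtas_cfg_addr(unsigned int hose_index,
                                         unsigned int bus_offset,
                                         unsigned int devfn,
                                         unsigned int offset)
{
        return (offset & 0xff)                      /* bits  0..7  : register offset   */
             | ((devfn & 0xff) << 8)                /* bits  8..15 : device/function   */
             | ((bus_offset & 0xff) << 16)          /* bits 16..23 : bus - first_busno */
             | ((unsigned long)hose_index << 24);   /* bits 24..31 : controller index  */
}
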
diff --git a/arch/powerpc/platforms/52xx/efika-setup.c b/arch/powerpc/platforms/52xx/efika-setup.c
new file mode 100644 (file)
index 0000000..110c980
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ *
+ * Efika 5K2 platform setup
+ * Some code is inspired by the lite5200b platform.
+ * 
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/utsrelease.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/machdep.h>
+#include <asm/rtas.h>
+#include <asm/of_device.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+#include "efika.h"
+
+static void efika_show_cpuinfo(struct seq_file *m)
+{
+       struct device_node *root;
+       const char *revision = NULL;
+       const char *codegendescription = NULL;
+       const char *codegenvendor = NULL;
+
+       root = of_find_node_by_path("/");
+       if (root) {
+               revision = get_property(root, "revision", NULL);
+               codegendescription =
+                   get_property(root, "CODEGEN,description", NULL);
+               codegenvendor = get_property(root, "CODEGEN,vendor", NULL);
+
+               of_node_put(root);
+       }
+
+       if (codegendescription)
+               seq_printf(m, "machine\t\t: %s\n", codegendescription);
+       else
+               seq_printf(m, "machine\t\t: Efika\n");
+
+       if (revision)
+               seq_printf(m, "revision\t: %s\n", revision);
+
+       if (codegenvendor)
+               seq_printf(m, "vendor\t\t: %s\n", codegenvendor);
+
+}
+
+static void __init efika_setup_arch(void)
+{
+       rtas_initialize();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       initrd_below_start_ok = 1;
+
+       if (initrd_start)
+               ROOT_DEV = Root_RAM0;
+       else
+#endif
+               ROOT_DEV = Root_SDA2;   /* sda2 (sda1 is for the kernel) */
+
+       efika_pcisetup();
+
+       if (ppc_md.progress)
+               ppc_md.progress("Linux/PPC " UTS_RELEASE " running on Efika ;-)\n", 0x0);
+}
+
+static void __init efika_init(void)
+{
+       struct device_node *np;
+       struct device_node *cnp = NULL;
+       const u32 *base;
+
+       /* Find every child of the SOC node and add it to of_platform */
+       np = of_find_node_by_name(NULL, "builtin");
+       if (np) {
+               char name[BUS_ID_SIZE];
+               while ((cnp = of_get_next_child(np, cnp))) {
+                       strcpy(name, cnp->name);
+
+                       base = get_property(cnp, "reg", NULL);
+                       if (base == NULL)
+                               continue;
+
+                       snprintf(name+strlen(name), BUS_ID_SIZE, "@%x", *base);
+                       of_platform_device_create(cnp, name, NULL);
+
+                       printk(KERN_INFO EFIKA_PLATFORM_NAME" : Added %s (type '%s' at '%s') to the known devices\n", name, cnp->type, cnp->full_name);
+               }
+       }
+
+       if (ppc_md.progress)
+               ppc_md.progress("  Have fun with your Efika!    ", 0x7777);
+}
+
+static int __init efika_probe(void)
+{
+       char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+                                         "model", NULL);
+
+       if (model == NULL)
+               return 0;
+       if (strcmp(model, "EFIKA5K2"))
+               return 0;
+
+       ISA_DMA_THRESHOLD = ~0L;
+       DMA_MODE_READ = 0x44;
+       DMA_MODE_WRITE = 0x48;
+
+       return 1;
+}
+
+define_machine(efika)
+{
+       .name = EFIKA_PLATFORM_NAME,
+       .probe = efika_probe,
+       .setup_arch = efika_setup_arch,
+       .init = efika_init,
+       .show_cpuinfo = efika_show_cpuinfo,
+       .init_IRQ = mpc52xx_init_irq,
+       .get_irq = mpc52xx_get_irq,
+       .restart = rtas_restart,
+       .power_off = rtas_power_off,
+       .halt = rtas_halt,
+       .set_rtc_time = rtas_set_rtc_time,
+       .get_rtc_time = rtas_get_rtc_time,
+       .progress = rtas_progress,
+       .get_boot_time = rtas_get_boot_time,
+       .calibrate_decr = generic_calibrate_decr,
+       .phys_mem_access_prot = pci_phys_mem_access_prot,
+};
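
efika_init() above names each of_platform device after its node name plus the first cell of its "reg" property, giving the usual "name@unit-address" form. A trivial user-space illustration of the resulting string follows; the node name and register base are examples, not values from a real Efika device tree:

#include <stdio.h>

int main(void)
{
        char name[32];
        const char *node_name = "ethernet";     /* example child of "builtin" */
        unsigned int reg_base = 0xf0003000;     /* example first "reg" cell    */

        snprintf(name, sizeof(name), "%s@%x", node_name, reg_base);
        printf("%s\n", name);                   /* prints: ethernet@f0003000   */
        return 0;
}
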
diff --git a/arch/powerpc/platforms/52xx/efika.h b/arch/powerpc/platforms/52xx/efika.h
new file mode 100644 (file)
index 0000000..2f060fd
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Efika 5K2 platform setup - Header file
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef __ARCH_POWERPC_EFIKA__
+#define __ARCH_POWERPC_EFIKA__
+
+#define EFIKA_PLATFORM_NAME "Efika"
+
+extern void __init efika_pcisetup(void);
+
+#endif
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
new file mode 100644 (file)
index 0000000..a375c15
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Freescale Lite5200 board support
+ *
+ * Written by: Grant Likely <grant.likely@secretlab.ca>
+ *
+ * Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved.
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Description:
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+#include <asm/of_platform.h>
+
+#include <asm/mpc52xx.h>
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+
+static void __init
+lite52xx_setup_cpu(void)
+{
+       struct mpc52xx_gpio __iomem *gpio;
+       u32 port_config;
+
+       /* Map zones */
+       gpio = mpc52xx_find_and_map("mpc52xx-gpio");
+       if (!gpio) {
+               printk(KERN_ERR __FILE__ ": "
+                       "Error while mapping GPIO register for port config. "
+                       "Expect some abnormal behavior\n");
+               goto error;
+       }
+
+       /* Set port config */
+       port_config = in_be32(&gpio->port_config);
+
+       port_config &= ~0x00800000;     /* 48 MHz internal, pin is GPIO */
+
+       port_config &= ~0x00007000;     /* USB port : Differential mode */
+       port_config |=  0x00001000;     /*            USB 1 only        */
+
+       port_config &= ~0x03000000;     /* ATA CS is on csb_4/5         */
+       port_config |=  0x01000000;
+
+       pr_debug("port_config: old:%x new:%x\n",
+                in_be32(&gpio->port_config), port_config);
+       out_be32(&gpio->port_config, port_config);
+
+       /* Unmap zone */
+error:
+       iounmap(gpio);
+}
+
+static void __init lite52xx_setup_arch(void)
+{
+       struct device_node *np;
+
+       if (ppc_md.progress)
+               ppc_md.progress("lite52xx_setup_arch()", 0);
+
+       np = of_find_node_by_type(NULL, "cpu");
+       if (np) {
+               unsigned int *fp =
+                   (int *)get_property(np, "clock-frequency", NULL);
+               if (fp != 0)
+                       loops_per_jiffy = *fp / HZ;
+               else
+                       loops_per_jiffy = 50000000 / HZ;
+               of_node_put(np);
+       }
+
+       /* CPU & Port mux setup */
+       mpc52xx_setup_cpu();    /* Generic */
+       lite52xx_setup_cpu();   /* Platform specific */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (initrd_start)
+               ROOT_DEV = Root_RAM0;
+       else
+#endif
+#ifdef  CONFIG_ROOT_NFS
+               ROOT_DEV = Root_NFS;
+#else
+               ROOT_DEV = Root_HDA1;
+#endif
+
+}
+
+void lite52xx_show_cpuinfo(struct seq_file *m)
+{
+       struct device_node* np = of_find_all_nodes(NULL);
+       const char *model = NULL;
+
+       if (np)
+               model = get_property(np, "model", NULL);
+
+       seq_printf(m, "vendor\t\t:      Freescale Semiconductor\n");
+       seq_printf(m, "machine\t\t:     %s\n", model ? model : "unknown");
+
+       of_node_put(np);
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init lite52xx_probe(void)
+{
+       unsigned long node = of_get_flat_dt_root();
+       const char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+       if (!of_flat_dt_is_compatible(node, "lite52xx"))
+               return 0;
+       pr_debug("%s board w/ mpc52xx found\n", model ? model : "unknown");
+
+       return 1;
+}
+
+define_machine(lite52xx) {
+       .name           = "lite52xx",
+       .probe          = lite52xx_probe,
+       .setup_arch     = lite52xx_setup_arch,
+       .init_IRQ       = mpc52xx_init_irq,
+       .get_irq        = mpc52xx_get_irq,
+       .show_cpuinfo   = lite52xx_show_cpuinfo,
+       .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
new file mode 100644 (file)
index 0000000..8331ff4
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ *
+ * Utility functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/of_platform.h>
+#include <asm/mpc52xx.h>
+
+
+void __iomem *
+mpc52xx_find_and_map(const char *compatible)
+{
+       struct device_node *ofn;
+       const u32 *regaddr_p;
+       u64 regaddr64, size64;
+
+       ofn = of_find_compatible_node(NULL, NULL, compatible);
+       if (!ofn)
+               return NULL;
+
+       regaddr_p = of_get_address(ofn, 0, &size64, NULL);
+       if (!regaddr_p) {
+               of_node_put(ofn);
+               return NULL;
+       }
+
+       regaddr64 = of_translate_address(ofn, regaddr_p);
+
+       of_node_put(ofn);
+
+       return ioremap((u32)regaddr64, (u32)size64);
+}
+EXPORT_SYMBOL(mpc52xx_find_and_map);
+
+
+/**
+ *     mpc52xx_find_ipb_freq - Find the IPB bus frequency for a device
+ *     @node:  device node
+ *
+ *     Returns IPB bus frequency, or 0 if the bus frequency cannot be found.
+ */
+unsigned int
+mpc52xx_find_ipb_freq(struct device_node *node)
+{
+       struct device_node *np;
+       const unsigned int *p_ipb_freq = NULL;
+
+       of_node_get(node);
+       while (node) {
+               p_ipb_freq = get_property(node, "bus-frequency", NULL);
+               if (p_ipb_freq)
+                       break;
+
+               np = of_get_parent(node);
+               of_node_put(node);
+               node = np;
+       }
+       if (node)
+               of_node_put(node);
+
+       return p_ipb_freq ? *p_ipb_freq : 0;
+}
+EXPORT_SYMBOL(mpc52xx_find_ipb_freq);
+
+
+void __init
+mpc52xx_setup_cpu(void)
+{
+       struct mpc52xx_cdm  __iomem *cdm;
+       struct mpc52xx_xlb  __iomem *xlb;
+
+       /* Map zones */
+       cdm = mpc52xx_find_and_map("mpc52xx-cdm");
+       xlb = mpc52xx_find_and_map("mpc52xx-xlb");
+
+       if (!cdm || !xlb) {
+               printk(KERN_ERR __FILE__ ": "
+                       "Error while mapping CDM/XLB during mpc52xx_setup_cpu. "
+                       "Expect some abnormal behavior\n");
+               goto unmap_regs;
+       }
+
+       /* Use internal 48 MHz */
+       out_8(&cdm->ext_48mhz_en, 0x00);
+       out_8(&cdm->fd_enable, 0x01);
+       if (in_be32(&cdm->rstcfg) & 0x40)       /* Assumes 33 MHz clock */
+               out_be16(&cdm->fd_counters, 0x0001);
+       else
+               out_be16(&cdm->fd_counters, 0x5555);
+
+       /* Configure the XLB Arbiter priorities */
+       out_be32(&xlb->master_pri_enable, 0xff);
+       out_be32(&xlb->master_priority, 0x11111111);
+
+       /* Disable XLB pipelining */
+       /* (cf. errata 292. We could do this just before an ATA PIO
+           transaction and re-enable it afterwards ...) */
+       out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
+
+       /* Unmap zones */
+unmap_regs:
+       if (cdm) iounmap(cdm);
+       if (xlb) iounmap(xlb);
+}
+
+static int __init
+mpc52xx_declare_of_platform_devices(void)
+{
+       /* Find every child of the SOC node and add it to of_platform */
+       return of_platform_bus_probe(NULL, NULL, NULL);
+}
+
+device_initcall(mpc52xx_declare_of_platform_devices);
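
mpc52xx_find_and_map() above looks a node up by its compatible string, translates its first "reg" entry and ioremap()s it, so callers get a ready-to-use __iomem pointer. A hypothetical caller might look like the sketch below; the compatible string, register offset and function name are placeholders, not something this patch adds:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>

static int __init example_peek_device(void)
{
        void __iomem *regs;
        u32 status;

        regs = mpc52xx_find_and_map("mpc52xx-gpt");     /* placeholder compatible */
        if (!regs)
                return -ENODEV;

        status = in_be32(regs + 0x10);                  /* placeholder offset */
        pr_debug("example status: 0x%08x\n", status);

        iounmap(regs);
        return 0;
}
device_initcall(example_peek_device);

mpc52xx_find_ipb_freq() follows a similar pattern, walking up the tree from a given node until it finds a "bus-frequency" property.
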
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
new file mode 100644 (file)
index 0000000..cd91a6c
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ *
+ * Programmable Interrupt Controller functions for the Freescale MPC52xx.
+ *
+ * Copyright (C) 2006 bplan GmbH
+ *
+ * Based on the code from the 2.4 kernel by
+ * Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
+ *
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/hardirq.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/mpc52xx.h>
+#include "mpc52xx_pic.h"
+
+/*
+ *
+*/
+
+static struct mpc52xx_intr __iomem *intr;
+static struct mpc52xx_sdma __iomem *sdma;
+static struct irq_host *mpc52xx_irqhost = NULL;
+
+static unsigned char mpc52xx_map_senses[4] = {
+       IRQ_TYPE_LEVEL_HIGH,
+       IRQ_TYPE_EDGE_RISING,
+       IRQ_TYPE_EDGE_FALLING,
+       IRQ_TYPE_LEVEL_LOW,
+};
+
+/*
+ *
+*/
+
+static inline void io_be_setbit(u32 __iomem *addr, int bitno)
+{
+       out_be32(addr, in_be32(addr) | (1 << bitno));
+}
+
+static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
+{
+       out_be32(addr, in_be32(addr) & ~(1 << bitno));
+}
+
+/*
+ * IRQ[0-3] interrupt irq_chip
+*/
+
+static void mpc52xx_extirq_mask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_clrbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_unmask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_setbit(&intr->ctrl, 11 - l2irq);
+}
+
+static void mpc52xx_extirq_ack(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_setbit(&intr->ctrl, 27-l2irq);
+}
+
+static struct irq_chip mpc52xx_extirq_irqchip = {
+       .typename = " MPC52xx IRQ[0-3] ",
+       .mask = mpc52xx_extirq_mask,
+       .unmask = mpc52xx_extirq_unmask,
+       .ack = mpc52xx_extirq_ack,
+};
+
+/*
+ * Main interrupt irq_chip
+*/
+
+static void mpc52xx_main_mask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_setbit(&intr->main_mask, 15 - l2irq);
+}
+
+static void mpc52xx_main_unmask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_clrbit(&intr->main_mask, 15 - l2irq);
+}
+
+static struct irq_chip mpc52xx_main_irqchip = {
+       .typename = "MPC52xx Main",
+       .mask = mpc52xx_main_mask,
+       .mask_ack = mpc52xx_main_mask,
+       .unmask = mpc52xx_main_unmask,
+};
+
+/*
+ * Peripherals interrupt irq_chip
+*/
+
+static void mpc52xx_periph_mask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_setbit(&intr->per_mask, 31 - l2irq);
+}
+
+static void mpc52xx_periph_unmask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_clrbit(&intr->per_mask, 31 - l2irq);
+}
+
+static struct irq_chip mpc52xx_periph_irqchip = {
+       .typename = "MPC52xx Peripherals",
+       .mask = mpc52xx_periph_mask,
+       .mask_ack = mpc52xx_periph_mask,
+       .unmask = mpc52xx_periph_unmask,
+};
+
+/*
+ * SDMA interrupt irq_chip
+*/
+
+static void mpc52xx_sdma_mask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_setbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_unmask(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       io_be_clrbit(&sdma->IntMask, l2irq);
+}
+
+static void mpc52xx_sdma_ack(unsigned int virq)
+{
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
+
+       out_be32(&sdma->IntPend, 1 << l2irq);
+}
+
+static struct irq_chip mpc52xx_sdma_irqchip = {
+       .typename = "MPC52xx SDMA",
+       .mask = mpc52xx_sdma_mask,
+       .unmask = mpc52xx_sdma_unmask,
+       .ack = mpc52xx_sdma_ack,
+};
+
+/*
+ * irq_host
+*/
+
+static int mpc52xx_irqhost_match(struct irq_host *h, struct device_node *node)
+{
+       pr_debug("%s: node=%p\n", __func__, node);
+       return mpc52xx_irqhost->host_data == node;
+}
+
+static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
+                                u32 * intspec, unsigned int intsize,
+                                irq_hw_number_t * out_hwirq,
+                                unsigned int *out_flags)
+{
+       int intrvect_l1;
+       int intrvect_l2;
+       int intrvect_type;
+       int intrvect_linux;
+
+       if (intsize != 3)
+               return -1;
+
+       intrvect_l1 = (int)intspec[0];
+       intrvect_l2 = (int)intspec[1];
+       intrvect_type = (int)intspec[2];
+
+       intrvect_linux =
+           (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK;
+       intrvect_linux |=
+           (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
+
+       pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
+                intrvect_l2);
+
+       *out_hwirq = intrvect_linux;
+       *out_flags = mpc52xx_map_senses[intrvect_type];
+
+       return 0;
+}
+
+/*
+ * This function retrieves the correct IRQ type from
+ * the MPC registers.
+ * Only external IRQs need this.
+*/
+static int mpc52xx_irqx_gettype(int irq)
+{
+       int type;
+       u32 ctrl_reg;
+
+       ctrl_reg = in_be32(&intr->ctrl);
+       type = (ctrl_reg >> (22 - irq * 2)) & 0x3;
+
+       return mpc52xx_map_senses[type];
+}
+
+static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
+                              irq_hw_number_t irq)
+{
+       int l1irq;
+       int l2irq;
+       struct irq_chip *good_irqchip;
+       void *good_handle;
+       int type;
+
+       l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       /*
+        * Most of our IRQs will be level low.
+        * Only external IRQs on some platforms may be different.
+        */
+       type = IRQ_TYPE_LEVEL_LOW;
+
+       switch (l1irq) {
+       case MPC52xx_IRQ_L1_CRIT:
+               pr_debug("%s: Critical. l2=%x\n", __func__, l2irq);
+
+               BUG_ON(l2irq != 0);
+
+               type = mpc52xx_irqx_gettype(l2irq);
+               good_irqchip = &mpc52xx_extirq_irqchip;
+               break;
+
+       case MPC52xx_IRQ_L1_MAIN:
+               pr_debug("%s: Main IRQ[1-3] l2=%x\n", __func__, l2irq);
+
+               if ((l2irq >= 1) && (l2irq <= 3)) {
+                       type = mpc52xx_irqx_gettype(l2irq);
+                       good_irqchip = &mpc52xx_extirq_irqchip;
+               } else {
+                       good_irqchip = &mpc52xx_main_irqchip;
+               }
+               break;
+
+       case MPC52xx_IRQ_L1_PERP:
+               pr_debug("%s: Peripherals. l2=%x\n", __func__, l2irq);
+               good_irqchip = &mpc52xx_periph_irqchip;
+               break;
+
+       case MPC52xx_IRQ_L1_SDMA:
+               pr_debug("%s: SDMA. l2=%x\n", __func__, l2irq);
+               good_irqchip = &mpc52xx_sdma_irqchip;
+               break;
+
+       default:
+               pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq);
+               printk(KERN_ERR "Unknown IRQ!\n");
+               return -EINVAL;
+       }
+
+       switch (type) {
+       case IRQ_TYPE_EDGE_FALLING:
+       case IRQ_TYPE_EDGE_RISING:
+               good_handle = handle_edge_irq;
+               break;
+       default:
+               good_handle = handle_level_irq;
+       }
+
+       set_irq_chip_and_handler(virq, good_irqchip, good_handle);
+
+       pr_debug("%s: virq=%x, hw=%x. type=%x\n", __func__, virq,
+                (int)irq, type);
+
+       return 0;
+}
+
+static struct irq_host_ops mpc52xx_irqhost_ops = {
+       .match = mpc52xx_irqhost_match,
+       .xlate = mpc52xx_irqhost_xlate,
+       .map = mpc52xx_irqhost_map,
+};
+
+/*
+ * init (public)
+*/
+
+void __init mpc52xx_init_irq(void)
+{
+       u32 intr_ctrl;
+       struct device_node *picnode;
+
+       /* Remap the necessary zones */
+       picnode = of_find_compatible_node(NULL, NULL, "mpc52xx-pic");
+
+       intr = mpc52xx_find_and_map("mpc52xx-pic");
+       if (!intr)
+               panic(__FILE__  ": find_and_map failed on 'mpc52xx-pic'. "
+                               "Check node !");
+
+       sdma = mpc52xx_find_and_map("mpc52xx-bestcomm");
+       if (!sdma)
+               panic(__FILE__  ": find_and_map failed on 'mpc52xx-bestcomm'. "
+                               "Check node !");
+
+       /* Disable all interrupt sources. */
+       out_be32(&sdma->IntPend, 0xffffffff);   /* 1 means clear pending */
+       out_be32(&sdma->IntMask, 0xffffffff);   /* 1 means disabled */
+       out_be32(&intr->per_mask, 0x7ffffc00);  /* 1 means disabled */
+       out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */
+       intr_ctrl = in_be32(&intr->ctrl);
+       intr_ctrl &= 0x00ff0000;        /* Keeps IRQ[0-3] config */
+       intr_ctrl |=    0x0f000000 |    /* clear IRQ 0-3 */
+                       0x00001000 |    /* MEE master external enable */
+                       0x00000000 |    /* 0 means disable IRQ 0-3 */
+                       0x00000001;     /* CEb route critical normally */
+       out_be32(&intr->ctrl, intr_ctrl);
+
+       /* Zero a bunch of the priority settings. */
+       out_be32(&intr->per_pri1, 0);
+       out_be32(&intr->per_pri2, 0);
+       out_be32(&intr->per_pri3, 0);
+       out_be32(&intr->main_pri1, 0);
+       out_be32(&intr->main_pri2, 0);
+
+       /*
+        * As a last step, add an irq host to translate the real
+        * hw irq information provided by the OFW to a Linux virq
+        */
+
+       mpc52xx_irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
+                                        MPC52xx_IRQ_HIGHTESTHWIRQ,
+                                        &mpc52xx_irqhost_ops, -1);
+
+       if (!mpc52xx_irqhost)
+               panic(__FILE__ ": Cannot allocate the IRQ host\n");
+
+       mpc52xx_irqhost->host_data = picnode;
+       printk(KERN_INFO "MPC52xx PIC is up and running!\n");
+}
+
+/*
+ * get_irq (public)
+*/
+unsigned int mpc52xx_get_irq(void)
+{
+       u32 status;
+       int irq = NO_IRQ_IGNORE;
+
+       status = in_be32(&intr->enc_status);
+       if (status & 0x00000400) {      /* critical */
+               irq = (status >> 8) & 0x3;
+               if (irq == 2)   /* high priority peripheral */
+                       goto peripheral;
+               irq |=  (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) &
+                       MPC52xx_IRQ_L1_MASK;
+       } else if (status & 0x00200000) {       /* main */
+               irq = (status >> 16) & 0x1f;
+               if (irq == 4)   /* low priority peripheral */
+                       goto peripheral;
+               irq |=  (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) &
+                       MPC52xx_IRQ_L1_MASK;
+       } else if (status & 0x20000000) {       /* peripheral */
+             peripheral:
+               irq = (status >> 24) & 0x1f;
+               if (irq == 0) { /* bestcomm */
+                       status = in_be32(&sdma->IntPend);
+                       irq = ffs(status) - 1;
+                       irq |=  (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) &
+                               MPC52xx_IRQ_L1_MASK;
+               } else {
+                       irq |=  (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) &
+                               MPC52xx_IRQ_L1_MASK;
+               }
+       }
+
+       pr_debug("%s: irq=%x. virq=%d\n", __func__, irq,
+                irq_linear_revmap(mpc52xx_irqhost, irq));
+
+       return irq_linear_revmap(mpc52xx_irqhost, irq);
+}
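
Throughout the PIC code above a hardware IRQ number is the concatenation of an L1 bank (critical, main, peripheral, SDMA) and an L2 index within that bank, using the masks defined in mpc52xx_pic.h. A minimal sketch of that encoding — the helper name is invented, and the constants are repeated here only to keep the sketch self-contained:

#define MPC52xx_IRQ_L1_OFFSET   (6)
#define MPC52xx_IRQ_L1_MASK     (0x00c0)
#define MPC52xx_IRQ_L2_OFFSET   (0)
#define MPC52xx_IRQ_L2_MASK     (0x003f)

static inline unsigned int mpc52xx_make_hwirq(unsigned int l1, unsigned int l2)
{
        return ((l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK) |
               ((l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK);
}

/* e.g. peripheral bank (L1 = 2), source 5  ->  hwirq 0x85 */

mpc52xx_irqhost_xlate() builds the same value from the three-cell interrupt specifier, and the mask/unmask/ack handlers recover l2irq by masking in the other direction.
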
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
new file mode 100644 (file)
index 0000000..1a26bcd
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Header file for Freescale MPC52xx Interrupt controller
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
+#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
+
+#include <asm/types.h>
+
+
+/* HW IRQ mapping */
+#define MPC52xx_IRQ_L1_CRIT    (0)
+#define MPC52xx_IRQ_L1_MAIN    (1)
+#define MPC52xx_IRQ_L1_PERP    (2)
+#define MPC52xx_IRQ_L1_SDMA    (3)
+
+#define MPC52xx_IRQ_L1_OFFSET   (6)
+#define MPC52xx_IRQ_L1_MASK     (0x00c0)
+
+#define MPC52xx_IRQ_L2_OFFSET   (0)
+#define MPC52xx_IRQ_L2_MASK     (0x003f)
+
+#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
+
+
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+       u32 per_mask;           /* INTR + 0x00 */
+       u32 per_pri1;           /* INTR + 0x04 */
+       u32 per_pri2;           /* INTR + 0x08 */
+       u32 per_pri3;           /* INTR + 0x0c */
+       u32 ctrl;               /* INTR + 0x10 */
+       u32 main_mask;          /* INTR + 0x14 */
+       u32 main_pri1;          /* INTR + 0x18 */
+       u32 main_pri2;          /* INTR + 0x1c */
+       u32 reserved1;          /* INTR + 0x20 */
+       u32 enc_status;         /* INTR + 0x24 */
+       u32 crit_status;        /* INTR + 0x28 */
+       u32 main_status;        /* INTR + 0x2c */
+       u32 per_status;         /* INTR + 0x30 */
+       u32 reserved2;          /* INTR + 0x34 */
+       u32 per_error;          /* INTR + 0x38 */
+};
+
+#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
+
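
The offsets noted in the comments of struct mpc52xx_intr can be checked against the structure layout at compile time. A small sketch, not part of the patch and assuming the header above is on the include path:

#include <stddef.h>
#include "mpc52xx_pic.h"

/* Each array gets a negative size, i.e. a compile error, if an offset
 * comment in the structure above ever disagrees with the real layout. */
struct mpc52xx_intr_layout_check {
        char ctrl_at_0x10[offsetof(struct mpc52xx_intr, ctrl) == 0x10 ? 1 : -1];
        char enc_status_at_0x24[offsetof(struct mpc52xx_intr, enc_status) == 0x24 ? 1 : -1];
        char per_error_at_0x38[offsetof(struct mpc52xx_intr, per_error) == 0x38 ? 1 : -1];
};
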
index bb9acbb98176049545240b81ddf8c70ee2ff68c7..ea880f1f0dcd4e71d73cad97b5e90ae9c833cd50 100644 (file)
@@ -515,16 +515,6 @@ static int m82xx_pci_exclude_device(u_char bus, u_char devfn)
                return PCIBIOS_SUCCESSFUL;
 }
 
-static void
-__init mpc82xx_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       for_each_pci_dev(dev) {
-               pci_read_irq_line(dev);
-       }
-}
-
 void __init add_bridge(struct device_node *np)
 {
        int len;
@@ -597,9 +587,6 @@ static void __init mpc82xx_ads_setup_arch(void)
                add_bridge(np);
 
        of_node_put(np);
-       ppc_md.pci_map_irq = NULL;
-       ppc_md.pcibios_fixup = mpc82xx_pcibios_fixup;
-       ppc_md.pcibios_fixup_bus = NULL;
 #endif
 
 #ifdef  CONFIG_ROOT_NFS
index a43ac71ab740ab9f851ea0a0f90b0f8450e9b8fd..f58c9780b66f705e3fe69aea463abdfe31b5c706 100644 (file)
@@ -97,8 +97,6 @@ static void __init mpc832x_sys_setup_arch(void)
 #ifdef CONFIG_PCI
        for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
                add_bridge(np);
-
-       ppc_md.pci_swizzle = common_swizzle;
        ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
index e2bcaaf6b329338ef48bfb1335709c5c9adbe6d7..314c42ac604834ea7dcf508bfe9a92c81a11bada 100644 (file)
@@ -118,7 +118,4 @@ define_machine(mpc834x_itx) {
        .time_init              = mpc83xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
-#ifdef CONFIG_PCI
-       .pcibios_fixup          = mpc83xx_pcibios_fixup,
-#endif
 };
index 677196187a4e67783a67702cc0a27fc644c39c8b..80b735a414d913c9840d3ccb776b470a431785e0 100644 (file)
@@ -137,7 +137,4 @@ define_machine(mpc834x_sys) {
        .time_init              = mpc83xx_time_init,
        .calibrate_decr         = generic_calibrate_decr,
        .progress               = udbg_progress,
-#ifdef CONFIG_PCI
-       .pcibios_fixup          = mpc83xx_pcibios_fixup,
-#endif
 };
index 1a523c81c06e528de4ac67bfe05d384ff054d6f5..7bfd47ad723317ed13aa83c5241b6203cfef3f9a 100644 (file)
@@ -102,8 +102,6 @@ static void __init mpc8360_sys_setup_arch(void)
 #ifdef CONFIG_PCI
        for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
                add_bridge(np);
-
-       ppc_md.pci_swizzle = common_swizzle;
        ppc_md.pci_exclude_device = mpc83xx_exclude_device;
 #endif
 
index 2c82bca9bfbbe32c826e855a08f16e1035b11cab..01cae106912bec714d4df34105c3305971970554 100644 (file)
@@ -11,7 +11,6 @@
 
 extern int add_bridge(struct device_node *dev);
 extern int mpc83xx_exclude_device(u_char bus, u_char devfn);
-extern void mpc83xx_pcibios_fixup(void);
 extern void mpc83xx_restart(char *cmd);
 extern long mpc83xx_time_init(void);
 
index 4557ac5255c1fda03f7fe43025d93f333465a677..9c3650555144d91b8a4d9ebdd8a9a0915ad2263c 100644 (file)
@@ -45,15 +45,6 @@ int mpc83xx_exclude_device(u_char bus, u_char devfn)
        return PCIBIOS_SUCCESSFUL;
 }
 
-void __init mpc83xx_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       /* map all the PCI irqs */
-       for_each_pci_dev(dev)
-               pci_read_irq_line(dev);
-}
-
 int __init add_bridge(struct device_node *dev)
 {
        int len;
index 26c5e822c7c8f211bf969ed869d5e3290fcaf28d..3e62fcb04c1c523766641db8bb89ec1821a3e378 100644 (file)
@@ -21,11 +21,3 @@ void mpc85xx_restart(char *cmd)
        local_irq_disable();
        abort();
 }
-
-/* For now this is a pass through */
-phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
-{
-       return addr;
-};
-
-EXPORT_SYMBOL(fixup_bigphys_addr);
index d3e669d69c735c2f0dfbfed753784378942129d5..bda2e55e6c4c1db7e5f84eadd8a2c18d18682894 100644 (file)
@@ -53,15 +53,6 @@ mpc85xx_exclude_device(u_char bus, u_char devfn)
        else
                return PCIBIOS_SUCCESSFUL;
 }
-
-void __init
-mpc85xx_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       for_each_pci_dev(dev)
-               pci_read_irq_line(dev);
-}
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_CPM2
@@ -253,8 +244,6 @@ static void __init mpc85xx_ads_setup_arch(void)
 #ifdef CONFIG_PCI
        for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
                add_bridge(np);
-
-       ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup;
        ppc_md.pci_exclude_device = mpc85xx_exclude_device;
 #endif
 
index 1a1c226ad4d960a76ddc076224b5944f99370938..f4dd5f2f8a28f262911afba39eb20016784432ab 100644 (file)
@@ -398,15 +398,6 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
 }
 
 
-void __init mpc86xx_hpcn_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       for_each_pci_dev(dev)
-               pci_read_irq_line(dev);
-}
-
-
 /*
  * Called very early, device-tree isn't unflattened
  */
@@ -461,7 +452,6 @@ define_machine(mpc86xx_hpcn) {
        .setup_arch             = mpc86xx_hpcn_setup_arch,
        .init_IRQ               = mpc86xx_hpcn_init_irq,
        .show_cpuinfo           = mpc86xx_hpcn_show_cpuinfo,
-       .pcibios_fixup          = mpc86xx_hpcn_pcibios_fixup,
        .get_irq                = mpic_get_irq,
        .restart                = mpc86xx_restart,
        .time_init              = mpc86xx_time_init,
index e58fa953a50bfc465e3bbc8d79bba22b14174293..44d95eaf22e642ae0dc14fcfc05f2eff83bc52b8 100644 (file)
@@ -7,12 +7,14 @@ endif
 endif
 obj-$(CONFIG_PPC_CHRP)         += chrp/
 obj-$(CONFIG_4xx)              += 4xx/
+obj-$(CONFIG_PPC_MPC52xx)      += 52xx/
 obj-$(CONFIG_PPC_83xx)         += 83xx/
 obj-$(CONFIG_PPC_85xx)         += 85xx/
 obj-$(CONFIG_PPC_86xx)         += 86xx/
 obj-$(CONFIG_PPC_PSERIES)      += pseries/
 obj-$(CONFIG_PPC_ISERIES)      += iseries/
 obj-$(CONFIG_PPC_MAPLE)                += maple/
-obj-$(CONFIG_PPC_PASEMI)               += pasemi/
+obj-$(CONFIG_PPC_PASEMI)       += pasemi/
 obj-$(CONFIG_PPC_CELL)         += cell/
+obj-$(CONFIG_PPC_PS3)          += ps3/
 obj-$(CONFIG_EMBEDDED6xx)      += embedded6xx/
index 3e430b489bb7540414022caee7099771357f5ccf..06a85b7043315550dadf0c63bdfe38acaa7c8aa2 100644 (file)
@@ -20,4 +20,18 @@ config CBE_RAS
        bool "RAS features for bare metal Cell BE"
        default y
 
+config CBE_THERM
+       tristate "CBE thermal support"
+       default m
+       depends on CBE_RAS
+
+config CBE_CPUFREQ
+       tristate "CBE frequency scaling"
+       depends on CBE_RAS && CPU_FREQ
+       default m
+       help
+         This adds the cpufreq driver for Cell BE processors.
+         For details, take a look at <file:Documentation/cpu-freq/>.
+         If you don't have such a processor, say N.
+
 endmenu
index c89cdd67383b21a96c6a7a8d9fc5dc6189f05407..f90e8337796cbe3b35046db8c145579f5e398e14 100644 (file)
@@ -1,7 +1,11 @@
 obj-$(CONFIG_PPC_CELL_NATIVE)          += interrupt.o iommu.o setup.o \
-                                          cbe_regs.o spider-pic.o pervasive.o
+                                          cbe_regs.o spider-pic.o \
+                                          pervasive.o pmu.o io-workarounds.o
 obj-$(CONFIG_CBE_RAS)                  += ras.o
 
+obj-$(CONFIG_CBE_THERM)                        += cbe_thermal.o
+obj-$(CONFIG_CBE_CPUFREQ)              += cbe_cpufreq.o
+
 ifeq ($(CONFIG_SMP),y)
 obj-$(CONFIG_PPC_CELL_NATIVE)          += smp.o
 endif
@@ -11,5 +15,6 @@ spufs-modular-$(CONFIG_SPU_FS)                += spu_syscalls.o
 spu-priv1-$(CONFIG_PPC_CELL_NATIVE)    += spu_priv1_mmio.o
 
 obj-$(CONFIG_SPU_BASE)                 += spu_callbacks.o spu_base.o \
+                                          spu_coredump.o \
                                           $(spufs-modular-m) \
                                           $(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
new file mode 100644 (file)
index 0000000..a3850fd
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+ * cpufreq driver for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+#include "cbe_regs.h"
+
+static DEFINE_MUTEX(cbe_switch_mutex);
+
+
+/* the CBE supports an 8 step frequency scaling */
+static struct cpufreq_frequency_table cbe_freqs[] = {
+       {1,     0},
+       {2,     0},
+       {3,     0},
+       {4,     0},
+       {5,     0},
+       {6,     0},
+       {8,     0},
+       {10,    0},
+       {0,     CPUFREQ_TABLE_END},
+};
+
+/* to write to MIC register */
+static u64 MIC_Slow_Fast_Timer_table[] = {
+       [0 ... 7] = 0x007fc00000000000ull,
+};
+
+/* more values for the MIC */
+static u64 MIC_Slow_Next_Timer_table[] = {
+       0x0000240000000000ull,
+       0x0000268000000000ull,
+       0x000029C000000000ull,
+       0x00002D0000000000ull,
+       0x0000300000000000ull,
+       0x0000334000000000ull,
+       0x000039C000000000ull,
+       0x00003FC000000000ull,
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_pmode(int cpu)
+{
+       int ret;
+       struct cbe_pmd_regs __iomem *pmd_regs;
+
+       pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+       ret = in_be64(&pmd_regs->pmsr) & 0x07;
+
+       return ret;
+}
+
+static int set_pmode(int cpu, unsigned int pmode)
+{
+       struct cbe_pmd_regs __iomem *pmd_regs;
+       struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+       u64 flags;
+       u64 value;
+
+       local_irq_save(flags);
+
+       mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
+       pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+       pr_debug("pm register is mapped at %p\n", &pmd_regs->pmcr);
+       pr_debug("mic register is mapped at %p\n", &mic_tm_regs->slow_fast_timer_0);
+
+       out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
+       out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
+
+       out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
+       out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
+
+       value = in_be64(&pmd_regs->pmcr);
+       /* set bits to zero */
+       value &= 0xFFFFFFFFFFFFFFF8ull;
+       /* set bits to next pmode */
+       value |= pmode;
+
+       out_be64(&pmd_regs->pmcr, value);
+
+       /* wait until new pmode appears in status register */
+       value = in_be64(&pmd_regs->pmsr) & 0x07;
+       while(value != pmode) {
+               cpu_relax();
+               value = in_be64(&pmd_regs->pmsr) & 0x07;
+       }
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int cbe_cpufreq_cpu_init (struct cpufreq_policy *policy)
+{
+       u32 *max_freq;
+       int i, cur_pmode;
+       struct device_node *cpu;
+
+       cpu = of_get_cpu_node(policy->cpu, NULL);
+
+       if(!cpu)
+               return -ENODEV;
+
+       pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+       max_freq = (u32*) get_property(cpu, "clock-frequency", NULL);
+
+       if(!max_freq)
+               return -EINVAL;
+
+       // we need the freq in kHz
+       *max_freq /= 1000;
+
+       pr_debug("max clock-frequency is at %u kHz\n", *max_freq);
+       pr_debug("initializing frequency table\n");
+
+       // initialize frequency table
+       for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
+               cbe_freqs[i].frequency = *max_freq / cbe_freqs[i].index;
+               pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
+       }
+
+       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+       /* if DEBUG is enabled set_pmode() measures the correct latency of a transition */
+       policy->cpuinfo.transition_latency = 25000;
+
+       cur_pmode = get_pmode(policy->cpu);
+       pr_debug("current pmode is at %d\n",cur_pmode);
+
+       policy->cur = cbe_freqs[cur_pmode].frequency;
+
+#ifdef CONFIG_SMP
+       policy->cpus = cpu_sibling_map[policy->cpu];
+#endif
+
+       cpufreq_frequency_table_get_attr (cbe_freqs, policy->cpu);
+
+       /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */
+       return cpufreq_frequency_table_cpuinfo (policy, cbe_freqs);
+}
+
+static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+       cpufreq_frequency_table_put_attr(policy->cpu);
+       return 0;
+}
+
+static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
+{
+       return cpufreq_frequency_table_verify(policy, cbe_freqs);
+}
+
+
+static int cbe_cpufreq_target(struct cpufreq_policy *policy, unsigned int target_freq,
+                           unsigned int relation)
+{
+       int rc;
+       struct cpufreq_freqs freqs;
+       int cbe_pmode_new;
+
+       cpufreq_frequency_table_target(policy,
+                                      cbe_freqs,
+                                      target_freq,
+                                      relation,
+                                      &cbe_pmode_new);
+
+       freqs.old = policy->cur;
+       freqs.new = cbe_freqs[cbe_pmode_new].frequency;
+       freqs.cpu = policy->cpu;
+
+       mutex_lock (&cbe_switch_mutex);
+       cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+       pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+                policy->cpu,
+                cbe_freqs[cbe_pmode_new].frequency,
+                cbe_freqs[cbe_pmode_new].index);
+
+       rc = set_pmode(policy->cpu, cbe_pmode_new);
+       cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+       mutex_unlock(&cbe_switch_mutex);
+
+       return rc;
+}
+
+static struct cpufreq_driver cbe_cpufreq_driver = {
+       .verify         = cbe_cpufreq_verify,
+       .target         = cbe_cpufreq_target,
+       .init           = cbe_cpufreq_cpu_init,
+       .exit           = cbe_cpufreq_cpu_exit,
+       .name           = "cbe-cpufreq",
+       .owner          = THIS_MODULE,
+       .flags          = CPUFREQ_CONST_LOOPS,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init cbe_cpufreq_init(void)
+{
+       return cpufreq_register_driver(&cbe_cpufreq_driver);
+}
+
+static void __exit cbe_cpufreq_exit(void)
+{
+       cpufreq_unregister_driver(&cbe_cpufreq_driver);
+}
+
+module_init(cbe_cpufreq_init);
+module_exit(cbe_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
index 2f194ba29899699883e612b136a3972598d08514..9a0ee62691d529f69a90779e5bbe62cd7e5fbdcd 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <linux/module.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -16,8 +17,6 @@
 
 #include "cbe_regs.h"
 
-#define MAX_CBE                2
-
 /*
  * Current implementation uses "cpu" nodes. We build our own mapping
  * array of cpu numbers to cpu nodes locally for now to allow interrupt
@@ -30,6 +29,8 @@ static struct cbe_regs_map
        struct device_node *cpu_node;
        struct cbe_pmd_regs __iomem *pmd_regs;
        struct cbe_iic_regs __iomem *iic_regs;
+       struct cbe_mic_tm_regs __iomem *mic_tm_regs;
+       struct cbe_pmd_shadow_regs pmd_shadow_regs;
 } cbe_regs_maps[MAX_CBE];
 static int cbe_regs_map_count;
 
@@ -42,6 +43,19 @@ static struct cbe_thread_map
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
        int i;
+       struct device_node *tmp_np;
+
+       if (strcasecmp(np->type, "spe") == 0) {
+               if (np->data == NULL) {
+                       /* walk up path until cpu node was found */
+                       tmp_np = np->parent;
+                       while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
+                               tmp_np = tmp_np->parent;
+
+                       np->data = cbe_find_map(tmp_np);
+               }
+               return np->data;
+       }
 
        for (i = 0; i < cbe_regs_map_count; i++)
                if (cbe_regs_maps[i].cpu_node == np)
@@ -56,6 +70,7 @@ struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
                return NULL;
        return map->pmd_regs;
 }
+EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
 
 struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
 {
@@ -64,7 +79,23 @@ struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
                return NULL;
        return map->pmd_regs;
 }
+EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
 
+struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
+{
+       struct cbe_regs_map *map = cbe_find_map(np);
+       if (map == NULL)
+               return NULL;
+       return &map->pmd_shadow_regs;
+}
+
+struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
+{
+       struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+       if (map == NULL)
+               return NULL;
+       return &map->pmd_shadow_regs;
+}
 
 struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
 {
@@ -73,6 +104,7 @@ struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
                return NULL;
        return map->iic_regs;
 }
+
 struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
 {
        struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
@@ -81,6 +113,36 @@ struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
        return map->iic_regs;
 }
 
+struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
+{
+       struct cbe_regs_map *map = cbe_find_map(np);
+       if (map == NULL)
+               return NULL;
+       return map->mic_tm_regs;
+}
+
+struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
+{
+       struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+       if (map == NULL)
+               return NULL;
+       return map->mic_tm_regs;
+}
+EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
+
+/* FIXME
+ * This is little more than a stub at the moment.  It should be
+ * fleshed out so that it works for both SMT and non-SMT, no
+ * matter if the passed cpu is odd or even.
+ * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
+ * For SMT disabled, returns 0 for all cpus.
+ */
+u32 cbe_get_hw_thread_id(int cpu)
+{
+       return (cpu & 1);
+}
+EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
+
 void __init cbe_regs_init(void)
 {
        int i;
@@ -119,6 +181,11 @@ void __init cbe_regs_init(void)
                prop = get_property(cpu, "iic", NULL);
                if (prop != NULL)
                        map->iic_regs = ioremap(prop->address, prop->len);
+
+               prop = (struct address_prop *)get_property(cpu, "mic-tm",
+                                                          NULL);
+               if (prop != NULL)
+                       map->mic_tm_regs = ioremap(prop->address, prop->len);
        }
 }
 
index e76e4a6af5bc195c518411ec24d973dd09525109..440a7ecc66eab1c3d2a894579c9dc399c7b9a351 100644 (file)
@@ -4,12 +4,19 @@
  * This file is intended to hold the various register definitions for CBE
  * on-chip system devices (memory controller, IO controller, etc...)
  *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ *          David J. Erb (djerb@us.ibm.com)
+ *
  * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
  */
 
 #ifndef CBE_REGS_H
 #define CBE_REGS_H
 
+#include <asm/cell-pmu.h>
+
 /*
  *
  * Some HID register definitions
@@ -22,6 +29,7 @@
 #define HID0_CBE_THERM_INT_EN  0x0000000400000000ul
 #define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
 
+#define MAX_CBE                2
 
 /*
  *
  *
  */
 
+union spe_reg {
+       u64 val;
+       u8 spe[8];
+};
+
+union ppe_spe_reg {
+       u64 val;
+       struct {
+               u32 ppe;
+               u32 spe;
+       };
+};
+
+
 struct cbe_pmd_regs {
-       u8 pad_0x0000_0x0800[0x0800 - 0x0000];                  /* 0x0000 */
+       /* Debug Bus Control */
+       u64     pad_0x0000;                                     /* 0x0000 */
+
+       u64     group_control;                                  /* 0x0008 */
+
+       u8      pad_0x0010_0x00a8 [0x00a8 - 0x0010];            /* 0x0010 */
+
+       u64     debug_bus_control;                              /* 0x00a8 */
+
+       u8      pad_0x00b0_0x0100 [0x0100 - 0x00b0];            /* 0x00b0 */
+
+       u64     trace_aux_data;                                 /* 0x0100 */
+       u64     trace_buffer_0_63;                              /* 0x0108 */
+       u64     trace_buffer_64_127;                            /* 0x0110 */
+       u64     trace_address;                                  /* 0x0118 */
+       u64     ext_tr_timer;                                   /* 0x0120 */
+
+       u8      pad_0x0128_0x0400 [0x0400 - 0x0128];            /* 0x0128 */
+
+       /* Performance Monitor */
+       u64     pm_status;                                      /* 0x0400 */
+       u64     pm_control;                                     /* 0x0408 */
+       u64     pm_interval;                                    /* 0x0410 */
+       u64     pm_ctr[4];                                      /* 0x0418 */
+       u64     pm_start_stop;                                  /* 0x0438 */
+       u64     pm07_control[8];                                /* 0x0440 */
+
+       u8      pad_0x0480_0x0800 [0x0800 - 0x0480];            /* 0x0480 */
 
        /* Thermal Sensor Registers */
-       u64  ts_ctsr1;                                          /* 0x0800 */
-       u64  ts_ctsr2;                                          /* 0x0808 */
-       u64  ts_mtsr1;                                          /* 0x0810 */
-       u64  ts_mtsr2;                                          /* 0x0818 */
-       u64  ts_itr1;                                           /* 0x0820 */
-       u64  ts_itr2;                                           /* 0x0828 */
-       u64  ts_gitr;                                           /* 0x0830 */
-       u64  ts_isr;                                            /* 0x0838 */
-       u64  ts_imr;                                            /* 0x0840 */
-       u64  tm_cr1;                                            /* 0x0848 */
-       u64  tm_cr2;                                            /* 0x0850 */
-       u64  tm_simr;                                           /* 0x0858 */
-       u64  tm_tpr;                                            /* 0x0860 */
-       u64  tm_str1;                                           /* 0x0868 */
-       u64  tm_str2;                                           /* 0x0870 */
-       u64  tm_tsr;                                            /* 0x0878 */
+       union   spe_reg ts_ctsr1;                               /* 0x0800 */
+       u64     ts_ctsr2;                                       /* 0x0808 */
+       union   spe_reg ts_mtsr1;                               /* 0x0810 */
+       u64     ts_mtsr2;                                       /* 0x0818 */
+       union   spe_reg ts_itr1;                                /* 0x0820 */
+       u64     ts_itr2;                                        /* 0x0828 */
+       u64     ts_gitr;                                        /* 0x0830 */
+       u64     ts_isr;                                         /* 0x0838 */
+       u64     ts_imr;                                         /* 0x0840 */
+       union   spe_reg tm_cr1;                                 /* 0x0848 */
+       u64     tm_cr2;                                         /* 0x0850 */
+       u64     tm_simr;                                        /* 0x0858 */
+       union   ppe_spe_reg tm_tpr;                             /* 0x0860 */
+       union   spe_reg tm_str1;                                /* 0x0868 */
+       u64     tm_str2;                                        /* 0x0870 */
+       union   ppe_spe_reg tm_tsr;                             /* 0x0878 */
 
        /* Power Management */
-       u64  pm_control;                                        /* 0x0880 */
-#define CBE_PMD_PAUSE_ZERO_CONTROL             0x10000
-       u64  pm_status;                                         /* 0x0888 */
+       u64     pmcr;                                           /* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL     0x10000
+       u64     pmsr;                                           /* 0x0888 */
 
        /* Time Base Register */
-       u64  tbr;                                               /* 0x0890 */
+       u64     tbr;                                            /* 0x0890 */
 
-       u8   pad_0x0898_0x0c00 [0x0c00 - 0x0898];               /* 0x0898 */
+       u8      pad_0x0898_0x0c00 [0x0c00 - 0x0898];            /* 0x0898 */
 
        /* Fault Isolation Registers */
-       u64  checkstop_fir;                                     /* 0x0c00 */
-       u64  recoverable_fir;
-       u64  spec_att_mchk_fir;
-       u64  fir_mode_reg;
-       u64  fir_enable_mask;
+       u64     checkstop_fir;                                  /* 0x0c00 */
+       u64     recoverable_fir;                                /* 0x0c08 */
+       u64     spec_att_mchk_fir;                              /* 0x0c10 */
+       u64     fir_mode_reg;                                   /* 0x0c18 */
+       u64     fir_enable_mask;                                /* 0x0c20 */
 
-       u8   pad_0x0c28_0x1000 [0x1000 - 0x0c28];               /* 0x0c28 */
+       u8      pad_0x0c28_0x1000 [0x1000 - 0x0c28];            /* 0x0c28 */
 };
 
 extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
 extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
 
+/*
+ * PMU shadow registers
+ *
+ * Many of the registers in the performance monitoring unit are write-only,
+ * so we need to save a copy of what we write to those registers.
+ *
+ * The actual data counters are read/write. However, writing to the counters
+ * only takes effect if the PMU is enabled. Otherwise the value is stored in
+ * a hardware latch until the next time the PMU is enabled. So we save a copy
+ * of the counter values if we need to read them back while the PMU is
+ * disabled. The counter_value_in_latch field is a bitmap indicating which
+ * counters currently have a value waiting to be written.
+ */
+
+struct cbe_pmd_shadow_regs {
+       u32 group_control;
+       u32 debug_bus_control;
+       u32 trace_address;
+       u32 ext_tr_timer;
+       u32 pm_status;
+       u32 pm_control;
+       u32 pm_interval;
+       u32 pm_start_stop;
+       u32 pm07_control[NR_CTRS];
+
+       u32 pm_ctr[NR_PHYS_CTRS];
+       u32 counter_value_in_latch;
+};
+
+extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
+extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
+
 /*
  *
  * IIC unit register definitions
@@ -102,18 +183,28 @@ struct cbe_iic_regs {
 
        /* IIC interrupt registers */
        struct  cbe_iic_thread_regs thread[2];                  /* 0x0400 */
-       u64     iic_ir;                                         /* 0x0440 */
-       u64     iic_is;                                         /* 0x0448 */
+
+       u64     iic_ir;                                         /* 0x0440 */
+#define CBE_IIC_IR_PRIO(x)      (((x) & 0xf) << 12)
+#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
+#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
+#define CBE_IIC_IR_IOC_0        0x0
+#define CBE_IIC_IR_IOC_1S       0xb
+#define CBE_IIC_IR_PT_0         0xe
+#define CBE_IIC_IR_PT_1         0xf
+
+       u64     iic_is;                                         /* 0x0448 */
+#define CBE_IIC_IS_PMI         0x2
 
        u8      pad_0x0450_0x0500[0x0500 - 0x0450];             /* 0x0450 */
 
        /* IOC FIR */
        u64     ioc_fir_reset;                                  /* 0x0500 */
-       u64     ioc_fir_set;
-       u64     ioc_checkstop_enable;
-       u64     ioc_fir_error_mask;
-       u64     ioc_syserr_enable;
-       u64     ioc_fir;
+       u64     ioc_fir_set;                                    /* 0x0508 */
+       u64     ioc_checkstop_enable;                           /* 0x0510 */
+       u64     ioc_fir_error_mask;                             /* 0x0518 */
+       u64     ioc_syserr_enable;                              /* 0x0520 */
+       u64     ioc_fir;                                        /* 0x0528 */
 
        u8      pad_0x0530_0x1000[0x1000 - 0x0530];             /* 0x0530 */
 };
@@ -122,6 +213,48 @@ extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
 extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
 
 
+struct cbe_mic_tm_regs {
+       u8      pad_0x0000_0x0040[0x0040 - 0x0000];             /* 0x0000 */
+
+       u64     mic_ctl_cnfg2;                                  /* 0x0040 */
+#define CBE_MIC_ENABLE_AUX_TRC         0x8000000000000000LL
+#define CBE_MIC_DISABLE_PWR_SAV_2      0x0200000000000000LL
+#define CBE_MIC_DISABLE_AUX_TRC_WRAP   0x0100000000000000LL
+#define CBE_MIC_ENABLE_AUX_TRC_INT     0x0080000000000000LL
+
+       u64     pad_0x0048;                                     /* 0x0048 */
+
+       u64     mic_aux_trc_base;                               /* 0x0050 */
+       u64     mic_aux_trc_max_addr;                           /* 0x0058 */
+       u64     mic_aux_trc_cur_addr;                           /* 0x0060 */
+       u64     mic_aux_trc_grf_addr;                           /* 0x0068 */
+       u64     mic_aux_trc_grf_data;                           /* 0x0070 */
+
+       u64     pad_0x0078;                                     /* 0x0078 */
+
+       u64     mic_ctl_cnfg_0;                                 /* 0x0080 */
+#define CBE_MIC_DISABLE_PWR_SAV_0      0x8000000000000000LL
+
+       u64     pad_0x0088;                                     /* 0x0088 */
+
+       u64     slow_fast_timer_0;                              /* 0x0090 */
+       u64     slow_next_timer_0;                              /* 0x0098 */
+
+       u8      pad_0x00a0_0x01c0[0x01c0 - 0x0a0];              /* 0x00a0 */
+
+       u64     mic_ctl_cnfg_1;                                 /* 0x01c0 */
+#define CBE_MIC_DISABLE_PWR_SAV_1      0x8000000000000000LL
+       u64     pad_0x01c8;                                     /* 0x01c8 */
+
+       u64     slow_fast_timer_1;                              /* 0x01d0 */
+       u64     slow_next_timer_1;                              /* 0x01d8 */
+
+       u8      pad_0x01e0_0x1000[0x1000 - 0x01e0];             /* 0x01e0 */
+};
+
+extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
+extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
+
 /* Init this module early */
 extern void cbe_regs_init(void);
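Illustrative sketch (not part of this commit): the cbe_pmd_shadow_regs comment above describes the usual pattern for write-only registers, where every write goes to the hardware and to a software copy, and later reads come from the copy. A minimal stand-alone version of that pattern; write_hw() and the shadow layout here are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the MMIO write; real code would use out_be64(). */
static uint32_t fake_hw_latch;
static void write_hw(uint32_t val) { fake_hw_latch = val; }

struct shadow {
	uint32_t pm_control;	/* last value written to the write-only register */
};

static void pm_control_write(struct shadow *s, uint32_t val)
{
	write_hw(val);		/* update the hardware */
	s->pm_control = val;	/* remember it, since it cannot be read back */
}

int main(void)
{
	struct shadow s = { 0 };

	pm_control_write(&s, 0x80000000u);
	printf("shadowed pm_control = 0x%08x\n", s.pm_control);
	return 0;
}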
 
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
new file mode 100644 (file)
index 0000000..616a0a3
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * thermal support for the cell processor
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Christian Krafft <krafft@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <asm/spu.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+
+#include "cbe_regs.h"
+#include "spu_priv1_mmio.h"
+
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev)
+{
+       struct spu *spu;
+
+       spu = container_of(sysdev, struct spu, sysdev);
+
+       return cbe_get_pmd_regs(spu_devnode(spu));
+}
+
+/* returns the value for a given spu in a given register */
+static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg)
+{
+       unsigned int *id;
+       union spe_reg value;
+       struct spu *spu;
+
+       /* Getting the id from the reg attribute will not work on future device-tree
+        * layouts; in future we should store the id in the spu struct and use it here. */
+       spu = container_of(sysdev, struct spu, sysdev);
+       id = (unsigned int *)get_property(spu_devnode(spu), "reg", NULL);
+       value.val = in_be64(&reg->val);
+
+       return value.spe[*id];
+}
+
+static ssize_t spu_show_temp(struct sys_device *sysdev, char *buf)
+{
+       int value;
+       struct cbe_pmd_regs __iomem *pmd_regs;
+
+       pmd_regs = get_pmd_regs(sysdev);
+
+       value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1);
+       /* clear all other bits */
+       value &= 0x3F;
+       /* temp is stored in steps of 2 degrees */
+       value *= 2;
+       /* base temp is 65 degrees */
+       value += 65;
+
+       return sprintf(buf, "%d\n", (int) value);
+}
+
+static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos)
+{
+       struct cbe_pmd_regs __iomem *pmd_regs;
+       u64 value;
+
+       pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+       value = in_be64(&pmd_regs->ts_ctsr2);
+
+       /* access the corresponding byte */
+       value >>= pos;
+       /* clear all other bits */
+       value &= 0x3F;
+       /* temp is stored in steps of 2 degrees */
+       value *= 2;
+       /* base temp is 65 degrees */
+       value += 65;
+
+       return sprintf(buf, "%d\n", (int) value);
+}
+
+
+/* shows the temperature of the DTS on the PPE,
+ * located near the linear thermal sensor */
+static ssize_t ppe_show_temp0(struct sys_device *sysdev, char *buf)
+{
+       return ppe_show_temp(sysdev, buf, 32);
+}
+
+/* shows the temperature of the second DTS on the PPE */
+static ssize_t ppe_show_temp1(struct sys_device *sysdev, char *buf)
+{
+       return ppe_show_temp(sysdev, buf, 0);
+}
+
+static struct sysdev_attribute attr_spu_temperature = {
+       .attr = {.name = "temperature", .mode = 0400 },
+       .show = spu_show_temp,
+};
+
+static struct attribute *spu_attributes[] = {
+       &attr_spu_temperature.attr,
+};
+
+static struct attribute_group spu_attribute_group = {
+       .name   = "thermal",
+       .attrs  = spu_attributes,
+};
+
+static struct sysdev_attribute attr_ppe_temperature0 = {
+       .attr = {.name = "temperature0", .mode = 0400 },
+       .show = ppe_show_temp0,
+};
+
+static struct sysdev_attribute attr_ppe_temperature1 = {
+       .attr = {.name = "temperature1", .mode = 0400 },
+       .show = ppe_show_temp1,
+};
+
+static struct attribute *ppe_attributes[] = {
+       &attr_ppe_temperature0.attr,
+       &attr_ppe_temperature1.attr,
+};
+
+static struct attribute_group ppe_attribute_group = {
+       .name   = "thermal",
+       .attrs  = ppe_attributes,
+};
+
+/*
+ * initialize throttling with default values
+ */
+static void __init init_default_values(void)
+{
+       int cpu;
+       struct cbe_pmd_regs __iomem *pmd_regs;
+       struct sys_device *sysdev;
+       union ppe_spe_reg tpr;
+       union spe_reg str1;
+       u64 str2;
+       union spe_reg cr1;
+       u64 cr2;
+
+       /* TPR defaults */
+       /* ppe
+        *      1F - no full stop
+        *      08 - dynamic throttling starts if over 80 degrees
+        *      03 - dynamic throttling ceases if below 70 degrees */
+       tpr.ppe = 0x1F0803;
+       /* spe
+        *      10 - full stopped when over 96 degrees
+        *      08 - dynamic throttling starts if over 80 degrees
+        *      03 - dynamic throttling ceases if below 70 degrees
+        */
+       tpr.spe = 0x100803;
+
+       /* STR defaults */
+       /* str1
+        *      10 - stop 16 of 32 cycles
+        */
+       str1.val = 0x1010101010101010ull;
+       /* str2
+        *      10 - stop 16 of 32 cycles
+        */
+       str2 = 0x10;
+
+       /* CR defaults */
+       /* cr1
+        *      4 - normal operation
+        */
+       cr1.val = 0x0404040404040404ull;
+       /* cr2
+        *      4 - normal operation
+        */
+       cr2 = 0x04;
+
+       for_each_possible_cpu (cpu) {
+               pr_debug("processing cpu %d\n", cpu);
+               sysdev = get_cpu_sysdev(cpu);
+               pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id);
+
+               out_be64(&pmd_regs->tm_str2, str2);
+               out_be64(&pmd_regs->tm_str1.val, str1.val);
+               out_be64(&pmd_regs->tm_tpr.val, tpr.val);
+               out_be64(&pmd_regs->tm_cr1.val, cr1.val);
+               out_be64(&pmd_regs->tm_cr2, cr2);
+       }
+}
+
+
+static int __init thermal_init(void)
+{
+       init_default_values();
+
+       spu_add_sysdev_attr_group(&spu_attribute_group);
+       cpu_add_sysdev_attr_group(&ppe_attribute_group);
+
+       return 0;
+}
+module_init(thermal_init);
+
+static void __exit thermal_exit(void)
+{
+       spu_remove_sysdev_attr_group(&spu_attribute_group);
+       cpu_remove_sysdev_attr_group(&ppe_attribute_group);
+}
+module_exit(thermal_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+
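Worked example (not part of this commit): spu_show_temp() and ppe_show_temp() decode the raw 6-bit sensor field identically, temperature = (raw & 0x3F) * 2 + 65 degrees C. A small stand-alone sketch of that conversion:

#include <stdio.h>

/* Decode a CBE thermal sensor field: 6-bit value, 2 degree steps, 65 C base. */
static int cbe_decode_temp(unsigned long raw)
{
	return (int)(raw & 0x3F) * 2 + 65;
}

int main(void)
{
	/* e.g. a raw reading of 10 corresponds to 10 * 2 + 65 = 85 C */
	printf("raw 10 -> %d C\n", cbe_decode_temp(10));
	return 0;
}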
index a914c12b4060a2072d77a7bfc08e7c18c3265467..6666d037eb443c763fc9413f460f2ef5a275c86d 100644 (file)
@@ -396,3 +396,19 @@ void __init iic_init_IRQ(void)
        /* Enable on current CPU */
        iic_setup_cpu();
 }
+
+void iic_set_interrupt_routing(int cpu, int thread, int priority)
+{
+       struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
+       u64 iic_ir = 0;
+       int node = cpu >> 1;
+
+       /* Set which node and thread will handle the next interrupt */
+       iic_ir |= CBE_IIC_IR_PRIO(priority) |
+                 CBE_IIC_IR_DEST_NODE(node);
+       if (thread == 0)
+               iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
+       else
+               iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
+       out_be64(&iic_regs->iic_ir, iic_ir);
+}
index 9ba1d3c17b4b4c066dbb7cf1c7d168b01fea3f4d..942dc39d604559f4d1d58da68759a280dad968e7 100644 (file)
@@ -83,5 +83,7 @@ extern u8 iic_get_target_id(int cpu);
 
 extern void spider_init_IRQ(void);
 
+extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
+
 #endif
 #endif /* ASM_CELL_PIC_H */
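Illustrative sketch (not part of this commit): iic_set_interrupt_routing() packs its routing word from the CBE_IIC_IR_* fields added to cbe_regs.h, with the priority in bits 15:12, the destination node in bits 7:4 and the destination unit in bits 3:0. A stand-alone rendering of that packing for thread 0 of cpu 2 at priority 0xf (values chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

#define CBE_IIC_IR_PRIO(x)      (((uint64_t)(x) & 0xf) << 12)
#define CBE_IIC_IR_DEST_NODE(x) (((uint64_t)(x) & 0xf) << 4)
#define CBE_IIC_IR_DEST_UNIT(x) ((uint64_t)(x) & 0xf)
#define CBE_IIC_IR_PT_0         0xe

int main(void)
{
	int cpu = 2, priority = 0xf;
	/* each physical node holds two cpus, so node = cpu >> 1 */
	uint64_t iic_ir = CBE_IIC_IR_PRIO(priority) |
			  CBE_IIC_IR_DEST_NODE(cpu >> 1) |
			  CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);

	printf("iic_ir = 0x%04llx\n", (unsigned long long)iic_ir);
	return 0;
}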
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
new file mode 100644 (file)
index 0000000..580d425
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+ *  Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ *                    IBM, Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+
+#define SPIDER_PCI_REG_BASE            0xd000
+#define SPIDER_PCI_VCI_CNTL_STAT       0x0110
+#define SPIDER_PCI_DUMMY_READ          0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE     0x0814
+
+/* Undefine that to re-enable bogus prefetch
+ *
+ * Without that workaround, the chip will do bogus prefetch past
+ * page boundary from system memory. This setting will disable that,
+ * though the documentation is unclear as to the consequences of doing
+ * so, either purely performances, or possible misbehaviour... It's not
+ * clear wether the chip can handle unaligned accesses at all without
+ * prefetching enabled.
+ *
+ * For now, things appear to be behaving properly with that prefetching
+ * disabled and IDE, possibly because IDE isn't doing any unaligned
+ * access.
+ */
+#define SPIDER_DISABLE_PREFETCH
+
+#define MAX_SPIDERS    2
+
+static struct spider_pci_bus {
+       void __iomem    *regs;
+       unsigned long   mmio_start;
+       unsigned long   mmio_end;
+       unsigned long   pio_vstart;
+       unsigned long   pio_vend;
+} spider_pci_busses[MAX_SPIDERS];
+static int spider_pci_count;
+
+static struct spider_pci_bus *spider_pci_find(unsigned long vaddr,
+                                             unsigned long paddr)
+{
+       int i;
+
+       for (i = 0; i < spider_pci_count; i++) {
+               struct spider_pci_bus *bus = &spider_pci_busses[i];
+               if (paddr && paddr >= bus->mmio_start && paddr < bus->mmio_end)
+                       return bus;
+               if (vaddr && vaddr >= bus->pio_vstart && vaddr < bus->pio_vend)
+                       return bus;
+       }
+       return NULL;
+}
+
+static void spider_io_flush(const volatile void __iomem *addr)
+{
+       struct spider_pci_bus *bus;
+       int token;
+
+       /* Get platform token (set by ioremap) from address */
+       token = PCI_GET_ADDR_TOKEN(addr);
+
+       /* Fast path if we have a non-0 token, it indicates which bus we
+        * are on.
+        *
+        * If the token is 0, that means either the ioremap was done
+        * before we initialized this layer, or it's a PIO operation. We
+        * fall back to a slow path in this case. Hopefully, internal devices
+        * which are ioremap'ed early should use in_XX/out_XX functions
+        * instead of the PCI ones and thus not suffer from the slowdown.
+        *
+        * Also note that currently, the workaround will not work for areas
+        * that are not mapped with PTEs (bolted in the hash table). This
+        * is the case for ioremaps done very early at boot (before
+        * mem_init_done) and includes the mapping of the ISA IO space.
+        *
+        * Fortunately, none of the affected devices is expected to do DMA
+        * and thus there should be no problem in practice.
+        *
+        * In order to improve performance, we only do the PTE search for
+        * addresses falling in the PHB IO space area. That means it will
+        * not work for hotplug'ed PHBs but those don't exist with Spider.
+        */
+       if (token && token <= spider_pci_count)
+               bus = &spider_pci_busses[token - 1];
+       else {
+               unsigned long vaddr, paddr;
+               pte_t *ptep;
+
+               /* Fixup physical address */
+               vaddr = (unsigned long)PCI_FIX_ADDR(addr);
+
+               /* Check if it's in the allowed range for PIO */
+               if (vaddr < PHBS_IO_BASE || vaddr >= IMALLOC_BASE)
+                       return;
+
+               /* Try to find a PTE. If not, clear the paddr, we'll do
+                * a vaddr only lookup (PIO only)
+                */
+               ptep = find_linux_pte(init_mm.pgd, vaddr);
+               if (ptep == NULL)
+                       paddr = 0;
+               else
+                       paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+
+               bus = spider_pci_find(vaddr, paddr);
+               if (bus == NULL)
+                       return;
+       }
+
+       /* Now do the workaround
+        */
+       (void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
+}
+
+static u8 spider_readb(const volatile void __iomem *addr)
+{
+       u8 val = __do_readb(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static u16 spider_readw(const volatile void __iomem *addr)
+{
+       u16 val = __do_readw(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static u32 spider_readl(const volatile void __iomem *addr)
+{
+       u32 val = __do_readl(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static u64 spider_readq(const volatile void __iomem *addr)
+{
+       u64 val = __do_readq(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static u16 spider_readw_be(const volatile void __iomem *addr)
+{
+       u16 val = __do_readw_be(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static u32 spider_readl_be(const volatile void __iomem *addr)
+{
+       u32 val = __do_readl_be(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static u64 spider_readq_be(const volatile void __iomem *addr)
+{
+       u64 val = __do_readq_be(addr);
+       spider_io_flush(addr);
+       return val;
+}
+
+static void spider_readsb(const volatile void __iomem *addr, void *buf,
+                         unsigned long count)
+{
+       __do_readsb(addr, buf, count);
+       spider_io_flush(addr);
+}
+
+static void spider_readsw(const volatile void __iomem *addr, void *buf,
+                         unsigned long count)
+{
+       __do_readsw(addr, buf, count);
+       spider_io_flush(addr);
+}
+
+static void spider_readsl(const volatile void __iomem *addr, void *buf,
+                         unsigned long count)
+{
+       __do_readsl(addr, buf, count);
+       spider_io_flush(addr);
+}
+
+static void spider_memcpy_fromio(void *dest, const volatile void __iomem *src,
+                                unsigned long n)
+{
+       __do_memcpy_fromio(dest, src, n);
+       spider_io_flush(src);
+}
+
+
+static void __iomem * spider_ioremap(unsigned long addr, unsigned long size,
+                                    unsigned long flags)
+{
+       struct spider_pci_bus *bus;
+       void __iomem *res = __ioremap(addr, size, flags);
+       int busno;
+
+       pr_debug("spider_ioremap(0x%lx, 0x%lx, 0x%lx) -> 0x%p\n",
+                addr, size, flags, res);
+
+       bus = spider_pci_find(0, addr);
+       if (bus != NULL) {
+               busno = bus - spider_pci_busses;
+               pr_debug(" found bus %d, setting token\n", busno);
+               PCI_SET_ADDR_TOKEN(res, busno + 1);
+       }
+       pr_debug(" result=0x%p\n", res);
+
+       return res;
+}
+
+static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
+{
+#ifdef SPIDER_DISABLE_PREFETCH
+       u32 val = in_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT);
+       pr_debug(" PVCI_Control_Status was 0x%08x\n", val);
+       out_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
+#endif
+
+       /* Configure the dummy address for the workaround */
+       out_be32(bus->regs + SPIDER_PCI_DUMMY_READ_BASE, 0x80000000);
+}
+
+static void __init spider_pci_add_one(struct pci_controller *phb)
+{
+       struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
+       struct device_node *np = phb->arch_data;
+       struct resource rsrc;
+       void __iomem *regs;
+
+       if (spider_pci_count >= MAX_SPIDERS) {
+               printk(KERN_ERR "Too many spider bridges, workarounds"
+                      " disabled for %s\n", np->full_name);
+               return;
+       }
+
+       /* Get the registers for the beast */
+       if (of_address_to_resource(np, 0, &rsrc)) {
+               printk(KERN_ERR "Failed to get registers for spider %s"
+                      " workarounds disabled\n", np->full_name);
+               return;
+       }
+
+       /* Mask out some useless bits in there to get to the base of the
+        * spider chip
+        */
+       rsrc.start &= ~0xfffffffful;
+
+       /* Map them */
+       regs = ioremap(rsrc.start + SPIDER_PCI_REG_BASE, 0x1000);
+       if (regs == NULL) {
+               printk(KERN_ERR "Failed to map registers for spider %s"
+                      " workarounds disabled\n", np->full_name);
+               return;
+       }
+
+       spider_pci_count++;
+
+       /* We assume spiders only have one MMIO resource */
+       bus->mmio_start = phb->mem_resources[0].start;
+       bus->mmio_end = phb->mem_resources[0].end + 1;
+
+       bus->pio_vstart = (unsigned long)phb->io_base_virt;
+       bus->pio_vend = bus->pio_vstart + phb->pci_io_size;
+
+       bus->regs = regs;
+
+       printk(KERN_INFO "PCI: Spider MMIO workaround for %s\n",np->full_name);
+
+       pr_debug(" mmio (P) = 0x%016lx..0x%016lx\n",
+                bus->mmio_start, bus->mmio_end);
+       pr_debug("  pio (V) = 0x%016lx..0x%016lx\n",
+                bus->pio_vstart, bus->pio_vend);
+       pr_debug(" regs (P) = 0x%016lx (V) = 0x%p\n",
+                rsrc.start + SPIDER_PCI_REG_BASE, bus->regs);
+
+       spider_pci_setup_chip(bus);
+}
+
+static struct ppc_pci_io __initdata spider_pci_io = {
+       .readb = spider_readb,
+       .readw = spider_readw,
+       .readl = spider_readl,
+       .readq = spider_readq,
+       .readw_be = spider_readw_be,
+       .readl_be = spider_readl_be,
+       .readq_be = spider_readq_be,
+       .readsb = spider_readsb,
+       .readsw = spider_readsw,
+       .readsl = spider_readsl,
+       .memcpy_fromio = spider_memcpy_fromio,
+};
+
+static int __init spider_pci_workaround_init(void)
+{
+       struct pci_controller *phb;
+
+       if (!machine_is(cell))
+               return 0;
+
+       /* Find spider bridges. We assume they have been all probed
+        * in setup_arch(). If that was to change, we would need to
+        * update this code to cope with dynamically added busses
+        */
+       list_for_each_entry(phb, &hose_list, list_node) {
+               struct device_node *np = phb->arch_data;
+               const char *model = get_property(np, "model", NULL);
+
+               /* If no model property or name isn't exactly "pci", skip */
+               if (model == NULL || strcmp(np->name, "pci"))
+                       continue;
+               /* If model is not "Spider", skip */
+               if (strcmp(model, "Spider"))
+                       continue;
+               spider_pci_add_one(phb);
+       }
+
+       /* No Spider PCI found, exit */
+       if (spider_pci_count == 0)
+               return 0;
+
+       /* Set up IO callbacks. We only set up MMIO reads. PIO reads will
+        * fall back to MMIO reads (though without a token, thus slower)
+        */
+       ppc_pci_io = spider_pci_io;
+
+       /* Setup ioremap callback */
+       ppc_md.ioremap = spider_ioremap;
+
+       return 0;
+}
+arch_initcall(spider_pci_workaround_init);
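Illustrative sketch (not part of this commit): the heart of the workaround is that every MMIO read is followed by a dummy read from the bridge's SPIDER_PCI_DUMMY_READ register, which flushes any bogus prefetch. A stripped-down user-space rendering of that wrapper pattern; mmio_read32() and dummy_read() are stand-ins for the real accessors:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real __do_readl()/in_be32() accessors. */
static uint32_t mmio_read32(const volatile void *addr) { (void)addr; return 0x12345678; }
static void dummy_read(void) { /* read the bridge's dummy register, discard the value */ }

/* Wrapped read: do the real access, then flush via the dummy read. */
static uint32_t spider_style_readl(const volatile void *addr)
{
	uint32_t val = mmio_read32(addr);

	dummy_read();
	return val;
}

int main(void)
{
	uint32_t dummy_target = 0;

	printf("read 0x%08x\n", spider_style_readl(&dummy_target));
	return 0;
}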
index aca4c3db0dde3431b3ff96eb0567442641a42f65..b43466ba8096259222d45c51a8a6ba884c15d577 100644 (file)
 /*
  * IOMMU implementation for Cell Broadband Processor Architecture
- * We just establish a linear mapping at boot by setting all the
- * IOPT cache entries in the CPU.
- * The mapping functions should be identical to pci_direct_iommu, 
- * except for the handling of the high order bit that is required
- * by the Spider bridge. These should be split into a separate
- * file at the point where we get a different bridge chip.
  *
- * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
- *                      Arnd Bergmann <arndb@de.ibm.com>
+ * (C) Copyright IBM Corporation 2006
  *
- * Based on linear mapping
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Author: Jeremy Kerr <jk@ozlabs.org>
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #undef DEBUG
 
 #include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
 
-#include <asm/sections.h>
-#include <asm/iommu.h>
-#include <asm/io.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
+#include <asm/iommu.h>
 #include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/abs_addr.h>
-#include <asm/system.h>
-#include <asm/ppc-pci.h>
+#include <asm/pci-bridge.h>
 #include <asm/udbg.h>
+#include <asm/of_platform.h>
+#include <asm/lmb.h>
 
-#include "iommu.h"
+#include "cbe_regs.h"
+#include "interrupt.h"
 
-static inline unsigned long 
-get_iopt_entry(unsigned long real_address, unsigned long ioid,
-                        unsigned long prot)
-{
-       return (prot & IOPT_PROT_MASK)
-            | (IOPT_COHERENT)
-            | (IOPT_ORDER_VC)
-            | (real_address & IOPT_RPN_MASK)
-            | (ioid & IOPT_IOID_MASK);
-}
+/* Define CELL_IOMMU_REAL_UNMAP to actually unmap unused pages
+ * instead of leaving them mapped to some dummy page. This can be
+ * enabled once the appropriate workarounds for spider bugs have
+ * been enabled
+ */
+#define CELL_IOMMU_REAL_UNMAP
 
-typedef struct {
-       unsigned long val;
-} ioste;
+/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
+ * IO PTEs based on the transfer direction. That can be enabled
+ * once spider-net has been fixed to pass the correct direction
+ * to the DMA mapping functions
+ */
+#define CELL_IOMMU_STRICT_PROTECTION
+
+
+#define NR_IOMMUS                      2
+
+/* IOC mmap registers */
+#define IOC_Reg_Size                   0x2000
+
+#define IOC_IOPT_CacheInvd             0x908
+#define IOC_IOPT_CacheInvd_NE_Mask     0xffe0000000000000ul
+#define IOC_IOPT_CacheInvd_IOPTE_Mask  0x000003fffffffff8ul
+#define IOC_IOPT_CacheInvd_Busy                0x0000000000000001ul
+
+#define IOC_IOST_Origin                        0x918
+#define IOC_IOST_Origin_E              0x8000000000000000ul
+#define IOC_IOST_Origin_HW             0x0000000000000800ul
+#define IOC_IOST_Origin_HL             0x0000000000000400ul
+
+#define IOC_IO_ExcpStat                        0x920
+#define IOC_IO_ExcpStat_V              0x8000000000000000ul
+#define IOC_IO_ExcpStat_SPF_Mask       0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_S          0x6000000000000000ul
+#define IOC_IO_ExcpStat_SPF_P          0x4000000000000000ul
+#define IOC_IO_ExcpStat_ADDR_Mask      0x00000007fffff000ul
+#define IOC_IO_ExcpStat_RW_Mask                0x0000000000000800ul
+#define IOC_IO_ExcpStat_IOID_Mask      0x00000000000007fful
+
+#define IOC_IO_ExcpMask                        0x928
+#define IOC_IO_ExcpMask_SFE            0x4000000000000000ul
+#define IOC_IO_ExcpMask_PFE            0x2000000000000000ul
+
+#define IOC_IOCmd_Offset               0x1000
+
+#define IOC_IOCmd_Cfg                  0xc00
+#define IOC_IOCmd_Cfg_TE               0x0000800000000000ul
+
+
+/* Segment table entries */
+#define IOSTE_V                        0x8000000000000000ul /* valid */
+#define IOSTE_H                        0x4000000000000000ul /* cache hint */
+#define IOSTE_PT_Base_RPN_Mask  0x3ffffffffffff000ul /* base RPN of IOPT */
+#define IOSTE_NPPT_Mask                0x0000000000000fe0ul /* no. pages in IOPT */
+#define IOSTE_PS_Mask          0x0000000000000007ul /* page size */
+#define IOSTE_PS_4K            0x0000000000000001ul /*   - 4kB  */
+#define IOSTE_PS_64K           0x0000000000000003ul /*   - 64kB */
+#define IOSTE_PS_1M            0x0000000000000005ul /*   - 1MB  */
+#define IOSTE_PS_16M           0x0000000000000007ul /*   - 16MB */
+
+/* Page table entries */
+#define IOPTE_PP_W             0x8000000000000000ul /* protection: write */
+#define IOPTE_PP_R             0x4000000000000000ul /* protection: read */
+#define IOPTE_M                        0x2000000000000000ul /* coherency required */
+#define IOPTE_SO_R             0x1000000000000000ul /* ordering: writes */
+#define IOPTE_SO_RW             0x1800000000000000ul /* ordering: r & w */
+#define IOPTE_RPN_Mask         0x07fffffffffff000ul /* RPN */
+#define IOPTE_H                        0x0000000000000800ul /* cache hint */
+#define IOPTE_IOID_Mask                0x00000000000007fful /* ioid */
+
+
+/* IOMMU sizing */
+#define IO_SEGMENT_SHIFT       28
+#define IO_PAGENO_BITS         (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+
+/* The high bit needs to be set on every DMA address */
+#define SPIDER_DMA_OFFSET      0x80000000ul
+
+struct iommu_window {
+       struct list_head list;
+       struct cbe_iommu *iommu;
+       unsigned long offset;
+       unsigned long size;
+       unsigned long pte_offset;
+       unsigned int ioid;
+       struct iommu_table table;
+};
 
-static inline ioste
-mk_ioste(unsigned long val)
-{
-       ioste ioste = { .val = val, };
-       return ioste;
-}
+#define NAMESIZE 8
+struct cbe_iommu {
+       int nid;
+       char name[NAMESIZE];
+       void __iomem *xlate_regs;
+       void __iomem *cmd_regs;
+       unsigned long *stab;
+       unsigned long *ptab;
+       void *pad_page;
+       struct list_head windows;
+};
+
+/* Static array of iommus, one per node
+ *   each contains a list of windows, keyed from dma_window property
+ *   - on bus setup, look for a matching window, or create one
+ *   - on dev setup, assign iommu_table ptr
+ */
+static struct cbe_iommu iommus[NR_IOMMUS];
+static int cbe_nr_iommus;
 
-static inline ioste
-get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
+static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
+               long n_ptes)
 {
-       unsigned long ps;
-       unsigned long iostep;
-       unsigned long nnpt;
-       unsigned long shift;
-
-       switch (page_size) {
-       case 0x1000000:
-               ps = IOST_PS_16M;
-               nnpt = 0;  /* one page per segment */
-               shift = 5; /* segment has 16 iopt entries */
-               break;
-
-       case 0x100000:
-               ps = IOST_PS_1M;
-               nnpt = 0;  /* one page per segment */
-               shift = 1; /* segment has 256 iopt entries */
-               break;
-
-       case 0x10000:
-               ps = IOST_PS_64K;
-               nnpt = 0x07; /* 8 pages per io page table */
-               shift = 0;   /* all entries are used */
-               break;
-
-       case 0x1000:
-               ps = IOST_PS_4K;
-               nnpt = 0x7f; /* 128 pages per io page table */
-               shift = 0;   /* all entries are used */
-               break;
-
-       default: /* not a known compile time constant */
-               {
-                       /* BUILD_BUG_ON() is not usable here */
-                       extern void __get_iost_entry_bad_page_size(void);
-                       __get_iost_entry_bad_page_size();
-               }
-               break;
-       }
+       unsigned long *reg, val;
+       long n;
 
-       iostep = iopt_base +
-                        /* need 8 bytes per iopte */
-                       (((io_address / page_size * 8)
-                        /* align io page tables on 4k page boundaries */
-                                << shift) 
-                        /* nnpt+1 pages go into each iopt */
-                                & ~(nnpt << 12));
-
-       nnpt++; /* this seems to work, but the documentation is not clear
-                  about wether we put nnpt or nnpt-1 into the ioste bits.
-                  In theory, this can't work for 4k pages. */
-       return mk_ioste(IOST_VALID_MASK
-                       | (iostep & IOST_PT_BASE_MASK)
-                       | ((nnpt << 5) & IOST_NNPT_MASK)
-                       | (ps & IOST_PS_MASK));
-}
+       reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
 
-/* compute the address of an io pte */
-static inline unsigned long
-get_ioptep(ioste iost_entry, unsigned long io_address)
-{
-       unsigned long iopt_base;
-       unsigned long page_size;
-       unsigned long page_number;
-       unsigned long iopt_offset;
-
-       iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
-       page_size = iost_entry.val & IOST_PS_MASK;
-
-       /* decode page size to compute page number */
-       page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
-       /* page number is an offset into the io page table */
-       iopt_offset = (page_number << 3) & 0x7fff8ul;
-       return iopt_base + iopt_offset;
-}
+       while (n_ptes > 0) {
+               /* we can invalidate up to 1 << 11 PTEs at once */
+               n = min(n_ptes, 1l << 11);
+               val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
+                       | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
+                       | IOC_IOPT_CacheInvd_Busy;
 
-/* compute the tag field of the iopt cache entry */
-static inline unsigned long
-get_ioc_tag(ioste iost_entry, unsigned long io_address)
-{
-       unsigned long iopte = get_ioptep(iost_entry, io_address);
+               out_be64(reg, val);
+               while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
+                       ;
 
-       return IOPT_VALID_MASK
-            | ((iopte & 0x00000000000000ff8ul) >> 3)
-            | ((iopte & 0x0000003fffffc0000ul) >> 9);
+               n_ptes -= n;
+               pte += n;
+       }
 }
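
For reference, the command word written to IOC_IOPT_CacheInvd above packs three things: the entry count in the NE field, the physical address of the first IOPTE, and the busy bit that kicks off the operation, after which the loop polls busy until the hardware clears it. A small standalone sketch of the composition, mirroring the code above (the IOPTE address and count are made up):

    #include <stdio.h>

    #define IOC_IOPT_CacheInvd_NE_Mask    0xffe0000000000000ul
    #define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
    #define IOC_IOPT_CacheInvd_Busy       0x0000000000000001ul

    int main(void)
    {
            unsigned long pte_phys = 0x12345678ul;  /* hypothetical IOPTE address */
            long n = 64;                            /* entries to invalidate */
            unsigned long val;

            val = (((unsigned long)n << 53) & IOC_IOPT_CacheInvd_NE_Mask)
                    | (pte_phys & IOC_IOPT_CacheInvd_IOPTE_Mask)
                    | IOC_IOPT_CacheInvd_Busy;
            printf("CacheInvd command word: 0x%016lx\n", val);
            return 0;
    }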
 
-/* compute the hashed 6 bit index for the 4-way associative pte cache */
-static inline unsigned long
-get_ioc_hash(ioste iost_entry, unsigned long io_address)
+static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
+               unsigned long uaddr, enum dma_data_direction direction)
 {
-       unsigned long iopte = get_ioptep(iost_entry, io_address);
-
-       return ((iopte & 0x000000000000001f8ul) >> 3)
-            ^ ((iopte & 0x00000000000020000ul) >> 17)
-            ^ ((iopte & 0x00000000000010000ul) >> 15)
-            ^ ((iopte & 0x00000000000008000ul) >> 13)
-            ^ ((iopte & 0x00000000000004000ul) >> 11)
-            ^ ((iopte & 0x00000000000002000ul) >> 9)
-            ^ ((iopte & 0x00000000000001000ul) >> 7);
+       int i;
+       unsigned long *io_pte, base_pte;
+       struct iommu_window *window =
+               container_of(tbl, struct iommu_window, table);
+
+       /* Implementing proper protection causes problems with the spidernet
+        * driver - check mapping directions later, but allow read & write by
+        * default for now. */
+#ifdef CELL_IOMMU_STRICT_PROTECTION
+       /* to avoid referencing a global, we use a trick here to set up the
+        * protection bits. "prot" is set up as 3 fields of 4 bits appended
+        * together, one for each of the 3 supported direction values. It is
+        * then shifted left so that the field matching the desired direction
+        * lands on the appropriate bits, and the other bits are masked out.
+        */
+       const unsigned long prot = 0xc48;
+       base_pte =
+               ((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
+               | IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
+#else
+       base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
+               (window->ioid & IOPTE_IOID_Mask);
+#endif
+
+       io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+
+       for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+               io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+       mb();
+
+       invalidate_tce_cache(window->iommu, io_pte, npages);
+
+       pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
+                index, npages, direction, base_pte);
 }
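
The 0xc48 constant above is dense enough to deserve a worked decode: it is three 4-bit fields (1100, 0100, 1000), and shifting it left by 52 + 4 * direction moves one of those fields up to the top of the word, where the mask keeps just bits 63:62 - the write and read protection bits. A standalone check of the three cases (not part of the patch; direction values follow enum dma_data_direction):

    #include <stdio.h>

    #define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
    #define IOPTE_PP_R 0x4000000000000000ul /* protection: read */

    int main(void)
    {
            const unsigned long prot = 0xc48;
            const char *dirs[] = { "BIDIRECTIONAL", "TO_DEVICE", "FROM_DEVICE" };
            int d;

            for (d = 0; d < 3; d++) {
                    unsigned long bits = (prot << (52 + 4 * d))
                            & (IOPTE_PP_W | IOPTE_PP_R);
                    printf("DMA_%-13s -> %s%s\n", dirs[d],
                           (bits & IOPTE_PP_R) ? "R" : "",
                           (bits & IOPTE_PP_W) ? "W" : "");
            }
            return 0;  /* prints RW, R and W respectively */
    }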
 
-/* same as above, but pretend that we have a simpler 1-way associative
-   pte cache with an 8 bit index */
-static inline unsigned long
-get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
+static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
 {
-       unsigned long iopte = get_ioptep(iost_entry, io_address);
-
-       return ((iopte & 0x000000000000001f8ul) >> 3)
-            ^ ((iopte & 0x00000000000020000ul) >> 17)
-            ^ ((iopte & 0x00000000000010000ul) >> 15)
-            ^ ((iopte & 0x00000000000008000ul) >> 13)
-            ^ ((iopte & 0x00000000000004000ul) >> 11)
-            ^ ((iopte & 0x00000000000002000ul) >> 9)
-            ^ ((iopte & 0x00000000000001000ul) >> 7)
-            ^ ((iopte & 0x0000000000000c000ul) >> 8);
-}
 
-static inline ioste
-get_iost_cache(void __iomem *base, unsigned long index)
-{
-       unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
-       return mk_ioste(in_be64(&p[index]));
-}
+       int i;
+       unsigned long *io_pte, pte;
+       struct iommu_window *window =
+               container_of(tbl, struct iommu_window, table);
 
-static inline void
-set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
-{
-       unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
-       pr_debug("ioste %02lx was %016lx, store %016lx", index,
-                       get_iost_cache(base, index).val, ste.val);
-       out_be64(&p[index], ste.val);
-       pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
-}
+       pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
 
-static inline unsigned long
-get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
-{
-       unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
-       unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);   
+#ifdef CELL_IOMMU_REAL_UNMAP
+       pte = 0;
+#else
+       /* spider bridge does PCI reads after freeing - insert a mapping
+        * to a scratch page instead of an invalid entry */
+       pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
+               | (window->ioid & IOPTE_IOID_Mask);
+#endif
 
-       *tag = tags[index];
-       rmb();
-       return *p;
-}
+       io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
 
-static inline void
-set_iopt_cache(void __iomem *base, unsigned long index,
-                unsigned long tag, unsigned long val)
-{
-       unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
-       unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
+       for (i = 0; i < npages; i++)
+               io_pte[i] = pte;
+
+       mb();
 
-       out_be64(p, val);
-       out_be64(&tags[index], tag);
+       invalidate_tce_cache(window->iommu, io_pte, npages);
 }
 
-static inline void
-set_iost_origin(void __iomem *base)
+static irqreturn_t ioc_interrupt(int irq, void *data)
 {
-       unsigned long __iomem *p = base + IOC_ST_ORIGIN;
-       unsigned long origin = IOSTO_ENABLE | IOSTO_SW;
-
-       pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
-       out_be64(p, origin);
+       unsigned long stat;
+       struct cbe_iommu *iommu = data;
+
+       stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+
+       /* Might want to rate limit it */
+       printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
+       printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
+              !!(stat & IOC_IO_ExcpStat_V),
+              (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
+              (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
+              (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
+              (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
+       printk(KERN_ERR "  page=0x%016lx\n",
+              stat & IOC_IO_ExcpStat_ADDR_Mask);
+
+       /* clear interrupt */
+       stat &= ~IOC_IO_ExcpStat_V;
+       out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
+
+       return IRQ_HANDLED;
 }
 
-static inline void
-set_iocmd_config(void __iomem *base)
+static int cell_iommu_find_ioc(int nid, unsigned long *base)
 {
-       unsigned long __iomem *p = base + 0xc00;
-       unsigned long conf;
+       struct device_node *np;
+       struct resource r;
+
+       *base = 0;
+
+       /* First look for new style /be nodes */
+       for_each_node_by_name(np, "ioc") {
+               if (of_node_to_nid(np) != nid)
+                       continue;
+               if (of_address_to_resource(np, 0, &r)) {
+                       printk(KERN_ERR "iommu: can't get address for %s\n",
+                              np->full_name);
+                       continue;
+               }
+               *base = r.start;
+               of_node_put(np);
+               return 0;
+       }
 
-       conf = in_be64(p);
-       pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
-       out_be64(p, conf | IOCMD_CONF_TE);
+       /* Ok, let's try the old way */
+       for_each_node_by_type(np, "cpu") {
+               const unsigned int *nidp;
+               const unsigned long *tmp;
+
+               nidp = get_property(np, "node-id", NULL);
+               if (nidp && *nidp == nid) {
+                       tmp = get_property(np, "ioc-translation", NULL);
+                       if (tmp) {
+                               *base = *tmp;
+                               of_node_put(np);
+                               return 0;
+                       }
+               }
+       }
+
+       return -ENODEV;
 }
 
-static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
+static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long size)
 {
-       set_iocmd_config(base);
-       set_iost_origin(mmio_base);
-}
+       struct page *page;
+       int ret, i;
+       unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages;
+       unsigned long xlate_base;
+       unsigned int virq;
+
+       if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
+               panic("%s: missing IOC register mappings for node %d\n",
+                     __FUNCTION__, iommu->nid);
+
+       iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
+       iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
+
+       segments = size >> IO_SEGMENT_SHIFT;
+       pages_per_segment = 1ull << IO_PAGENO_BITS;
+
+       pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
+                       __FUNCTION__, iommu->nid, segments, pages_per_segment);
+
+       /* set up the segment table */
+       page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+       BUG_ON(!page);
+       iommu->stab = page_address(page);
+       clear_page(iommu->stab);
+
+       /* ... and the page tables. Since these are contiguous, we can treat
+        * the page tables as one array of ptes, like pSeries does.
+        */
+       ptab_size = segments * pages_per_segment * sizeof(unsigned long);
+       pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
+                       iommu->nid, ptab_size, get_order(ptab_size));
+       page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
+       BUG_ON(!page);
+
+       iommu->ptab = page_address(page);
+       memset(iommu->ptab, 0, ptab_size);
+
+       /* allocate a bogus page for the end of each mapping */
+       page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+       BUG_ON(!page);
+       iommu->pad_page = page_address(page);
+       clear_page(iommu->pad_page);
+
+       /* number of pages needed for a page table */
+       n_pte_pages = (pages_per_segment *
+                      sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+
+       pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
+                       __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+                       n_pte_pages);
+
+       /* initialise the STEs */
+       reg = IOSTE_V | ((n_pte_pages - 1) << 5);
+
+       if (IOMMU_PAGE_SIZE == 0x1000)
+               reg |= IOSTE_PS_4K;
+       else if (IOMMU_PAGE_SIZE == 0x10000)
+               reg |= IOSTE_PS_64K;
+       else {
+               extern void __unknown_page_size_error(void);
+               __unknown_page_size_error();
+       }
+
+       pr_debug("Setting up IOMMU stab:\n");
+       for (i = 0; i * (1ul << IO_SEGMENT_SHIFT) < size; i++) {
+               iommu->stab[i] = reg |
+                       (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+               pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
+       }
 
-static void iommu_dev_setup_null(struct pci_dev *d) { }
-static void iommu_bus_setup_null(struct pci_bus *b) { }
+       /* ensure that the STEs have updated */
+       mb();
 
-struct cell_iommu {
-       unsigned long base;
-       unsigned long mmio_base;
-       void __iomem *mapped_base;
-       void __iomem *mapped_mmio_base;
-};
+       /* setup interrupts for the iommu. */
+       reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
+       out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
+                       reg & ~IOC_IO_ExcpStat_V);
+       out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
+                       IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
 
-static struct cell_iommu cell_iommus[NR_CPUS];
+       virq = irq_create_mapping(NULL,
+                       IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
+       BUG_ON(virq == NO_IRQ);
 
-/* initialize the iommu to support a simple linear mapping
- * for each DMA window used by any device. For now, we
- * happen to know that there is only one DMA window in use,
- * starting at iopt_phys_offset. */
-static void cell_do_map_iommu(struct cell_iommu *iommu,
-                             unsigned int ioid,
-                             unsigned long map_start,
-                             unsigned long map_size)
-{
-       unsigned long io_address, real_address;
-       void __iomem *ioc_base, *ioc_mmio_base;
-       ioste ioste;
-       unsigned long index;
+       ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
+                       iommu->name, iommu);
+       BUG_ON(ret);
 
-       /* we pretend the io page table was at a very high address */
-       const unsigned long fake_iopt = 0x10000000000ul;
-       const unsigned long io_page_size = 0x1000000; /* use 16M pages */
-       const unsigned long io_segment_size = 0x10000000; /* 256M */
-
-       ioc_base = iommu->mapped_base;
-       ioc_mmio_base = iommu->mapped_mmio_base;
-
-       for (real_address = 0, io_address = map_start;
-            io_address <= map_start + map_size;
-            real_address += io_page_size, io_address += io_page_size) {
-               ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
-               if ((real_address % io_segment_size) == 0) /* segment start */
-                       set_iost_cache(ioc_mmio_base,
-                                      io_address >> 28, ioste);
-               index = get_ioc_hash_1way(ioste, io_address);
-               pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
-                                        io_address, index, ioste.val);
-               set_iopt_cache(ioc_mmio_base,
-                       get_ioc_hash_1way(ioste, io_address),
-                       get_ioc_tag(ioste, io_address),
-                       get_iopt_entry(real_address, ioid, IOPT_PROT_RW));
-       }
+       /* set the IOC segment table origin register (and turn on the iommu) */
+       reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
+       out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
+       in_be64(iommu->xlate_regs + IOC_IOST_Origin);
+
+       /* turn on IO translation */
+       reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
+       out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
 }
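
To put numbers on the sizing above: with IO_SEGMENT_SHIFT of 28 (256MB segments) and 4K IOMMU pages, a hypothetical 512MB window needs two segment table entries, 64K PTEs per segment and a 1MB page table overall. Standalone arithmetic (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long size = 0x20000000ul;      /* hypothetical 512MB window */
            unsigned long io_segment_shift = 28;    /* 256MB per segment */
            unsigned long iommu_page_shift = 12;    /* 4K IOMMU pages */
            unsigned long segments, pages_per_segment, ptab_size, n_pte_pages;

            segments = size >> io_segment_shift;                              /* 2 */
            pages_per_segment = 1ul << (io_segment_shift - iommu_page_shift); /* 65536 */
            ptab_size = segments * pages_per_segment * sizeof(unsigned long); /* 1MB */
            n_pte_pages = (pages_per_segment * sizeof(unsigned long))
                    >> iommu_page_shift;                                      /* 128 */

            printf("segments=%lu ptes/segment=%lu ptab=%lu bytes pte pages/segment=%lu\n",
                   segments, pages_per_segment, ptab_size, n_pte_pages);
            return 0;
    }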
 
-static void iommu_devnode_setup(struct device_node *d)
+#if 0 /* Unused for now */
+static struct iommu_window *find_window(struct cbe_iommu *iommu,
+               unsigned long offset, unsigned long size)
 {
-       const unsigned int *ioid;
-       unsigned long map_start, map_size, token;
-       const unsigned long *dma_window;
-       struct cell_iommu *iommu;
+       struct iommu_window *window;
 
-       ioid = get_property(d, "ioid", NULL);
-       if (!ioid)
-               pr_debug("No ioid entry found !\n");
+       /* todo: check for overlapping (but not equal) windows */
 
-       dma_window = get_property(d, "ibm,dma-window", NULL);
-       if (!dma_window)
-               pr_debug("No ibm,dma-window entry found !\n");
+       list_for_each_entry(window, &(iommu->windows), list) {
+               if (window->offset == offset && window->size == size)
+                       return window;
+       }
 
-       map_start = dma_window[1];
-       map_size = dma_window[2];
-       token = dma_window[0] >> 32;
+       return NULL;
+}
+#endif
 
-       iommu = &cell_iommus[token];
+static struct iommu_window * __init
+cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
+                       unsigned long offset, unsigned long size,
+                       unsigned long pte_offset)
+{
+       struct iommu_window *window;
+       const unsigned int *ioid;
 
-       cell_do_map_iommu(iommu, *ioid, map_start, map_size);
+       ioid = get_property(np, "ioid", NULL);
+       if (ioid == NULL)
+               printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
+                      np->full_name);
+
+       window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
+       BUG_ON(window == NULL);
+
+       window->offset = offset;
+       window->size = size;
+       window->ioid = ioid ? *ioid : 0;
+       window->iommu = iommu;
+       window->pte_offset = pte_offset;
+
+       window->table.it_blocksize = 16;
+       window->table.it_base = (unsigned long)iommu->ptab;
+       window->table.it_index = iommu->nid;
+       window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
+               window->pte_offset;
+       window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+
+       iommu_init_table(&window->table, iommu->nid);
+
+       pr_debug("\tioid      %d\n", window->ioid);
+       pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
+       pr_debug("\tbase      0x%016lx\n", window->table.it_base);
+       pr_debug("\toffset    0x%lx\n", window->table.it_offset);
+       pr_debug("\tsize      %ld\n", window->table.it_size);
+
+       list_add(&window->list, &iommu->windows);
+
+       if (offset != 0)
+               return window;
+
+       /* We need to map and reserve the first IOMMU page since it's used
+        * by the spider workaround. In theory, we only need to do that when
+        * running on spider but it doesn't really matter.
+        *
+        * This code also assumes that we have a window that starts at 0,
+        * which is the case on all spider based blades.
+        */
+       __set_bit(0, window->table.it_map);
+       tce_build_cell(&window->table, window->table.it_offset, 1,
+                      (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
+       window->table.it_hint = window->table.it_blocksize;
+
+       return window;
 }
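
A side effect of passing SPIDER_DMA_OFFSET down as pte_offset is that the table's it_offset starts high enough that every DMA address handed out already has the high bit set, as the spider bridge requires. Illustrative arithmetic (assuming 4K IOMMU pages and a window starting at bus address 0):

    #include <stdio.h>

    int main(void)
    {
            unsigned long iommu_page_shift = 12;            /* 4K IOMMU pages */
            unsigned long spider_dma_offset = 0x80000000ul;
            unsigned long window_base = 0;                  /* assumed */
            unsigned long pte_offset, it_offset;

            pte_offset = spider_dma_offset >> iommu_page_shift;         /* 0x80000 */
            it_offset = (window_base >> iommu_page_shift) + pte_offset; /* 0x80000 */

            printf("first DMA page 0x%lx -> first DMA address 0x%lx\n",
                   it_offset, it_offset << iommu_page_shift);  /* 0x80000000 */
            return 0;
    }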
 
-static void iommu_bus_setup(struct pci_bus *b)
+static struct cbe_iommu *cell_iommu_for_node(int nid)
 {
-       struct device_node *d = (struct device_node *)b->sysdata;
-       iommu_devnode_setup(d);
-}
+       int i;
 
+       for (i = 0; i < cbe_nr_iommus; i++)
+               if (iommus[i].nid == nid)
+                       return &iommus[i];
+       return NULL;
+}
 
-static int cell_map_iommu_hardcoded(int num_nodes)
+static void cell_dma_dev_setup(struct device *dev)
 {
-       struct cell_iommu *iommu = NULL;
-
-       pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
+       struct iommu_window *window;
+       struct cbe_iommu *iommu;
+       struct dev_archdata *archdata = &dev->archdata;
+
+       /* If we run without iommu, no need to do anything */
+       if (pci_dma_ops == &dma_direct_ops)
+               return;
+
+       /* The current implementation uses the first window available in that
+        * node's iommu. We -might- do something smarter later, though it may
+        * never be necessary.
+        */
+       iommu = cell_iommu_for_node(archdata->numa_node);
+       if (iommu == NULL || list_empty(&iommu->windows)) {
+               printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
+                      archdata->of_node ? archdata->of_node->full_name : "?",
+                      archdata->numa_node);
+               return;
+       }
+       window = list_entry(iommu->windows.next, struct iommu_window, list);
 
-       /* node 0 */
-       iommu = &cell_iommus[0];
-       iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
-       iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
+       archdata->dma_data = &window->table;
+}
 
-       enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+static void cell_pci_dma_dev_setup(struct pci_dev *dev)
+{
+       cell_dma_dev_setup(&dev->dev);
+}
 
-       cell_do_map_iommu(iommu, 0x048a,
-                         0x20000000ul,0x20000000ul);
+static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
+                             void *data)
+{
+       struct device *dev = data;
 
-       if (num_nodes < 2)
+       /* We are only interested in device addition */
+       if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;
 
-       /* node 1 */
-       iommu = &cell_iommus[1];
-       iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
-       iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
-
-       enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+       /* We use the PCI DMA ops */
+       dev->archdata.dma_ops = pci_dma_ops;
 
-       cell_do_map_iommu(iommu, 0x048a,
-                         0x20000000,0x20000000ul);
+       cell_dma_dev_setup(dev);
 
        return 0;
 }
 
+static struct notifier_block cell_of_bus_notifier = {
+       .notifier_call = cell_of_bus_notify
+};
 
-static int cell_map_iommu(void)
+static int __init cell_iommu_get_window(struct device_node *np,
+                                        unsigned long *base,
+                                        unsigned long *size)
 {
-       unsigned int num_nodes = 0;
-       const unsigned int *node_id;
-       const unsigned long *base, *mmio_base;
-       struct device_node *dn;
-       struct cell_iommu *iommu = NULL;
-
-       /* determine number of nodes (=iommus) */
-       pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
-       for(dn = of_find_node_by_type(NULL, "cpu");
-           dn;
-           dn = of_find_node_by_type(dn, "cpu")) {
-               node_id = get_property(dn, "node-id", NULL);
-
-               if (num_nodes < *node_id)
-                       num_nodes = *node_id;
-               }
-
-       num_nodes++;
-       pr_debug("%i found.\n", num_nodes);
+       const void *dma_window;
+       unsigned long index;
 
-       /* map the iommu registers for each node */
-       pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
-       for(dn = of_find_node_by_type(NULL, "cpu");
-           dn;
-           dn = of_find_node_by_type(dn, "cpu")) {
+       /* Use ibm,dma-window if available, else hard code it ! */
+       dma_window = get_property(np, "ibm,dma-window", NULL);
+       if (dma_window == NULL) {
+               *base = 0;
+               *size = 0x80000000u;
+               return -ENODEV;
+       }
 
-               node_id = get_property(dn, "node-id", NULL);
-               base = get_property(dn, "ioc-cache", NULL);
-               mmio_base = get_property(dn, "ioc-translation", NULL);
+       of_parse_dma_window(np, dma_window, &index, base, size);
+       return 0;
+}
 
-               if (!base || !mmio_base || !node_id)
-                       return cell_map_iommu_hardcoded(num_nodes);
+static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset)
+{
+       struct cbe_iommu *iommu;
+       unsigned long base, size;
+       int nid, i;
+
+       /* Get node ID */
+       nid = of_node_to_nid(np);
+       if (nid < 0) {
+               printk(KERN_ERR "iommu: failed to get node for %s\n",
+                      np->full_name);
+               return;
+       }
+       pr_debug("iommu: setting up iommu for node %d (%s)\n",
+                nid, np->full_name);
+
+       /* XXX todo: If we can have multiple windows on the same IOMMU, which
+        * isn't the case today, we probably want to check here whether the
+        * iommu for that node is already set up.
+        * However, there might be issues with getting the size right, so let's
+        * ignore that for now. We might want to completely get rid of the
+        * multiple window support since the cell iommu supports per-page ioids.
+        */
+
+       if (cbe_nr_iommus >= NR_IOMMUS) {
+               printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
+                      np->full_name);
+               return;
+       }
 
-               iommu = &cell_iommus[*node_id];
-               iommu->base = *base;
-               iommu->mmio_base = *mmio_base;
+       /* Init base fields */
+       i = cbe_nr_iommus++;
+       iommu = &iommus[i];
+       iommu->stab = 0;
+       iommu->nid = nid;
+       snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
+       INIT_LIST_HEAD(&iommu->windows);
 
-               iommu->mapped_base = ioremap(*base, 0x1000);
-               iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000);
+       /* Obtain a window for it */
+       cell_iommu_get_window(np, &base, &size);
 
-               enable_mapping(iommu->mapped_base,
-                              iommu->mapped_mmio_base);
+       pr_debug("\ttranslating window 0x%lx...0x%lx\n",
+                base, base + size - 1);
 
-               /* everything else will be done in iommu_bus_setup */
-       }
+       /* Initialize the hardware */
+       cell_iommu_setup_hardware(iommu, size);
 
-       return 1;
+       /* Setup the iommu_table */
+       cell_iommu_setup_window(iommu, np, base, size,
+                               offset >> IOMMU_PAGE_SHIFT);
 }
 
-static void *cell_alloc_coherent(struct device *hwdev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag)
+static void __init cell_disable_iommus(void)
 {
-       void *ret;
-
-       ret = (void *)__get_free_pages(flag, get_order(size));
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_abs(ret) | CELL_DMA_VALID;
+       int node;
+       unsigned long base, val;
+       void __iomem *xregs, *cregs;
+
+       /* Make sure IOC translation is disabled on all nodes */
+       for_each_online_node(node) {
+               if (cell_iommu_find_ioc(node, &base))
+                       continue;
+               xregs = ioremap(base, IOC_Reg_Size);
+               if (xregs == NULL)
+                       continue;
+               cregs = xregs + IOC_IOCmd_Offset;
+
+               pr_debug("iommu: cleaning up iommu on node %d\n", node);
+
+               out_be64(xregs + IOC_IOST_Origin, 0);
+               (void)in_be64(xregs + IOC_IOST_Origin);
+               val = in_be64(cregs + IOC_IOCmd_Cfg);
+               val &= ~IOC_IOCmd_Cfg_TE;
+               out_be64(cregs + IOC_IOCmd_Cfg, val);
+               (void)in_be64(cregs + IOC_IOCmd_Cfg);
+
+               iounmap(xregs);
        }
-       return ret;
 }
 
-static void cell_free_coherent(struct device *hwdev, size_t size,
-                                void *vaddr, dma_addr_t dma_handle)
+static int __init cell_iommu_init_disabled(void)
 {
-       free_pages((unsigned long)vaddr, get_order(size));
-}
+       struct device_node *np = NULL;
+       unsigned long base = 0, size;
+
+       /* When no iommu is present, we use direct DMA ops */
+       pci_dma_ops = &dma_direct_ops;
+
+       /* First make sure all IOC translation is turned off */
+       cell_disable_iommus();
+
+       /* If we have no Axon, we set up the spider DMA magic offset */
+       if (of_find_node_by_name(NULL, "axon") == NULL)
+               dma_direct_offset = SPIDER_DMA_OFFSET;
+
+       /* Now we need to check where the memory is mapped
+        * in PCI space. We assume that all busses use the same DMA
+        * window, which has always been the case so far on Cell, so we
+        * pick up the first pci-internal node we can find and check
+        * the DMA window from there.
+        */
+       for_each_node_by_name(np, "axon") {
+               if (np->parent == NULL || np->parent->parent != NULL)
+                       continue;
+               if (cell_iommu_get_window(np, &base, &size) == 0)
+                       break;
+       }
+       if (np == NULL) {
+               for_each_node_by_name(np, "pci-internal") {
+                       if (np->parent == NULL || np->parent->parent != NULL)
+                               continue;
+                       if (cell_iommu_get_window(np, &base, &size) == 0)
+                               break;
+               }
+       }
+       of_node_put(np);
+
+       /* If we found a DMA window, we check if it's big enough to enclose
+        * all of physical memory. If not, we force-enable the IOMMU.
+        */
+       if (np && size < lmb_end_of_DRAM()) {
+               printk(KERN_WARNING "iommu: force-enabled, dma window"
+                      " (%ldMB) smaller than total memory (%ldMB)\n",
+                      size >> 20, lmb_end_of_DRAM() >> 20);
+               return -ENODEV;
+       }
 
-static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
-               size_t size, enum dma_data_direction direction)
-{
-       return virt_to_abs(ptr) | CELL_DMA_VALID;
-}
+       dma_direct_offset += base;
 
-static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
-               size_t size, enum dma_data_direction direction)
-{
+       printk("iommu: disabled, direct DMA offset is 0x%lx\n",
+              dma_direct_offset);
+
+       return 0;
 }
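
In this disabled case the resulting dma_direct_offset is simply added to every physical address to form the bus address. For a spider based blade (no axon node) with a hypothetical DMA window base of 0x20000000, that works out as follows (sketch only; the window base is an assumption):

    #include <stdio.h>

    int main(void)
    {
            unsigned long dma_direct_offset = 0x80000000ul; /* SPIDER_DMA_OFFSET */
            unsigned long window_base = 0x20000000ul;       /* hypothetical */
            unsigned long phys = 0x1000;                    /* some buffer */

            dma_direct_offset += window_base;
            printf("phys 0x%lx -> bus 0x%lx\n", phys, phys + dma_direct_offset);
            return 0;                                       /* bus 0xa0001000 */
    }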
 
-static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
-               int nents, enum dma_data_direction direction)
+static int __init cell_iommu_init(void)
 {
-       int i;
+       struct device_node *np;
+
+       if (!machine_is(cell))
+               return -ENODEV;
+
+       /* If the IOMMU is disabled or we have little enough RAM to not need
+        * to enable it, we set up a direct mapping.
+        *
+        * Note: should we make sure we have the IOMMU actually disabled ?
+        */
+       if (iommu_is_off ||
+           (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
+               if (cell_iommu_init_disabled() == 0)
+                       goto bail;
+
+       /* Setup various ppc_md. callbacks */
+       ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+       ppc_md.tce_build = tce_build_cell;
+       ppc_md.tce_free = tce_free_cell;
+
+       /* Create an iommu for each /axon node.  */
+       for_each_node_by_name(np, "axon") {
+               if (np->parent == NULL || np->parent->parent != NULL)
+                       continue;
+               cell_iommu_init_one(np, 0);
+       }
 
-       for (i = 0; i < nents; i++, sg++) {
-               sg->dma_address = (page_to_phys(sg->page) + sg->offset)
-                                       | CELL_DMA_VALID;
-               sg->dma_length = sg->length;
+       /* Create an iommu for each toplevel /pci-internal node for
+        * old hardware/firmware
+        */
+       for_each_node_by_name(np, "pci-internal") {
+               if (np->parent == NULL || np->parent->parent != NULL)
+                       continue;
+               cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
        }
 
-       return nents;
-}
+       /* Setup default PCI iommu ops */
+       pci_dma_ops = &dma_iommu_ops;
 
-static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-               int nents, enum dma_data_direction direction)
-{
-}
+ bail:
+       /* Register callbacks on OF platform device addition/removal
+        * to handle linking them to the right DMA operations
+        */
+       bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);
 
-static int cell_dma_supported(struct device *dev, u64 mask)
-{
-       return mask < 0x100000000ull;
+       return 0;
 }
+arch_initcall(cell_iommu_init);
 
-static struct dma_mapping_ops cell_iommu_ops = {
-       .alloc_coherent = cell_alloc_coherent,
-       .free_coherent = cell_free_coherent,
-       .map_single = cell_map_single,
-       .unmap_single = cell_unmap_single,
-       .map_sg = cell_map_sg,
-       .unmap_sg = cell_unmap_sg,
-       .dma_supported = cell_dma_supported,
-};
-
-void cell_init_iommu(void)
-{
-       int setup_bus = 0;
-
-       if (of_find_node_by_path("/mambo")) {
-               pr_info("Not using iommu on systemsim\n");
-       } else {
-
-               if (!(of_chosen &&
-                     get_property(of_chosen, "linux,iommu-off", NULL)))
-                       setup_bus = cell_map_iommu();
-
-               if (setup_bus) {
-                       pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
-                       ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-                       ppc_md.iommu_bus_setup = iommu_bus_setup;
-               } else {
-                       pr_debug("%s: IOMMU mapping activated, "
-                                "no device action necessary\n", __FUNCTION__);
-                       /* Direct I/O, IOMMU off */
-                       ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-                       ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-               }
-       }
-
-       pci_dma_ops = cell_iommu_ops;
-}
diff --git a/arch/powerpc/platforms/cell/iommu.h b/arch/powerpc/platforms/cell/iommu.h
deleted file mode 100644 (file)
index 490d77a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef CELL_IOMMU_H
-#define CELL_IOMMU_H
-
-/* some constants */
-enum {
-       /* segment table entries */
-       IOST_VALID_MASK   = 0x8000000000000000ul,
-       IOST_TAG_MASK     = 0x3000000000000000ul,
-       IOST_PT_BASE_MASK = 0x000003fffffff000ul,
-       IOST_NNPT_MASK    = 0x0000000000000fe0ul,
-       IOST_PS_MASK      = 0x000000000000000ful,
-
-       IOST_PS_4K        = 0x1,
-       IOST_PS_64K       = 0x3,
-       IOST_PS_1M        = 0x5,
-       IOST_PS_16M       = 0x7,
-
-       /* iopt tag register */
-       IOPT_VALID_MASK   = 0x0000000200000000ul,
-       IOPT_TAG_MASK     = 0x00000001fffffffful,
-
-       /* iopt cache register */
-       IOPT_PROT_MASK    = 0xc000000000000000ul,
-       IOPT_PROT_NONE    = 0x0000000000000000ul,
-       IOPT_PROT_READ    = 0x4000000000000000ul,
-       IOPT_PROT_WRITE   = 0x8000000000000000ul,
-       IOPT_PROT_RW      = 0xc000000000000000ul,
-       IOPT_COHERENT     = 0x2000000000000000ul,
-       
-       IOPT_ORDER_MASK   = 0x1800000000000000ul,
-       /* order access to same IOID/VC on same address */
-       IOPT_ORDER_ADDR   = 0x0800000000000000ul,
-       /* similar, but only after a write access */
-       IOPT_ORDER_WRITES = 0x1000000000000000ul,
-       /* Order all accesses to same IOID/VC */
-       IOPT_ORDER_VC     = 0x1800000000000000ul,
-       
-       IOPT_RPN_MASK     = 0x000003fffffff000ul,
-       IOPT_HINT_MASK    = 0x0000000000000800ul,
-       IOPT_IOID_MASK    = 0x00000000000007fful,
-
-       IOSTO_ENABLE      = 0x8000000000000000ul,
-       IOSTO_ORIGIN      = 0x000003fffffff000ul,
-       IOSTO_HW          = 0x0000000000000800ul,
-       IOSTO_SW          = 0x0000000000000400ul,
-
-       IOCMD_CONF_TE     = 0x0000800000000000ul,
-
-       /* memory mapped registers */
-       IOC_PT_CACHE_DIR  = 0x000,
-       IOC_ST_CACHE_DIR  = 0x800,
-       IOC_PT_CACHE_REG  = 0x910,
-       IOC_ST_ORIGIN     = 0x918,
-       IOC_CONF          = 0x930,
-
-       /* The high bit needs to be set on every DMA address,
-          only 2GB are addressable */
-       CELL_DMA_VALID    = 0x80000000,
-       CELL_DMA_MASK     = 0x7fffffff,
-};
-
-
-void cell_init_iommu(void);
-
-#endif
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 9f2e4ed20a57d8af2af6cc76ca4586ce7e793681..8c20f0fb8651f23b204f455bbf75a23581bef746 100644 (file)
 #include "pervasive.h"
 #include "cbe_regs.h"
 
-static DEFINE_SPINLOCK(cbe_pervasive_lock);
-
-static void __init cbe_enable_pause_zero(void)
+static void cbe_power_save(void)
 {
-       unsigned long thread_switch_control;
-       unsigned long temp_register;
-       struct cbe_pmd_regs __iomem *pregs;
-
-       spin_lock_irq(&cbe_pervasive_lock);
-       pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
-       if (pregs == NULL)
-               goto out;
+       unsigned long ctrl, thread_switch_control;
 
-       pr_debug("Power Management: CPU %d\n", smp_processor_id());
-
-        /* Enable Pause(0) control bit */
-       temp_register = in_be64(&pregs->pm_control);
+       /*
+        * We need to hard disable interrupts, but we also need to mark them
+        * hard disabled in the PACA so that the local_irq_enable() done by
+        * our caller upon return properly hard enables.
+        */
+       hard_irq_disable();
+       get_paca()->hard_enabled = 0;
 
-       out_be64(&pregs->pm_control,
-                temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
+       ctrl = mfspr(SPRN_CTRLF);
 
        /* Enable DEC and EE interrupt request */
        thread_switch_control  = mfspr(SPRN_TSC_CELL);
        thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
 
-       switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
+       switch (ctrl & CTRL_CT) {
        case CTRL_CT0:
                thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
                break;
@@ -75,58 +68,21 @@ static void __init cbe_enable_pause_zero(void)
                        __FUNCTION__);
                break;
        }
-
        mtspr(SPRN_TSC_CELL, thread_switch_control);
 
-out:
-       spin_unlock_irq(&cbe_pervasive_lock);
-}
-
-static void cbe_idle(void)
-{
-       unsigned long ctrl;
+       /*
+        * go into low thread priority; medium priority will be
+        * restored for us after wake-up.
+        */
+       HMT_low();
 
-       /* Why do we do that on every idle ? Couldn't that be done once for
-        * all or do we lose the state some way ? Also, the pm_control
-        * register setting, that can't be set once at boot ? We really want
-        * to move that away in order to implement a simple powersave
+       /*
+        * atomically disable thread execution and runlatch.
+        * External and Decrementer exceptions are still handled when the
+        * thread is disabled, but now enter cbe_system_reset_exception()
         */
-       cbe_enable_pause_zero();
-
-       while (1) {
-               if (!need_resched()) {
-                       local_irq_disable();
-                       while (!need_resched()) {
-                               /* go into low thread priority */
-                               HMT_low();
-
-                               /*
-                                * atomically disable thread execution
-                                * and runlatch.
-                                * External and Decrementer exceptions
-                                * are still handled when the thread
-                                * is disabled but now enter in
-                                * cbe_system_reset_exception()
-                                */
-                               ctrl = mfspr(SPRN_CTRLF);
-                               ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
-                               mtspr(SPRN_CTRLT, ctrl);
-                       }
-                       /* restore thread prio */
-                       HMT_medium();
-                       local_irq_enable();
-               }
-
-               /*
-                * turn runlatch on again before scheduling the
-                * process we just woke up
-                */
-               ppc64_runlatch_on();
-
-               preempt_enable_no_resched();
-               schedule();
-               preempt_disable();
-       }
+       ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+       mtspr(SPRN_CTRLT, ctrl);
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
@@ -158,9 +114,20 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
 
 void __init cbe_pervasive_init(void)
 {
+       int cpu;
        if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
                return;
 
-       ppc_md.idle_loop = cbe_idle;
+       for_each_possible_cpu(cpu) {
+               struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
+               if (!regs)
+                       continue;
+
+                /* Enable Pause(0) control bit */
+               out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
+                                           CBE_PMD_PAUSE_ZERO_CONTROL);
+       }
+
+       ppc_md.power_save = cbe_power_save;
        ppc_md.system_reset_exception = cbe_system_reset_exception;
 }
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
new file mode 100644 (file)
index 0000000..99c6120
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2001,2006
+ *
+ * Author:
+ *    David Erb (djerb@us.ibm.com)
+ *    Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/irq_regs.h>
+#include <asm/machdep.h>
+#include <asm/pmc.h>
+#include <asm/reg.h>
+#include <asm/spu.h>
+
+#include "cbe_regs.h"
+#include "interrupt.h"
+
+/*
+ * When writing to write-only mmio addresses, save a shadow copy. All of the
+ * registers are 32-bit, but stored in the upper-half of a 64-bit field in
+ * pmd_regs.
+ */
+
+#define WRITE_WO_MMIO(reg, x)                                  \
+       do {                                                    \
+               u32 _x = (x);                                   \
+               struct cbe_pmd_regs __iomem *pmd_regs;          \
+               struct cbe_pmd_shadow_regs *shadow_regs;        \
+               pmd_regs = cbe_get_cpu_pmd_regs(cpu);           \
+               shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
+               out_be64(&(pmd_regs->reg), (((u64)_x) << 32));  \
+               shadow_regs->reg = _x;                          \
+       } while (0)
+
+#define READ_SHADOW_REG(val, reg)                              \
+       do {                                                    \
+               struct cbe_pmd_shadow_regs *shadow_regs;        \
+               shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
+               (val) = shadow_regs->reg;                       \
+       } while (0)
+
+#define READ_MMIO_UPPER32(val, reg)                            \
+       do {                                                    \
+               struct cbe_pmd_regs __iomem *pmd_regs;          \
+               pmd_regs = cbe_get_cpu_pmd_regs(cpu);           \
+               (val) = (u32)(in_be64(&pmd_regs->reg) >> 32);   \
+       } while (0)
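+
The three macros above implement a common pattern for write-only hardware registers: every write is mirrored into an ordinary memory shadow so that a later read can return the last value written instead of touching the device. A minimal sketch of the idea (hypothetical types, not the kernel API):

    #include <stdio.h>

    struct wo_reg {
            unsigned long long mmio; /* stands in for a write-only MMIO register */
            unsigned int shadow;     /* last value written */
    };

    static void wo_write(struct wo_reg *r, unsigned int val)
    {
            r->mmio = (unsigned long long)val << 32; /* value lives in the upper half */
            r->shadow = val;                         /* remember what we wrote */
    }

    static unsigned int wo_read(struct wo_reg *r)
    {
            return r->shadow;        /* the hardware cannot be read back */
    }

    int main(void)
    {
            struct wo_reg reg = { 0, 0 };

            wo_write(&reg, 0x1234);
            printf("read back 0x%x\n", wo_read(&reg)); /* 0x1234 */
            return 0;
    }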
+
+/*
+ * Physical counter registers.
+ * Each physical counter can act as one 32-bit counter or two 16-bit counters.
+ */
+
+u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
+{
+       u32 val_in_latch, val = 0;
+
+       if (phys_ctr < NR_PHYS_CTRS) {
+               READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
+
+               /* Read the latch or the actual counter, whichever is newer. */
+               if (val_in_latch & (1 << phys_ctr)) {
+                       READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
+               } else {
+                       READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
+               }
+       }
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
+
+void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
+{
+       struct cbe_pmd_shadow_regs *shadow_regs;
+       u32 pm_ctrl;
+
+       if (phys_ctr < NR_PHYS_CTRS) {
+               /* Writing to a counter only writes to a hardware latch.
+                * The new value is not propagated to the actual counter
+                * until the performance monitor is enabled.
+                */
+               WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
+
+               pm_ctrl = cbe_read_pm(cpu, pm_control);
+               if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
+                       /* The counters are already active, so we need to
+                        * rewrite the pm_control register to "re-enable"
+                        * the PMU.
+                        */
+                       cbe_write_pm(cpu, pm_control, pm_ctrl);
+               } else {
+                       shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+                       shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
+
+/*
+ * "Logical" counter registers.
+ * These will read/write 16-bits or 32-bits depending on the
+ * current size of the counter. Counters 4 - 7 are always 16-bit.
+ */
+
+u32 cbe_read_ctr(u32 cpu, u32 ctr)
+{
+       u32 val;
+       u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+       val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+       if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
+               val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_ctr);
+
+void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
+{
+       u32 phys_ctr;
+       u32 phys_val;
+
+       phys_ctr = ctr & (NR_PHYS_CTRS - 1);
+
+       if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
+               phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
+
+               if (ctr < NR_PHYS_CTRS)
+                       val = (val << 16) | (phys_val & 0xffff);
+               else
+                       val = (val & 0xffff) | (phys_val & 0xffff0000);
+       }
+
+       cbe_write_phys_ctr(cpu, phys_ctr, val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_ctr);
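
When a physical counter is split into two 16-bit halves, the write above splices the new value into one half and preserves the other: logical counters below NR_PHYS_CTRS map to the upper half, the rest to the lower half. A standalone illustration (values made up; NR_PHYS_CTRS assumed to be 4):

    #include <stdio.h>

    int main(void)
    {
            unsigned int phys_val = 0x11112222; /* current 32-bit physical counter */
            unsigned int val = 0xabcd;          /* new 16-bit logical value */
            unsigned int upper, lower;

            /* logical counter < NR_PHYS_CTRS: replace the upper half */
            upper = (val << 16) | (phys_val & 0xffff);
            /* logical counter >= NR_PHYS_CTRS: replace the lower half */
            lower = (val & 0xffff) | (phys_val & 0xffff0000);

            printf("upper-half write -> 0x%08x, lower-half write -> 0x%08x\n",
                   upper, lower);               /* 0xabcd2222, 0x1111abcd */
            return 0;
    }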
+
+/*
+ * Counter-control registers.
+ * Each "logical" counter has a corresponding control register.
+ */
+
+u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
+{
+       u32 pm07_control = 0;
+
+       if (ctr < NR_CTRS)
+               READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
+
+       return pm07_control;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
+
+void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
+{
+       if (ctr < NR_CTRS)
+               WRITE_WO_MMIO(pm07_control[ctr], val);
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
+
+/*
+ * Other PMU control registers. Most of these are write-only.
+ */
+
+u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
+{
+       u32 val = 0;
+
+       switch (reg) {
+       case group_control:
+               READ_SHADOW_REG(val, group_control);
+               break;
+
+       case debug_bus_control:
+               READ_SHADOW_REG(val, debug_bus_control);
+               break;
+
+       case trace_address:
+               READ_MMIO_UPPER32(val, trace_address);
+               break;
+
+       case ext_tr_timer:
+               READ_SHADOW_REG(val, ext_tr_timer);
+               break;
+
+       case pm_status:
+               READ_MMIO_UPPER32(val, pm_status);
+               break;
+
+       case pm_control:
+               READ_SHADOW_REG(val, pm_control);
+               break;
+
+       case pm_interval:
+               READ_SHADOW_REG(val, pm_interval);
+               break;
+
+       case pm_start_stop:
+               READ_SHADOW_REG(val, pm_start_stop);
+               break;
+       }
+
+       return val;
+}
+EXPORT_SYMBOL_GPL(cbe_read_pm);
+
+void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
+{
+       switch (reg) {
+       case group_control:
+               WRITE_WO_MMIO(group_control, val);
+               break;
+
+       case debug_bus_control:
+               WRITE_WO_MMIO(debug_bus_control, val);
+               break;
+
+       case trace_address:
+               WRITE_WO_MMIO(trace_address, val);
+               break;
+
+       case ext_tr_timer:
+               WRITE_WO_MMIO(ext_tr_timer, val);
+               break;
+
+       case pm_status:
+               WRITE_WO_MMIO(pm_status, val);
+               break;
+
+       case pm_control:
+               WRITE_WO_MMIO(pm_control, val);
+               break;
+
+       case pm_interval:
+               WRITE_WO_MMIO(pm_interval, val);
+               break;
+
+       case pm_start_stop:
+               WRITE_WO_MMIO(pm_start_stop, val);
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(cbe_write_pm);
+
+/*
+ * Get/set the size of a physical counter to either 16 or 32 bits.
+ */
+
+u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
+{
+       u32 pm_ctrl, size = 0;
+
+       if (phys_ctr < NR_PHYS_CTRS) {
+               pm_ctrl = cbe_read_pm(cpu, pm_control);
+               size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
+       }
+
+       return size;
+}
+EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
+
+void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
+{
+       u32 pm_ctrl;
+
+       if (phys_ctr < NR_PHYS_CTRS) {
+               pm_ctrl = cbe_read_pm(cpu, pm_control);
+               switch (ctr_size) {
+               case 16:
+                       pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
+                       break;
+
+               case 32:
+                       pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
+                       break;
+               }
+               cbe_write_pm(cpu, pm_control, pm_ctrl);
+       }
+}
+EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
+
+/*
+ * Enable/disable the entire performance monitoring unit.
+ * When we enable the PMU, all pending writes to counters get committed.
+ */
+
+void cbe_enable_pm(u32 cpu)
+{
+       struct cbe_pmd_shadow_regs *shadow_regs;
+       u32 pm_ctrl;
+
+       shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
+       shadow_regs->counter_value_in_latch = 0;
+
+       pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
+       cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm);
+
+void cbe_disable_pm(u32 cpu)
+{
+       u32 pm_ctrl;
+       pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
+       cbe_write_pm(cpu, pm_control, pm_ctrl);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm);
+
+/*
+ * Reading from the trace_buffer.
+ * The trace buffer is two 64-bit registers. Reading from
+ * the second half automatically increments the trace_address.
+ */
+
+void cbe_read_trace_buffer(u32 cpu, u64 *buf)
+{
+       struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
+
+       *buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
+       *buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
+}
+EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
+
+/*
+ * Enabling/disabling interrupts for the entire performance monitoring unit.
+ */
+
+u32 cbe_query_pm_interrupts(u32 cpu)
+{
+       return cbe_read_pm(cpu, pm_status);
+}
+EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
+
+u32 cbe_clear_pm_interrupts(u32 cpu)
+{
+       /* Reading pm_status clears the interrupt bits. */
+       return cbe_query_pm_interrupts(cpu);
+}
+EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+
+void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
+{
+       /* Set which node and thread will handle the next interrupt. */
+       iic_set_interrupt_routing(cpu, thread, 0);
+
+       /* Enable the interrupt bits in the pm_status register. */
+       if (mask)
+               cbe_write_pm(cpu, pm_status, mask);
+}
+EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
+
+void cbe_disable_pm_interrupts(u32 cpu)
+{
+       cbe_clear_pm_interrupts(cpu);
+       cbe_write_pm(cpu, pm_status, 0);
+}
+EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
+
+static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
+{
+       perf_irq(get_irq_regs());
+       return IRQ_HANDLED;
+}
+
+int __init cbe_init_pm_irq(void)
+{
+       unsigned int irq;
+       int rc, node;
+
+       for_each_node(node) {
+               irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
+                                              (node << IIC_IRQ_NODE_SHIFT));
+               if (irq == NO_IRQ) {
+                       printk("ERROR: Unable to allocate irq for node %d\n",
+                              node);
+                       return -EINVAL;
+               }
+
+               rc = request_irq(irq, cbe_pm_irq,
+                                IRQF_DISABLED, "cbe-pmu-0", NULL);
+               if (rc) {
+                       printk("ERROR: Request for irq on node %d failed\n",
+                              node);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+arch_initcall(cbe_init_pm_irq);
+
+void cbe_sync_irq(int node)
+{
+       unsigned int irq;
+
+       irq = irq_find_mapping(NULL,
+                              IIC_IRQ_IOEX_PMI
+                              | (node << IIC_IRQ_NODE_SHIFT));
+
+       if (irq == NO_IRQ) {
+               printk(KERN_WARNING "ERROR, unable to get existing irq %d " \
+               "for node %d\n", irq, node);
+               return;
+       }
+
+       synchronize_irq(irq);
+}
+EXPORT_SYMBOL_GPL(cbe_sync_irq);
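cbe_sync_irq() pairs naturally with cbe_disable_pm_interrupts() on teardown; a hedged sketch with a hypothetical caller:

static void example_pmu_teardown(u32 cpu, int node)
{
        cbe_disable_pm_interrupts(cpu);  /* no further PM interrupts are raised */
        cbe_sync_irq(node);              /* wait out any handler still running */
}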
+
index 22c228a49c3373af2cd287b325cea089ad961efa..36989c2eee665546d9b0370ec03e7e8331b678d8 100644 (file)
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <asm/of_platform.h>
 
 #include "interrupt.h"
-#include "iommu.h"
 #include "cbe_regs.h"
 #include "pervasive.h"
 #include "ras.h"
@@ -80,24 +81,72 @@ static void cell_progress(char *s, unsigned short hex)
        printk("*** %04x : %s\n", hex, s ? s : "");
 }
 
-static void __init cell_pcibios_fixup(void)
+static int __init cell_publish_devices(void)
 {
-       struct pci_dev *dev = NULL;
+       if (!machine_is(cell))
+               return 0;
+
+       /* Publish OF platform devices for southbridge IOs */
+       of_platform_bus_probe(NULL, NULL, NULL);
+
+       return 0;
+}
+device_initcall(cell_publish_devices);
+
+static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
+{
+       struct mpic *mpic = desc->handler_data;
+       unsigned int virq;
+
+       virq = mpic_get_one_irq(mpic);
+       if (virq != NO_IRQ)
+               generic_handle_irq(virq);
+       desc->chip->eoi(irq);
+}
 
-       for_each_pci_dev(dev)
-               pci_read_irq_line(dev);
+static void __init mpic_init_IRQ(void)
+{
+       struct device_node *dn;
+       struct mpic *mpic;
+       unsigned int virq;
+
+       for (dn = NULL;
+            (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
+               if (!device_is_compatible(dn, "CBEA,platform-open-pic"))
+                       continue;
+
+               /* The MPIC driver will get everything it needs from the
+                * device-tree, just pass 0 to all arguments
+                */
+               mpic = mpic_alloc(dn, 0, 0, 0, 0, " MPIC     ");
+               if (mpic == NULL)
+                       continue;
+               mpic_init(mpic);
+
+               virq = irq_of_parse_and_map(dn, 0);
+               if (virq == NO_IRQ)
+                       continue;
+
+               printk(KERN_INFO "%s : hooking up to IRQ %d\n",
+                      dn->full_name, virq);
+               set_irq_data(virq, mpic);
+               set_irq_chained_handler(virq, cell_mpic_cascade);
+       }
 }
 
+
 static void __init cell_init_irq(void)
 {
        iic_init_IRQ();
        spider_init_IRQ();
+       mpic_init_IRQ();
 }
 
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
-       spu_priv1_ops         = &spu_priv1_mmio_ops;
+       spu_priv1_ops = &spu_priv1_mmio_ops;
+       spu_management_ops = &spu_management_of_ops;
 #endif
 
        cbe_regs_init();
@@ -109,7 +158,6 @@ static void __init cell_setup_arch(void)
 #ifdef CONFIG_SMP
        smp_init_cell();
 #endif
-
        /* init to some ~sane value until calibrate_delay() runs */
        loops_per_jiffy = 50000000;
 
@@ -129,19 +177,6 @@ static void __init cell_setup_arch(void)
        mmio_nvram_init();
 }
 
-/*
- * Early initialization.  Relocation is on but do not reference unbolted pages
- */
-static void __init cell_init_early(void)
-{
-       DBG(" -> cell_init_early()\n");
-
-       cell_init_iommu();
-
-       DBG(" <- cell_init_early()\n");
-}
-
-
 static int __init cell_probe(void)
 {
        unsigned long root = of_get_flat_dt_root();
@@ -168,7 +203,6 @@ define_machine(cell) {
        .name                   = "Cell",
        .probe                  = cell_probe,
        .setup_arch             = cell_setup_arch,
-       .init_early             = cell_init_early,
        .show_cpuinfo           = cell_show_cpuinfo,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
@@ -180,7 +214,7 @@ define_machine(cell) {
        .check_legacy_ioport    = cell_check_legacy_ioport,
        .progress               = cell_progress,
        .init_IRQ               = cell_init_irq,
-       .pcibios_fixup          = cell_pcibios_fixup,
+       .pci_setup_phb          = rtas_setup_phb,
 #ifdef CONFIG_KEXEC
        .machine_kexec          = default_machine_kexec,
        .machine_kexec_prepare  = default_machine_kexec_prepare,
index 7aa809d5a244230f14ce6a9042efc918063409cf..bd7bffc3ddd08be19caf0a15572b5e727c6f0cd2 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
-
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/prom.h>
+#include <linux/mm.h>
+#include <linux/io.h>
 #include <linux/mutex.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
-#include <asm/mmu_context.h>
-
-#include "interrupt.h"
+#include <asm/xmon.h>
 
+const struct spu_management_ops *spu_management_ops;
 const struct spu_priv1_ops *spu_priv1_ops;
 
 EXPORT_SYMBOL_GPL(spu_priv1_ops);
@@ -89,7 +84,30 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
-       if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+       esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+       switch(REGION_ID(ea)) {
+       case USER_REGION_ID:
+#ifdef CONFIG_HUGETLB_PAGE
+               if (in_hugepage_area(mm->context, ea))
+                       llp = mmu_psize_defs[mmu_huge_psize].sllp;
+               else
+#endif
+                       llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+               vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+                               SLB_VSID_USER | llp;
+               break;
+       case VMALLOC_REGION_ID:
+               llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+               vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+                       SLB_VSID_KERNEL | llp;
+               break;
+       case KERNEL_REGION_ID:
+               llp = mmu_psize_defs[mmu_linear_psize].sllp;
+               vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
+                       SLB_VSID_KERNEL | llp;
+               break;
+       default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
@@ -97,16 +115,6 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
                return 1;
        }
 
-       esid = (ea & ESID_MASK) | SLB_ESID_V;
-#ifdef CONFIG_HUGETLB_PAGE
-       if (in_hugepage_area(mm->context, ea))
-               llp = mmu_psize_defs[mmu_huge_psize].sllp;
-       else
-#endif
-               llp = mmu_psize_defs[mmu_virtual_psize].sllp;
-       vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
-                       SLB_VSID_USER | llp;
-
        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid);
        out_be64(&priv2->slb_esid_RW, esid);
@@ -320,6 +328,7 @@ static void spu_free_irqs(struct spu *spu)
 }
 
 static struct list_head spu_list[MAX_NUMNODES];
+static LIST_HEAD(spu_full_list);
 static DEFINE_MUTEX(spu_mutex);
 
 static void spu_init_channels(struct spu *spu)
@@ -364,8 +373,7 @@ struct spu *spu_alloc_node(int node)
        if (!list_empty(&spu_list[node])) {
                spu = list_entry(spu_list[node].next, struct spu, list);
                list_del_init(&spu->list);
-               pr_debug("Got SPU %x %d %d\n",
-                        spu->isrc, spu->number, spu->node);
+               pr_debug("Got SPU %d %d\n", spu->number, spu->node);
                spu_init_channels(spu);
        }
        mutex_unlock(&spu_mutex);
@@ -493,280 +501,65 @@ int spu_irq_class_1_bottom(struct spu *spu)
        if (!error) {
                spu_restart_dma(spu);
        } else {
-               __spu_trap_invalid_dma(spu);
+               spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
        }
        return ret;
 }
 
-static int __init find_spu_node_id(struct device_node *spe)
-{
-       const unsigned int *id;
-       struct device_node *cpu;
-       cpu = spe->parent->parent;
-       id = get_property(cpu, "node-id", NULL);
-       return id ? *id : 0;
-}
-
-static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
-               const char *prop)
-{
-       static DEFINE_MUTEX(add_spumem_mutex);
-
-       const struct address_prop {
-               unsigned long address;
-               unsigned int len;
-       } __attribute__((packed)) *p;
-       int proplen;
-
-       unsigned long start_pfn, nr_pages;
-       struct pglist_data *pgdata;
-       struct zone *zone;
-       int ret;
-
-       p = get_property(spe, prop, &proplen);
-       WARN_ON(proplen != sizeof (*p));
-
-       start_pfn = p->address >> PAGE_SHIFT;
-       nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       pgdata = NODE_DATA(spu->nid);
-       zone = pgdata->node_zones;
-
-       /* XXX rethink locking here */
-       mutex_lock(&add_spumem_mutex);
-       ret = __add_pages(zone, start_pfn, nr_pages);
-       mutex_unlock(&add_spumem_mutex);
-
-       return ret;
-}
+struct sysdev_class spu_sysdev_class = {
+       set_kset_name("spu")
+};
 
-static void __iomem * __init map_spe_prop(struct spu *spu,
-               struct device_node *n, const char *name)
+int spu_add_sysdev_attr(struct sysdev_attribute *attr)
 {
-       const struct address_prop {
-               unsigned long address;
-               unsigned int len;
-       } __attribute__((packed)) *prop;
-
-       const void *p;
-       int proplen;
-       void __iomem *ret = NULL;
-       int err = 0;
-
-       p = get_property(n, name, &proplen);
-       if (proplen != sizeof (struct address_prop))
-               return NULL;
-
-       prop = p;
-
-       err = cell_spuprop_present(spu, n, name);
-       if (err && (err != -EEXIST))
-               goto out;
-
-       ret = ioremap(prop->address, prop->len);
-
- out:
-       return ret;
-}
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-static void spu_unmap(struct spu *spu)
-{
-       iounmap(spu->priv2);
-       iounmap(spu->priv1);
-       iounmap(spu->problem);
-       iounmap((__force u8 __iomem *)spu->local_store);
-}
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysdev_create_file(&spu->sysdev, attr);
 
-/* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
-{
-       unsigned int isrc;
-       const u32 *tmp;
-
-       /* Get the interrupt source unit from the device-tree */
-       tmp = get_property(np, "isrc", NULL);
-       if (!tmp)
-               return -ENODEV;
-       isrc = tmp[0];
-
-       /* Add the node number */
-       isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
-       spu->isrc = isrc;
-
-       /* Now map interrupts of all 3 classes */
-       spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
-       spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
-       spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
-
-       /* Right now, we only fail if class 2 failed */
-       return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+       mutex_unlock(&spu_mutex);
+       return 0;
 }
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);
 
-static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
+int spu_add_sysdev_attr_group(struct attribute_group *attrs)
 {
-       const char *prop;
-       int ret;
-
-       ret = -ENODEV;
-       spu->name = get_property(node, "name", NULL);
-       if (!spu->name)
-               goto out;
-
-       prop = get_property(node, "local-store", NULL);
-       if (!prop)
-               goto out;
-       spu->local_store_phys = *(unsigned long *)prop;
-
-       /* we use local store as ram, not io memory */
-       spu->local_store = (void __force *)
-               map_spe_prop(spu, node, "local-store");
-       if (!spu->local_store)
-               goto out;
-
-       prop = get_property(node, "problem", NULL);
-       if (!prop)
-               goto out_unmap;
-       spu->problem_phys = *(unsigned long *)prop;
-
-       spu->problem= map_spe_prop(spu, node, "problem");
-       if (!spu->problem)
-               goto out_unmap;
-
-       spu->priv1= map_spe_prop(spu, node, "priv1");
-       /* priv1 is not available on a hypervisor */
-
-       spu->priv2= map_spe_prop(spu, node, "priv2");
-       if (!spu->priv2)
-               goto out_unmap;
-       ret = 0;
-       goto out;
-
-out_unmap:
-       spu_unmap(spu);
-out:
-       return ret;
-}
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
-{
-       struct of_irq oirq;
-       int ret;
-       int i;
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysfs_create_group(&spu->sysdev.kobj, attrs);
 
-       for (i=0; i < 3; i++) {
-               ret = of_irq_map_one(np, i, &oirq);
-               if (ret) {
-                       pr_debug("spu_new: failed to get irq %d\n", i);
-                       goto err;
-               }
-               ret = -EINVAL;
-               pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
-                        oirq.controller->full_name);
-               spu->irqs[i] = irq_create_of_mapping(oirq.controller,
-                                       oirq.specifier, oirq.size);
-               if (spu->irqs[i] == NO_IRQ) {
-                       pr_debug("spu_new: failed to map it !\n");
-                       goto err;
-               }
-       }
+       mutex_unlock(&spu_mutex);
        return 0;
-
-err:
-       pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
-       for (; i >= 0; i--) {
-               if (spu->irqs[i] != NO_IRQ)
-                       irq_dispose_mapping(spu->irqs[i]);
-       }
-       return ret;
 }
+EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);
 
-static int spu_map_resource(struct device_node *node, int nr,
-               void __iomem** virt, unsigned long *phys)
-{
-       struct resource resource = { };
-       int ret;
-
-       ret = of_address_to_resource(node, nr, &resource);
-       if (ret)
-               goto out;
 
-       if (phys)
-               *phys = resource.start;
-       *virt = ioremap(resource.start, resource.end - resource.start);
-       if (!*virt)
-               ret = -EINVAL;
-
-out:
-       return ret;
-}
-
-static int __init spu_map_device(struct spu *spu, struct device_node *node)
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
 {
-       int ret = -ENODEV;
-       spu->name = get_property(node, "name", NULL);
-       if (!spu->name)
-               goto out;
-
-       ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
-                                       &spu->local_store_phys);
-       if (ret) {
-               pr_debug("spu_new: failed to map %s resource 0\n",
-                        node->full_name);
-               goto out;
-       }
-       ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
-                                       &spu->problem_phys);
-       if (ret) {
-               pr_debug("spu_new: failed to map %s resource 1\n",
-                        node->full_name);
-               goto out_unmap;
-       }
-       ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
-                                       NULL);
-       if (ret) {
-               pr_debug("spu_new: failed to map %s resource 2\n",
-                        node->full_name);
-               goto out_unmap;
-       }
-
-       if (!firmware_has_feature(FW_FEATURE_LPAR))
-               ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
-                                       NULL);
-       if (ret) {
-               pr_debug("spu_new: failed to map %s resource 3\n",
-                        node->full_name);
-               goto out_unmap;
-       }
-       pr_debug("spu_new: %s maps:\n", node->full_name);
-       pr_debug("  local store   : 0x%016lx -> 0x%p\n",
-                spu->local_store_phys, spu->local_store);
-       pr_debug("  problem state : 0x%016lx -> 0x%p\n",
-                spu->problem_phys, spu->problem);
-       pr_debug("  priv2         :                       0x%p\n", spu->priv2);
-       pr_debug("  priv1         :                       0x%p\n", spu->priv1);
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-       return 0;
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysdev_remove_file(&spu->sysdev, attr);
 
-out_unmap:
-       spu_unmap(spu);
-out:
-       pr_debug("failed to map spe %s: %d\n", spu->name, ret);
-       return ret;
+       mutex_unlock(&spu_mutex);
 }
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);
 
-struct sysdev_class spu_sysdev_class = {
-       set_kset_name("spu")
-};
-
-static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
 {
-       struct spu *spu = container_of(sysdev, struct spu, sysdev);
-       return sprintf(buf, "%d\n", spu->isrc);
+       struct spu *spu;
+       mutex_lock(&spu_mutex);
 
-}
-static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);
+       list_for_each_entry(spu, &spu_full_list, full_list)
+               sysfs_remove_group(&spu->sysdev.kobj, attrs);
 
-extern int attach_sysdev_to_node(struct sys_device *dev, int nid);
+       mutex_unlock(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);
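The four hooks above let other code attach extra sysfs attributes to every SPU without touching spu_base itself. A minimal sketch, assuming the usual <linux/sysdev.h> and <asm/spu.h> declarations are in scope; the attribute name and show routine are made up for illustration:

static ssize_t example_show_number(struct sys_device *sysdev, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%d\n", spu->number);
}
static SYSDEV_ATTR(example_number, 0444, example_show_number, NULL);

static int __init example_attr_init(void)
{
        /* creates the file on every SPU sysdev registered so far */
        return spu_add_sysdev_attr(&attr_example_number);
}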
 
 static int spu_create_sysdev(struct spu *spu)
 {
@@ -781,21 +574,18 @@ static int spu_create_sysdev(struct spu *spu)
                return ret;
        }
 
-       if (spu->isrc != 0)
-               sysdev_create_file(&spu->sysdev, &attr_isrc);
-       sysfs_add_device_to_node(&spu->sysdev, spu->nid);
+       sysfs_add_device_to_node(&spu->sysdev, spu->node);
 
        return 0;
 }
 
 static void spu_destroy_sysdev(struct spu *spu)
 {
-       sysdev_remove_file(&spu->sysdev, &attr_isrc);
-       sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
+       sysfs_remove_device_from_node(&spu->sysdev, spu->node);
        sysdev_unregister(&spu->sysdev);
 }
 
-static int __init create_spu(struct device_node *spe)
+static int __init create_spu(void *data)
 {
        struct spu *spu;
        int ret;
@@ -806,57 +596,37 @@ static int __init create_spu(struct device_node *spe)
        if (!spu)
                goto out;
 
-       spu->node = find_spu_node_id(spe);
-       if (spu->node >= MAX_NUMNODES) {
-               printk(KERN_WARNING "SPE %s on node %d ignored,"
-                      " node number too big\n", spe->full_name, spu->node);
-               printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
-               return -ENODEV;
-       }
-       spu->nid = of_node_to_nid(spe);
-       if (spu->nid == -1)
-               spu->nid = 0;
+       spin_lock_init(&spu->register_lock);
+       mutex_lock(&spu_mutex);
+       spu->number = number++;
+       mutex_unlock(&spu_mutex);
+
+       ret = spu_create_spu(spu, data);
 
-       ret = spu_map_device(spu, spe);
-       /* try old method */
-       if (ret)
-               ret = spu_map_device_old(spu, spe);
        if (ret)
                goto out_free;
 
-       ret = spu_map_interrupts(spu, spe);
-       if (ret)
-               ret = spu_map_interrupts_old(spu, spe);
-       if (ret)
-               goto out_unmap;
-       spin_lock_init(&spu->register_lock);
-       spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
+       spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
-       mutex_lock(&spu_mutex);
-
-       spu->number = number++;
        ret = spu_request_irqs(spu);
        if (ret)
-               goto out_unlock;
+               goto out_destroy;
 
        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;
 
+       mutex_lock(&spu_mutex);
        list_add(&spu->list, &spu_list[spu->node]);
+       list_add(&spu->full_list, &spu_full_list);
        mutex_unlock(&spu_mutex);
 
-       pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
-               spu->name, spu->isrc, spu->local_store,
-               spu->problem, spu->priv1, spu->priv2, spu->number);
        goto out;
 
 out_free_irqs:
        spu_free_irqs(spu);
-out_unlock:
-       mutex_unlock(&spu_mutex);
-out_unmap:
-       spu_unmap(spu);
+out_destroy:
+       spu_destroy_spu(spu);
 out_free:
        kfree(spu);
 out:
@@ -866,10 +636,11 @@ out:
 static void destroy_spu(struct spu *spu)
 {
        list_del_init(&spu->list);
+       list_del_init(&spu->full_list);
 
        spu_destroy_sysdev(spu);
        spu_free_irqs(spu);
-       spu_unmap(spu);
+       spu_destroy_spu(spu);
        kfree(spu);
 }
 
@@ -890,9 +661,11 @@ module_exit(cleanup_spu_base);
 
 static int __init init_spu_base(void)
 {
-       struct device_node *node;
        int i, ret;
 
+       if (!spu_management_ops)
+               return 0;
+
        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
@@ -901,17 +674,17 @@ static int __init init_spu_base(void)
        for (i = 0; i < MAX_NUMNODES; i++)
                INIT_LIST_HEAD(&spu_list[i]);
 
-       ret = -ENODEV;
-       for (node = of_find_node_by_type(NULL, "spe");
-                       node; node = of_find_node_by_type(node, "spe")) {
-               ret = create_spu(node);
-               if (ret) {
-                       printk(KERN_WARNING "%s: Error initializing %s\n",
-                               __FUNCTION__, node->name);
-                       cleanup_spu_base();
-                       break;
-               }
+       ret = spu_enumerate_spus(create_spu);
+
+       if (ret) {
+               printk(KERN_WARNING "%s: Error initializing spus\n",
+                       __FUNCTION__);
+               cleanup_spu_base();
+               return ret;
        }
+
+       xmon_register_spus(&spu_full_list);
+
        return ret;
 }
 module_init(init_spu_base);
diff --git a/arch/powerpc/platforms/cell/spu_coredump.c b/arch/powerpc/platforms/cell/spu_coredump.c
new file mode 100644 (file)
index 0000000..6915b41
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * SPU core dump code
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/spu.h>
+
+static struct spu_coredump_calls spu_coredump_calls;
+static DEFINE_MUTEX(spu_coredump_mutex);
+
+int arch_notes_size(void)
+{
+       long ret;
+       struct module *owner = spu_coredump_calls.owner;
+
+       ret = -ENOSYS;
+       mutex_lock(&spu_coredump_mutex);
+       if (owner && try_module_get(owner)) {
+               ret = spu_coredump_calls.arch_notes_size();
+               module_put(owner);
+       }
+       mutex_unlock(&spu_coredump_mutex);
+       return ret;
+}
+
+void arch_write_notes(struct file *file)
+{
+       struct module *owner = spu_coredump_calls.owner;
+
+       mutex_lock(&spu_coredump_mutex);
+       if (owner && try_module_get(owner)) {
+               spu_coredump_calls.arch_write_notes(file);
+               module_put(owner);
+       }
+       mutex_unlock(&spu_coredump_mutex);
+}
+
+int register_arch_coredump_calls(struct spu_coredump_calls *calls)
+{
+       if (spu_coredump_calls.owner)
+               return -EBUSY;
+
+       mutex_lock(&spu_coredump_mutex);
+       spu_coredump_calls.arch_notes_size = calls->arch_notes_size;
+       spu_coredump_calls.arch_write_notes = calls->arch_write_notes;
+       spu_coredump_calls.owner = calls->owner;
+       mutex_unlock(&spu_coredump_mutex);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(register_arch_coredump_calls);
+
+void unregister_arch_coredump_calls(struct spu_coredump_calls *calls)
+{
+       BUG_ON(spu_coredump_calls.owner != calls->owner);
+
+       mutex_lock(&spu_coredump_mutex);
+       spu_coredump_calls.owner = NULL;
+       mutex_unlock(&spu_coredump_mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_arch_coredump_calls);
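A sketch of the provider side (hypothetical stubs; spufs registers its real implementation later in this commit): a module fills in a struct spu_coredump_calls and hands it to register_arch_coredump_calls(), which the arch_notes_size()/arch_write_notes() wrappers above then call through while the module is pinned with try_module_get():

static int example_notes_size(void)
{
        return 0;                       /* no extra note bytes in this stub */
}

static void example_write_notes(struct file *file)
{
}

static struct spu_coredump_calls example_coredump_calls = {
        .arch_notes_size  = example_notes_size,
        .arch_write_notes = example_write_notes,
        .owner            = THIS_MODULE,
};

static int __init example_provider_init(void)
{
        return register_arch_coredump_calls(&example_coredump_calls);
}

static void __exit example_provider_exit(void)
{
        unregister_arch_coredump_calls(&example_coredump_calls);
}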
index 71b69f0a1a48712b76b2c5d6f43a52f99699a5f3..a5de0430c56d8974332a38a5d21c680d3297e7c5 100644 (file)
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/interrupt.h>
+#include <linux/list.h>
 #include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
 
-#include <asm/io.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
+#include <asm/firmware.h>
+#include <asm/prom.h>
 
 #include "interrupt.h"
+#include "spu_priv1_mmio.h"
+
+struct spu_pdata {
+       int nid;
+       struct device_node *devnode;
+       struct spu_priv1 __iomem *priv1;
+};
+
+static struct spu_pdata *spu_get_pdata(struct spu *spu)
+{
+       BUG_ON(!spu->pdata);
+       return spu->pdata;
+}
+
+struct device_node *spu_devnode(struct spu *spu)
+{
+       return spu_get_pdata(spu)->devnode;
+}
+
+EXPORT_SYMBOL_GPL(spu_devnode);
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+       const unsigned int *id;
+       struct device_node *cpu;
+       cpu = spe->parent->parent;
+       id = get_property(cpu, "node-id", NULL);
+       return id ? *id : 0;
+}
+
+static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
+               const char *prop)
+{
+       static DEFINE_MUTEX(add_spumem_mutex);
+
+       const struct address_prop {
+               unsigned long address;
+               unsigned int len;
+       } __attribute__((packed)) *p;
+       int proplen;
+
+       unsigned long start_pfn, nr_pages;
+       struct pglist_data *pgdata;
+       struct zone *zone;
+       int ret;
+
+       p = get_property(spe, prop, &proplen);
+       WARN_ON(proplen != sizeof (*p));
+
+       start_pfn = p->address >> PAGE_SHIFT;
+       nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       pgdata = NODE_DATA(spu_get_pdata(spu)->nid);
+       zone = pgdata->node_zones;
+
+       /* XXX rethink locking here */
+       mutex_lock(&add_spumem_mutex);
+       ret = __add_pages(zone, start_pfn, nr_pages);
+       mutex_unlock(&add_spumem_mutex);
+
+       return ret;
+}
+
+static void __iomem * __init map_spe_prop(struct spu *spu,
+               struct device_node *n, const char *name)
+{
+       const struct address_prop {
+               unsigned long address;
+               unsigned int len;
+       } __attribute__((packed)) *prop;
+
+       const void *p;
+       int proplen;
+       void __iomem *ret = NULL;
+       int err = 0;
+
+       p = get_property(n, name, &proplen);
+       if (proplen != sizeof (struct address_prop))
+               return NULL;
+
+       prop = p;
+
+       err = cell_spuprop_present(spu, n, name);
+       if (err && (err != -EEXIST))
+               goto out;
+
+       ret = ioremap(prop->address, prop->len);
+
+ out:
+       return ret;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+       iounmap(spu->priv2);
+       iounmap(spu_get_pdata(spu)->priv1);
+       iounmap(spu->problem);
+       iounmap((__force u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_interrupts_old(struct spu *spu,
+       struct device_node *np)
+{
+       unsigned int isrc;
+       const u32 *tmp;
+
+       /* Get the interrupt source unit from the device-tree */
+       tmp = get_property(np, "isrc", NULL);
+       if (!tmp)
+               return -ENODEV;
+       isrc = tmp[0];
+
+       /* Add the node number */
+       isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
+
+       /* Now map interrupts of all 3 classes */
+       spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
+       spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
+       spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
+
+       /* Right now, we only fail if class 2 failed */
+       return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
+}
+
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
+{
+       const char *prop;
+       int ret;
+
+       ret = -ENODEV;
+       spu->name = get_property(node, "name", NULL);
+       if (!spu->name)
+               goto out;
+
+       prop = get_property(node, "local-store", NULL);
+       if (!prop)
+               goto out;
+       spu->local_store_phys = *(unsigned long *)prop;
+
+       /* we use local store as ram, not io memory */
+       spu->local_store = (void __force *)
+               map_spe_prop(spu, node, "local-store");
+       if (!spu->local_store)
+               goto out;
+
+       prop = get_property(node, "problem", NULL);
+       if (!prop)
+               goto out_unmap;
+       spu->problem_phys = *(unsigned long *)prop;
+
+       spu->problem= map_spe_prop(spu, node, "problem");
+       if (!spu->problem)
+               goto out_unmap;
+
+       spu_get_pdata(spu)->priv1= map_spe_prop(spu, node, "priv1");
+
+       spu->priv2= map_spe_prop(spu, node, "priv2");
+       if (!spu->priv2)
+               goto out_unmap;
+       ret = 0;
+       goto out;
+
+out_unmap:
+       spu_unmap(spu);
+out:
+       return ret;
+}
+
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+       struct of_irq oirq;
+       int ret;
+       int i;
+
+       for (i=0; i < 3; i++) {
+               ret = of_irq_map_one(np, i, &oirq);
+               if (ret) {
+                       pr_debug("spu_new: failed to get irq %d\n", i);
+                       goto err;
+               }
+               ret = -EINVAL;
+               pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
+                        oirq.controller->full_name);
+               spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+                                       oirq.specifier, oirq.size);
+               if (spu->irqs[i] == NO_IRQ) {
+                       pr_debug("spu_new: failed to map it !\n");
+                       goto err;
+               }
+       }
+       return 0;
+
+err:
+       pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
+               spu->name);
+       for (; i >= 0; i--) {
+               if (spu->irqs[i] != NO_IRQ)
+                       irq_dispose_mapping(spu->irqs[i]);
+       }
+       return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+               void __iomem** virt, unsigned long *phys)
+{
+       struct resource resource = { };
+       int ret;
+
+       ret = of_address_to_resource(node, nr, &resource);
+       if (ret)
+               goto out;
+
+       if (phys)
+               *phys = resource.start;
+       *virt = ioremap(resource.start, resource.end - resource.start);
+       if (!*virt)
+               ret = -EINVAL;
+
+out:
+       return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+       int ret = -ENODEV;
+       spu->name = get_property(node, "name", NULL);
+       if (!spu->name)
+               goto out;
+
+       ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+                                       &spu->local_store_phys);
+       if (ret) {
+               pr_debug("spu_new: failed to map %s resource 0\n",
+                        node->full_name);
+               goto out;
+       }
+       ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+                                       &spu->problem_phys);
+       if (ret) {
+               pr_debug("spu_new: failed to map %s resource 1\n",
+                        node->full_name);
+               goto out_unmap;
+       }
+       ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+                                       NULL);
+       if (ret) {
+               pr_debug("spu_new: failed to map %s resource 2\n",
+                        node->full_name);
+               goto out_unmap;
+       }
+       if (!firmware_has_feature(FW_FEATURE_LPAR))
+               ret = spu_map_resource(node, 3,
+                       (void __iomem**)&spu_get_pdata(spu)->priv1, NULL);
+       if (ret) {
+               pr_debug("spu_new: failed to map %s resource 3\n",
+                        node->full_name);
+               goto out_unmap;
+       }
+       pr_debug("spu_new: %s maps:\n", node->full_name);
+       pr_debug("  local store   : 0x%016lx -> 0x%p\n",
+                spu->local_store_phys, spu->local_store);
+       pr_debug("  problem state : 0x%016lx -> 0x%p\n",
+                spu->problem_phys, spu->problem);
+       pr_debug("  priv2         :                       0x%p\n", spu->priv2);
+       pr_debug("  priv1         :                       0x%p\n",
+                                               spu_get_pdata(spu)->priv1);
+
+       return 0;
+
+out_unmap:
+       spu_unmap(spu);
+out:
+       pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+       return ret;
+}
+
+static int __init of_enumerate_spus(int (*fn)(void *data))
+{
+       int ret;
+       struct device_node *node;
+
+       ret = -ENODEV;
+       for (node = of_find_node_by_type(NULL, "spe");
+                       node; node = of_find_node_by_type(node, "spe")) {
+               ret = fn(node);
+               if (ret) {
+                       printk(KERN_WARNING "%s: Error initializing %s\n",
+                               __FUNCTION__, node->name);
+                       break;
+               }
+       }
+       return ret;
+}
+
+static int __init of_create_spu(struct spu *spu, void *data)
+{
+       int ret;
+       struct device_node *spe = (struct device_node *)data;
+
+       spu->pdata = kzalloc(sizeof(struct spu_pdata),
+               GFP_KERNEL);
+       if (!spu->pdata) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       spu->node = find_spu_node_id(spe);
+       if (spu->node >= MAX_NUMNODES) {
+               printk(KERN_WARNING "SPE %s on node %d ignored,"
+                      " node number too big\n", spe->full_name, spu->node);
+               printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
+               ret = -ENODEV;
+               goto out_free;
+       }
+
+       spu_get_pdata(spu)->nid = of_node_to_nid(spe);
+       if (spu_get_pdata(spu)->nid == -1)
+               spu_get_pdata(spu)->nid = 0;
+
+       ret = spu_map_device(spu, spe);
+       /* try old method */
+       if (ret)
+               ret = spu_map_device_old(spu, spe);
+       if (ret)
+               goto out_free;
+
+       ret = spu_map_interrupts(spu, spe);
+       if (ret)
+               ret = spu_map_interrupts_old(spu, spe);
+       if (ret)
+               goto out_unmap;
+
+       spu_get_pdata(spu)->devnode = of_node_get(spe);
+
+       pr_debug(KERN_DEBUG "Using SPE %s %p %p %p %p %d\n", spu->name,
+               spu->local_store, spu->problem, spu_get_pdata(spu)->priv1,
+               spu->priv2, spu->number);
+       goto out;
+
+out_unmap:
+       spu_unmap(spu);
+out_free:
+       kfree(spu->pdata);
+       spu->pdata = NULL;
+out:
+       return ret;
+}
+
+static int of_destroy_spu(struct spu *spu)
+{
+       spu_unmap(spu);
+       of_node_put(spu_get_pdata(spu)->devnode);
+       kfree(spu->pdata);
+       spu->pdata = NULL;
+       return 0;
+}
+
+const struct spu_management_ops spu_management_of_ops = {
+       .enumerate_spus = of_enumerate_spus,
+       .create_spu = of_create_spu,
+       .destroy_spu = of_destroy_spu,
+};
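This ops table is what cell_setup_arch() installs via spu_management_ops = &spu_management_of_ops (see the setup.c hunk earlier in this commit). A different backend, say a hypervisor that cannot walk the device tree directly, would supply its own table with the same three callbacks; the stubs below are illustrative placeholders only:

static int example_enumerate_spus(int (*fn)(void *data))
{
        /* walk platform-specific firmware information, calling fn() per SPE */
        return 0;
}

static int example_create_spu(struct spu *spu, void *data)
{
        /* map registers, fill in spu->node, spu->irqs[], spu->pdata, ... */
        return 0;
}

static int example_destroy_spu(struct spu *spu)
{
        return 0;
}

static const struct spu_management_ops spu_management_example_ops = {
        .enumerate_spus = example_enumerate_spus,
        .create_spu     = example_create_spu,
        .destroy_spu    = example_destroy_spu,
};

Such a platform's setup_arch() would then point spu_management_ops at this table before init_spu_base() runs, since init_spu_base() bails out early when no management ops are installed.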
 
 static void int_mask_and(struct spu *spu, int class, u64 mask)
 {
        u64 old_mask;
 
-       old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
-       out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+       old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+       out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+               old_mask & mask);
 }
 
 static void int_mask_or(struct spu *spu, int class, u64 mask)
 {
        u64 old_mask;
 
-       old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
-       out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+       old_mask = in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
+       out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class],
+               old_mask | mask);
 }
 
 static void int_mask_set(struct spu *spu, int class, u64 mask)
 {
-       out_be64(&spu->priv1->int_mask_RW[class], mask);
+       out_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class], mask);
 }
 
 static u64 int_mask_get(struct spu *spu, int class)
 {
-       return in_be64(&spu->priv1->int_mask_RW[class]);
+       return in_be64(&spu_get_pdata(spu)->priv1->int_mask_RW[class]);
 }
 
 static void int_stat_clear(struct spu *spu, int class, u64 stat)
 {
-       out_be64(&spu->priv1->int_stat_RW[class], stat);
+       out_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class], stat);
 }
 
 static u64 int_stat_get(struct spu *spu, int class)
 {
-       return in_be64(&spu->priv1->int_stat_RW[class]);
+       return in_be64(&spu_get_pdata(spu)->priv1->int_stat_RW[class]);
 }
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
        u64 target = iic_get_target_id(cpu);
        u64 route = target << 48 | target << 32 | target << 16;
-       out_be64(&spu->priv1->int_route_RW, route);
+       out_be64(&spu_get_pdata(spu)->priv1->int_route_RW, route);
 }
 
 static u64 mfc_dar_get(struct spu *spu)
 {
-       return in_be64(&spu->priv1->mfc_dar_RW);
+       return in_be64(&spu_get_pdata(spu)->priv1->mfc_dar_RW);
 }
 
 static u64 mfc_dsisr_get(struct spu *spu)
 {
-       return in_be64(&spu->priv1->mfc_dsisr_RW);
+       return in_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW);
 }
 
 static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
 {
-       out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+       out_be64(&spu_get_pdata(spu)->priv1->mfc_dsisr_RW, dsisr);
 }
 
-static void mfc_sdr_set(struct spu *spu, u64 sdr)
+static void mfc_sdr_setup(struct spu *spu)
 {
-       out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+       out_be64(&spu_get_pdata(spu)->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
 }
 
 static void mfc_sr1_set(struct spu *spu, u64 sr1)
 {
-       out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+       out_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW, sr1);
 }
 
 static u64 mfc_sr1_get(struct spu *spu)
 {
-       return in_be64(&spu->priv1->mfc_sr1_RW);
+       return in_be64(&spu_get_pdata(spu)->priv1->mfc_sr1_RW);
 }
 
 static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
 {
-       out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+       out_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW, tclass_id);
 }
 
 static u64 mfc_tclass_id_get(struct spu *spu)
 {
-       return in_be64(&spu->priv1->mfc_tclass_id_RW);
+       return in_be64(&spu_get_pdata(spu)->priv1->mfc_tclass_id_RW);
 }
 
 static void tlb_invalidate(struct spu *spu)
 {
-       out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+       out_be64(&spu_get_pdata(spu)->priv1->tlb_invalidate_entry_W, 0ul);
 }
 
 static void resource_allocation_groupID_set(struct spu *spu, u64 id)
 {
-       out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+       out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW,
+               id);
 }
 
 static u64 resource_allocation_groupID_get(struct spu *spu)
 {
-       return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+       return in_be64(
+               &spu_get_pdata(spu)->priv1->resource_allocation_groupID_RW);
 }
 
 static void resource_allocation_enable_set(struct spu *spu, u64 enable)
 {
-       out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+       out_be64(&spu_get_pdata(spu)->priv1->resource_allocation_enable_RW,
+               enable);
 }
 
 static u64 resource_allocation_enable_get(struct spu *spu)
 {
-       return in_be64(&spu->priv1->resource_allocation_enable_RW);
+       return in_be64(
+               &spu_get_pdata(spu)->priv1->resource_allocation_enable_RW);
 }
 
 const struct spu_priv1_ops spu_priv1_mmio_ops =
@@ -146,7 +524,7 @@ const struct spu_priv1_ops spu_priv1_mmio_ops =
        .mfc_dar_get = mfc_dar_get,
        .mfc_dsisr_get = mfc_dsisr_get,
        .mfc_dsisr_set = mfc_dsisr_set,
-       .mfc_sdr_set = mfc_sdr_set,
+       .mfc_sdr_setup = mfc_sdr_setup,
        .mfc_sr1_set = mfc_sr1_set,
        .mfc_sr1_get = mfc_sr1_get,
        .mfc_tclass_id_set = mfc_tclass_id_set,
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
new file mode 100644 (file)
index 0000000..7b62bd1
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * spu hypervisor abstraction for direct hardware access.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef SPU_PRIV1_MMIO_H
+#define SPU_PRIV1_MMIO_H
+
+struct device_node *spu_devnode(struct spu *spu);
+
+#endif /* SPU_PRIV1_MMIO_H */
index ecdfbb35f82e9ab6af69ba850edfd53268e53139..472217d19faf5b7ac0dd7d352cbeb77510d57513 100644 (file)
@@ -1,7 +1,7 @@
 obj-y += switch.o
 
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o
+spufs-y += inode.o file.o context.o syscalls.o coredump.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 
 # Rules to build switch.o with the help of SPU tool chain
index 2d22cd59d6fc40ae0a617b3e4d72538aa91139b3..1898f0d3a8b876aaafe246c845b82189c298986c 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/io.h>
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
+#include <asm/spu_info.h>
 #include <asm/mmu_context.h>
 #include "spufs.h"
 
@@ -267,6 +268,11 @@ static char *spu_backing_get_ls(struct spu_context *ctx)
        return ctx->csa.lscsa->ls;
 }
 
+static u32 spu_backing_runcntl_read(struct spu_context *ctx)
+{
+       return ctx->csa.prob.spu_runcntl_RW;
+}
+
 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
 {
        spin_lock(&ctx->csa.register_lock);
@@ -279,9 +285,26 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
        spin_unlock(&ctx->csa.register_lock);
 }
 
-static void spu_backing_runcntl_stop(struct spu_context *ctx)
+static void spu_backing_master_start(struct spu_context *ctx)
+{
+       struct spu_state *csa = &ctx->csa;
+       u64 sr1;
+
+       spin_lock(&csa->register_lock);
+       sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+       csa->priv1.mfc_sr1_RW = sr1;
+       spin_unlock(&csa->register_lock);
+}
+
+static void spu_backing_master_stop(struct spu_context *ctx)
 {
-       spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+       struct spu_state *csa = &ctx->csa;
+       u64 sr1;
+
+       spin_lock(&csa->register_lock);
+       sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+       csa->priv1.mfc_sr1_RW = sr1;
+       spin_unlock(&csa->register_lock);
 }
 
 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask,
@@ -345,8 +368,10 @@ struct spu_context_ops spu_backing_ops = {
        .npc_write = spu_backing_npc_write,
        .status_read = spu_backing_status_read,
        .get_ls = spu_backing_get_ls,
+       .runcntl_read = spu_backing_runcntl_read,
        .runcntl_write = spu_backing_runcntl_write,
-       .runcntl_stop = spu_backing_runcntl_stop,
+       .master_start = spu_backing_master_start,
+       .master_stop = spu_backing_master_stop,
        .set_mfc_query = spu_backing_set_mfc_query,
        .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_backing_get_mfc_free_elements,
index 034cf6af53a2f6df6783710d74bbefeeca863fe1..0870009f56db64e7e535bf61c70d393e95684605 100644 (file)
@@ -120,6 +120,33 @@ void spu_unmap_mappings(struct spu_context *ctx)
                unmap_mapping_range(ctx->signal2, 0, 0x4000, 1);
 }
 
+int spu_acquire_exclusive(struct spu_context *ctx)
+{
+       int ret = 0;
+
+       down_write(&ctx->state_sema);
+       /* ctx is about to be freed, can't acquire any more */
+       if (!ctx->owner) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (ctx->state == SPU_STATE_SAVED) {
+               ret = spu_activate(ctx, 0);
+               if (ret)
+                       goto out;
+               ctx->state = SPU_STATE_RUNNABLE;
+       } else {
+               /* We need to exclude userspace access to the context. */
+               spu_unmap_mappings(ctx);
+       }
+
+out:
+       if (ret)
+               up_write(&ctx->state_sema);
+       return ret;
+}
+
 int spu_acquire_runnable(struct spu_context *ctx)
 {
        int ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
new file mode 100644 (file)
index 0000000..26945c4
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * SPU core dump code
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/elf.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+struct spufs_ctx_info {
+       struct list_head list;
+       int dfd;
+       int memsize; /* in bytes */
+       struct spu_context *ctx;
+};
+
+static LIST_HEAD(ctx_info_list);
+
+static ssize_t do_coredump_read(int num, struct spu_context *ctx, void __user *buffer,
+                               size_t size, loff_t *off)
+{
+       u64 data;
+       int ret;
+
+       if (spufs_coredump_read[num].read)
+               return spufs_coredump_read[num].read(ctx, buffer, size, off);
+
+       data = spufs_coredump_read[num].get(ctx);
+       ret = copy_to_user(buffer, &data, 8);
+       return ret ? -EFAULT : 8;
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * functions to write out all the necessary info.
+ */
+static int spufs_dump_write(struct file *file, const void *addr, int nr)
+{
+       return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+static int spufs_dump_seek(struct file *file, loff_t off)
+{
+       if (file->f_op->llseek) {
+               if (file->f_op->llseek(file, off, 0) != off)
+                       return 0;
+       } else
+               file->f_pos = off;
+       return 1;
+}
+
+static void spufs_fill_memsize(struct spufs_ctx_info *ctx_info)
+{
+       struct spu_context *ctx;
+       unsigned long long lslr;
+
+       ctx = ctx_info->ctx;
+       lslr = ctx->csa.priv2.spu_lslr_RW;
+       ctx_info->memsize = lslr + 1;
+}
+
+static int spufs_ctx_note_size(struct spufs_ctx_info *ctx_info)
+{
+       int dfd, memsize, i, sz, total = 0;
+       char *name;
+       char fullname[80];
+
+       dfd = ctx_info->dfd;
+       memsize = ctx_info->memsize;
+
+       for (i = 0; spufs_coredump_read[i].name; i++) {
+               name = spufs_coredump_read[i].name;
+               sz = spufs_coredump_read[i].size;
+
+               sprintf(fullname, "SPU/%d/%s", dfd, name);
+
+               total += sizeof(struct elf_note);
+               total += roundup(strlen(fullname) + 1, 4);
+               if (!strcmp(name, "mem"))
+                       total += roundup(memsize, 4);
+               else
+                       total += roundup(sz, 4);
+       }
+
+       return total;
+}
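As a worked illustration of the accounting above (the 256 kB local-store size is an assumption from the SPE architecture, not something this patch states):

/*
 * Each note contributes:
 *
 *     sizeof(struct elf_note)        12 bytes (n_namesz, n_descsz, n_type)
 *   + roundup(strlen(name) + 1, 4)   the "SPU/<dfd>/<file>" string, padded
 *   + roundup(descsz, 4)             the register/memory payload, padded
 *
 * e.g. the "SPU/5/mem" note of a context using the full 256 kB local store:
 *   12 + roundup(10, 4) + roundup(262144, 4) = 12 + 12 + 262144 = 262168 bytes
 */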
+
+static int spufs_add_one_context(struct file *file, int dfd)
+{
+       struct spu_context *ctx;
+       struct spufs_ctx_info *ctx_info;
+       int size;
+
+       ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
+       if (ctx->flags & SPU_CREATE_NOSCHED)
+               return 0;
+
+       ctx_info = kzalloc(sizeof(*ctx_info), GFP_KERNEL);
+       if (unlikely(!ctx_info))
+               return -ENOMEM;
+
+       ctx_info->dfd = dfd;
+       ctx_info->ctx = ctx;
+
+       spufs_fill_memsize(ctx_info);
+
+       size = spufs_ctx_note_size(ctx_info);
+       list_add(&ctx_info->list, &ctx_info_list);
+       return size;
+}
+
+/*
+ * The additional architecture-specific notes for Cell are various
+ * context files in the spu context.
+ *
+ * This function iterates over all open file descriptors and sees
+ * if they are a directory in spufs.  In that case we use spufs
+ * internal functionality to dump them without needing to actually
+ * open the files.
+ */
+static int spufs_arch_notes_size(void)
+{
+       struct fdtable *fdt = files_fdtable(current->files);
+       int size = 0, fd;
+
+       for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
+               if (FD_ISSET(fd, fdt->open_fds)) {
+                       struct file *file = fcheck(fd);
+
+                       if (file && file->f_op == &spufs_context_fops) {
+                               int rval = spufs_add_one_context(file, fd);
+                               if (rval < 0)
+                                       break;
+                               size += rval;
+                       }
+               }
+       }
+
+       return size;
+}
+
+static void spufs_arch_write_note(struct spufs_ctx_info *ctx_info, int i,
+                               struct file *file)
+{
+       struct spu_context *ctx;
+       loff_t pos = 0;
+       int sz, dfd, rc, total = 0;
+       const int bufsz = 4096;
+       char *name;
+       char fullname[80], *buf;
+       struct elf_note en;
+
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return;
+
+       dfd = ctx_info->dfd;
+       name = spufs_coredump_read[i].name;
+
+       if (!strcmp(name, "mem"))
+               sz = ctx_info->memsize;
+       else
+               sz = spufs_coredump_read[i].size;
+
+       ctx = ctx_info->ctx;
+       if (!ctx) {
+               return;
+       }
+
+       sprintf(fullname, "SPU/%d/%s", dfd, name);
+       en.n_namesz = strlen(fullname) + 1;
+       en.n_descsz = sz;
+       en.n_type = NT_SPU;
+
+       if (!spufs_dump_write(file, &en, sizeof(en)))
+               return;
+       if (!spufs_dump_write(file, fullname, en.n_namesz))
+               return;
+       if (!spufs_dump_seek(file, roundup((unsigned long)file->f_pos, 4)))
+               return;
+
+       do {
+               rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
+               if (rc > 0) {
+                       if (!spufs_dump_write(file, buf, rc))
+                               return;
+                       total += rc;
+               }
+       } while (rc == bufsz && total < sz);
+
+       spufs_dump_seek(file, roundup((unsigned long)file->f_pos
+                                               - total + sz, 4));
+}
+
+static void spufs_arch_write_notes(struct file *file)
+{
+       int j;
+       struct spufs_ctx_info *ctx_info, *next;
+
+       list_for_each_entry_safe(ctx_info, next, &ctx_info_list, list) {
+               spu_acquire_saved(ctx_info->ctx);
+               for (j = 0; j < spufs_coredump_num_notes; j++)
+                       spufs_arch_write_note(ctx_info, j, file);
+               spu_release(ctx_info->ctx);
+               list_del(&ctx_info->list);
+               kfree(ctx_info);
+       }
+}
+
+struct spu_coredump_calls spufs_coredump_calls = {
+       .arch_notes_size = spufs_arch_notes_size,
+       .arch_write_notes = spufs_arch_write_notes,
+       .owner = THIS_MODULE,
+};
index 533e2723e1840eafd8645a962f1f6b0cf4ef96c9..347eff56fcbd59d81640e9ae40fa4a72e9cea50d 100644 (file)
 #include <asm/io.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
+#include <asm/spu_info.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
 
 #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
 
-
 static int
 spufs_mem_open(struct inode *inode, struct file *file)
 {
@@ -50,19 +50,24 @@ spufs_mem_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static ssize_t
+__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
+                       size_t size, loff_t *pos)
+{
+       char *local_store = ctx->ops->get_ls(ctx);
+       return simple_read_from_buffer(buffer, size, pos, local_store,
+                                       LS_SIZE);
+}
+
 static ssize_t
 spufs_mem_read(struct file *file, char __user *buffer,
                                size_t size, loff_t *pos)
 {
-       struct spu_context *ctx = file->private_data;
-       char *local_store;
        int ret;
+       struct spu_context *ctx = file->private_data;
 
        spu_acquire(ctx);
-
-       local_store = ctx->ops->get_ls(ctx);
-       ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
-
+       ret = __spufs_mem_read(ctx, buffer, size, pos);
        spu_release(ctx);
        return ret;
 }
@@ -104,11 +109,11 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
 
        if (ctx->state == SPU_STATE_SAVED) {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                       & ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
+                                                       & ~_PAGE_NO_CACHE);
                page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
        } else {
                vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
-                                       | _PAGE_NO_CACHE | _PAGE_GUARDED);
+                                                       | _PAGE_NO_CACHE);
                page = pfn_to_page((ctx->spu->local_store_phys + offset)
                                   >> PAGE_SHIFT);
        }
@@ -131,7 +136,7 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       /* FIXME: */
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE);
 
@@ -200,7 +205,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -260,19 +265,24 @@ spufs_regs_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static ssize_t
+__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
+                       size_t size, loff_t *pos)
+{
+       struct spu_lscsa *lscsa = ctx->csa.lscsa;
+       return simple_read_from_buffer(buffer, size, pos,
+                                     lscsa->gprs, sizeof lscsa->gprs);
+}
+
 static ssize_t
 spufs_regs_read(struct file *file, char __user *buffer,
                size_t size, loff_t *pos)
 {
-       struct spu_context *ctx = file->private_data;
-       struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;
+       struct spu_context *ctx = file->private_data;
 
        spu_acquire_saved(ctx);
-
-       ret = simple_read_from_buffer(buffer, size, pos,
-                                     lscsa->gprs, sizeof lscsa->gprs);
-
+       ret = __spufs_regs_read(ctx, buffer, size, pos);
        spu_release(ctx);
        return ret;
 }
@@ -306,19 +316,24 @@ static struct file_operations spufs_regs_fops = {
        .llseek  = generic_file_llseek,
 };
 
+static ssize_t
+__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
+                       size_t size, loff_t * pos)
+{
+       struct spu_lscsa *lscsa = ctx->csa.lscsa;
+       return simple_read_from_buffer(buffer, size, pos,
+                                     &lscsa->fpcr, sizeof(lscsa->fpcr));
+}
+
 static ssize_t
 spufs_fpcr_read(struct file *file, char __user * buffer,
                size_t size, loff_t * pos)
 {
-       struct spu_context *ctx = file->private_data;
-       struct spu_lscsa *lscsa = ctx->csa.lscsa;
        int ret;
+       struct spu_context *ctx = file->private_data;
 
        spu_acquire_saved(ctx);
-
-       ret = simple_read_from_buffer(buffer, size, pos,
-                                     &lscsa->fpcr, sizeof(lscsa->fpcr));
-
+       ret = __spufs_fpcr_read(ctx, buffer, size, pos);
        spu_release(ctx);
        return ret;
 }
@@ -718,23 +733,41 @@ static int spufs_signal1_open(struct inode *inode, struct file *file)
        return nonseekable_open(inode, file);
 }
 
-static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
 {
-       struct spu_context *ctx = file->private_data;
+       int ret = 0;
        u32 data;
 
        if (len < 4)
                return -EINVAL;
 
-       spu_acquire(ctx);
-       data = ctx->ops->signal1_read(ctx);
-       spu_release(ctx);
+       if (ctx->csa.spu_chnlcnt_RW[3]) {
+               data = ctx->csa.spu_chnldata_RW[3];
+               ret = 4;
+       }
+
+       if (!ret)
+               goto out;
 
        if (copy_to_user(buf, &data, 4))
                return -EFAULT;
 
-       return 4;
+out:
+       return ret;
+}
+
+static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       int ret;
+       struct spu_context *ctx = file->private_data;
+
+       spu_acquire_saved(ctx);
+       ret = __spufs_signal1_read(ctx, buf, len, pos);
+       spu_release(ctx);
+
+       return ret;
 }
 
 static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
@@ -782,7 +815,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -807,25 +840,41 @@ static int spufs_signal2_open(struct inode *inode, struct file *file)
        return nonseekable_open(inode, file);
 }
 
-static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
                        size_t len, loff_t *pos)
 {
-       struct spu_context *ctx;
+       int ret = 0;
        u32 data;
 
-       ctx = file->private_data;
-
        if (len < 4)
                return -EINVAL;
 
-       spu_acquire(ctx);
-       data = ctx->ops->signal2_read(ctx);
-       spu_release(ctx);
+       if (ctx->csa.spu_chnlcnt_RW[4]) {
+               data =  ctx->csa.spu_chnldata_RW[4];
+               ret = 4;
+       }
+
+       if (!ret)
+               goto out;
 
        if (copy_to_user(buf, &data, 4))
                return -EFAULT;
 
-       return 4;
+out:
+       return ret;
+}
+
+static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       struct spu_context *ctx = file->private_data;
+       int ret;
+
+       spu_acquire_saved(ctx);
+       ret = __spufs_signal2_read(ctx, buf, len, pos);
+       spu_release(ctx);
+
+       return ret;
 }
 
 static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
@@ -874,8 +923,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       /* FIXME: */
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -902,13 +950,19 @@ static void spufs_signal1_type_set(void *data, u64 val)
        spu_release(ctx);
 }
 
+static u64 __spufs_signal1_type_get(void *data)
+{
+       struct spu_context *ctx = data;
+       return ctx->ops->signal1_type_get(ctx);
+}
+
 static u64 spufs_signal1_type_get(void *data)
 {
        struct spu_context *ctx = data;
        u64 ret;
 
        spu_acquire(ctx);
-       ret = ctx->ops->signal1_type_get(ctx);
+       ret = __spufs_signal1_type_get(data);
        spu_release(ctx);
 
        return ret;
@@ -925,13 +979,19 @@ static void spufs_signal2_type_set(void *data, u64 val)
        spu_release(ctx);
 }
 
+static u64 __spufs_signal2_type_get(void *data)
+{
+       struct spu_context *ctx = data;
+       return ctx->ops->signal2_type_get(ctx);
+}
+
 static u64 spufs_signal2_type_get(void *data)
 {
        struct spu_context *ctx = data;
        u64 ret;
 
        spu_acquire(ctx);
-       ret = ctx->ops->signal2_type_get(ctx);
+       ret = __spufs_signal2_type_get(data);
        spu_release(ctx);
 
        return ret;
@@ -958,7 +1018,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -1000,7 +1060,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -1041,7 +1101,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO;
        vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
                                     | _PAGE_NO_CACHE | _PAGE_GUARDED);
 
@@ -1265,6 +1325,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
                goto out;
 
        ctx->tagwait |= 1 << cmd.tag;
+       ret = size;
 
 out:
        return ret;
@@ -1360,7 +1421,8 @@ static u64 spufs_npc_get(void *data)
        spu_release(ctx);
        return ret;
 }
-DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set,
+                       "0x%llx\n")
 
 static void spufs_decr_set(void *data, u64 val)
 {
@@ -1371,18 +1433,24 @@ static void spufs_decr_set(void *data, u64 val)
        spu_release(ctx);
 }
 
-static u64 spufs_decr_get(void *data)
+static u64 __spufs_decr_get(void *data)
 {
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
+       return lscsa->decr.slot[0];
+}
+
+static u64 spufs_decr_get(void *data)
+{
+       struct spu_context *ctx = data;
        u64 ret;
        spu_acquire_saved(ctx);
-       ret = lscsa->decr.slot[0];
+       ret = __spufs_decr_get(data);
        spu_release(ctx);
        return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
-                       "%llx\n")
+                       "0x%llx\n")
 
 static void spufs_decr_status_set(void *data, u64 val)
 {
@@ -1393,62 +1461,76 @@ static void spufs_decr_status_set(void *data, u64 val)
        spu_release(ctx);
 }
 
-static u64 spufs_decr_status_get(void *data)
+static u64 __spufs_decr_status_get(void *data)
 {
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
+       return lscsa->decr_status.slot[0];
+}
+
+static u64 spufs_decr_status_get(void *data)
+{
+       struct spu_context *ctx = data;
        u64 ret;
        spu_acquire_saved(ctx);
-       ret = lscsa->decr_status.slot[0];
+       ret = __spufs_decr_status_get(data);
        spu_release(ctx);
        return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
-                       spufs_decr_status_set, "%llx\n")
+                       spufs_decr_status_set, "0x%llx\n")
 
-static void spufs_spu_tag_mask_set(void *data, u64 val)
+static void spufs_event_mask_set(void *data, u64 val)
 {
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
        spu_acquire_saved(ctx);
-       lscsa->tag_mask.slot[0] = (u32) val;
+       lscsa->event_mask.slot[0] = (u32) val;
        spu_release(ctx);
 }
 
-static u64 spufs_spu_tag_mask_get(void *data)
+static u64 __spufs_event_mask_get(void *data)
 {
        struct spu_context *ctx = data;
        struct spu_lscsa *lscsa = ctx->csa.lscsa;
+       return lscsa->event_mask.slot[0];
+}
+
+static u64 spufs_event_mask_get(void *data)
+{
+       struct spu_context *ctx = data;
        u64 ret;
        spu_acquire_saved(ctx);
-       ret = lscsa->tag_mask.slot[0];
+       ret = __spufs_event_mask_get(data);
        spu_release(ctx);
        return ret;
 }
-DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
-                       spufs_spu_tag_mask_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
+                       spufs_event_mask_set, "0x%llx\n")
 
-static void spufs_event_mask_set(void *data, u64 val)
+static u64 __spufs_event_status_get(void *data)
 {
        struct spu_context *ctx = data;
-       struct spu_lscsa *lscsa = ctx->csa.lscsa;
-       spu_acquire_saved(ctx);
-       lscsa->event_mask.slot[0] = (u32) val;
-       spu_release(ctx);
+       struct spu_state *state = &ctx->csa;
+       u64 stat;
+       stat = state->spu_chnlcnt_RW[0];
+       if (stat)
+               return state->spu_chnldata_RW[0];
+       return 0;
 }
 
-static u64 spufs_event_mask_get(void *data)
+static u64 spufs_event_status_get(void *data)
 {
        struct spu_context *ctx = data;
-       struct spu_lscsa *lscsa = ctx->csa.lscsa;
-       u64 ret;
+       u64 ret = 0;
+
        spu_acquire_saved(ctx);
-       ret = lscsa->event_mask.slot[0];
+       ret = __spufs_event_status_get(data);
        spu_release(ctx);
        return ret;
 }
-DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
-                       spufs_event_mask_set, "%llx\n")
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get,
+                       NULL, "0x%llx\n")
 
 static void spufs_srr0_set(void *data, u64 val)
 {
@@ -1470,7 +1552,7 @@ static u64 spufs_srr0_get(void *data)
        return ret;
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
-                       "%llx\n")
+                       "0x%llx\n")
 
 static u64 spufs_id_get(void *data)
 {
@@ -1488,12 +1570,18 @@ static u64 spufs_id_get(void *data)
 }
 DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
 
-static u64 spufs_object_id_get(void *data)
+static u64 __spufs_object_id_get(void *data)
 {
        struct spu_context *ctx = data;
        return ctx->object_id;
 }
 
+static u64 spufs_object_id_get(void *data)
+{
+       /* FIXME: Should there really be no locking here? */
+       return __spufs_object_id_get(data);
+}
+
 static void spufs_object_id_set(void *data, u64 id)
 {
        struct spu_context *ctx = data;
@@ -1503,6 +1591,250 @@ static void spufs_object_id_set(void *data, u64 id)
 DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
                spufs_object_id_set, "0x%llx\n");
 
+static u64 __spufs_lslr_get(void *data)
+{
+       struct spu_context *ctx = data;
+       return ctx->csa.priv2.spu_lslr_RW;
+}
+
+static u64 spufs_lslr_get(void *data)
+{
+       struct spu_context *ctx = data;
+       u64 ret;
+
+       spu_acquire_saved(ctx);
+       ret = __spufs_lslr_get(data);
+       spu_release(ctx);
+
+       return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n")
+
+static int spufs_info_open(struct inode *inode, struct file *file)
+{
+       struct spufs_inode_info *i = SPUFS_I(inode);
+       struct spu_context *ctx = i->i_ctx;
+       file->private_data = ctx;
+       return 0;
+}
+
+static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
+                       char __user *buf, size_t len, loff_t *pos)
+{
+       u32 mbox_stat;
+       u32 data;
+
+       mbox_stat = ctx->csa.prob.mb_stat_R;
+       if (mbox_stat & 0x0000ff) {
+               data = ctx->csa.prob.pu_mb_R;
+       }
+
+       return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+}
+
+static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
+                                  size_t len, loff_t *pos)
+{
+       int ret;
+       struct spu_context *ctx = file->private_data;
+
+       if (!access_ok(VERIFY_WRITE, buf, len))
+               return -EFAULT;
+
+       spu_acquire_saved(ctx);
+       spin_lock(&ctx->csa.register_lock);
+       ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+       spin_unlock(&ctx->csa.register_lock);
+       spu_release(ctx);
+
+       return ret;
+}
+
+static struct file_operations spufs_mbox_info_fops = {
+       .open = spufs_info_open,
+       .read = spufs_mbox_info_read,
+       .llseek  = generic_file_llseek,
+};
+
+static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
+                               char __user *buf, size_t len, loff_t *pos)
+{
+       u32 ibox_stat;
+       u32 data;
+
+       ibox_stat = ctx->csa.prob.mb_stat_R;
+       if (ibox_stat & 0xff0000) {
+               data = ctx->csa.priv2.puint_mb_R;
+       }
+
+       return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+}
+
+static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
+                                  size_t len, loff_t *pos)
+{
+       struct spu_context *ctx = file->private_data;
+       int ret;
+
+       if (!access_ok(VERIFY_WRITE, buf, len))
+               return -EFAULT;
+
+       spu_acquire_saved(ctx);
+       spin_lock(&ctx->csa.register_lock);
+       ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+       spin_unlock(&ctx->csa.register_lock);
+       spu_release(ctx);
+
+       return ret;
+}
+
+static struct file_operations spufs_ibox_info_fops = {
+       .open = spufs_info_open,
+       .read = spufs_ibox_info_read,
+       .llseek  = generic_file_llseek,
+};
+
+static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
+                       char __user *buf, size_t len, loff_t *pos)
+{
+       int i, cnt;
+       u32 data[4];
+       u32 wbox_stat;
+
+       wbox_stat = ctx->csa.prob.mb_stat_R;
+       cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
+       for (i = 0; i < cnt; i++) {
+               data[i] = ctx->csa.spu_mailbox_data[i];
+       }
+
+       return simple_read_from_buffer(buf, len, pos, &data,
+                               cnt * sizeof(u32));
+}
+
+static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
+                                  size_t len, loff_t *pos)
+{
+       struct spu_context *ctx = file->private_data;
+       int ret;
+
+       if (!access_ok(VERIFY_WRITE, buf, len))
+               return -EFAULT;
+
+       spu_acquire_saved(ctx);
+       spin_lock(&ctx->csa.register_lock);
+       ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+       spin_unlock(&ctx->csa.register_lock);
+       spu_release(ctx);
+
+       return ret;
+}
+
+static struct file_operations spufs_wbox_info_fops = {
+       .open = spufs_info_open,
+       .read = spufs_wbox_info_read,
+       .llseek  = generic_file_llseek,
+};
+
+static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
+                       char __user *buf, size_t len, loff_t *pos)
+{
+       struct spu_dma_info info;
+       struct mfc_cq_sr *qp, *spuqp;
+       int i;
+
+       info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+       info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+       info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
+       info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+       info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+       for (i = 0; i < 16; i++) {
+               qp = &info.dma_info_command_data[i];
+               spuqp = &ctx->csa.priv2.spuq[i];
+
+               qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
+               qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
+               qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
+               qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
+       }
+
+       return simple_read_from_buffer(buf, len, pos, &info,
+                               sizeof info);
+}
+
+static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
+                             size_t len, loff_t *pos)
+{
+       struct spu_context *ctx = file->private_data;
+       int ret;
+
+       if (!access_ok(VERIFY_WRITE, buf, len))
+               return -EFAULT;
+
+       spu_acquire_saved(ctx);
+       spin_lock(&ctx->csa.register_lock);
+       ret = __spufs_dma_info_read(ctx, buf, len, pos);
+       spin_unlock(&ctx->csa.register_lock);
+       spu_release(ctx);
+
+       return ret;
+}
+
+static struct file_operations spufs_dma_info_fops = {
+       .open = spufs_info_open,
+       .read = spufs_dma_info_read,
+};
+
+static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
+                       char __user *buf, size_t len, loff_t *pos)
+{
+       struct spu_proxydma_info info;
+       struct mfc_cq_sr *qp, *puqp;
+       int ret = sizeof info;
+       int i;
+
+       if (len < ret)
+               return -EINVAL;
+
+       if (!access_ok(VERIFY_WRITE, buf, len))
+               return -EFAULT;
+
+       info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+       info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+       info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
+       for (i = 0; i < 8; i++) {
+               qp = &info.proxydma_info_command_data[i];
+               puqp = &ctx->csa.priv2.puq[i];
+
+               qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
+               qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
+               qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
+               qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
+       }
+
+       return simple_read_from_buffer(buf, len, pos, &info,
+                               sizeof info);
+}
+
+static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
+                                  size_t len, loff_t *pos)
+{
+       struct spu_context *ctx = file->private_data;
+       int ret;
+
+       spu_acquire_saved(ctx);
+       spin_lock(&ctx->csa.register_lock);
+       ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+       spin_unlock(&ctx->csa.register_lock);
+       spu_release(ctx);
+
+       return ret;
+}
+
+static struct file_operations spufs_proxydma_info_fops = {
+       .open = spufs_info_open,
+       .read = spufs_proxydma_info_read,
+};
+
 struct tree_descr spufs_dir_contents[] = {
        { "mem",  &spufs_mem_fops,  0666, },
        { "regs", &spufs_regs_fops,  0666, },
@@ -1516,18 +1848,70 @@ struct tree_descr spufs_dir_contents[] = {
        { "signal2", &spufs_signal2_fops, 0666, },
        { "signal1_type", &spufs_signal1_type, 0666, },
        { "signal2_type", &spufs_signal2_type, 0666, },
-       { "mss", &spufs_mss_fops, 0666, },
-       { "mfc", &spufs_mfc_fops, 0666, },
        { "cntl", &spufs_cntl_fops,  0666, },
-       { "npc", &spufs_npc_ops, 0666, },
        { "fpcr", &spufs_fpcr_fops, 0666, },
+       { "lslr", &spufs_lslr_ops, 0444, },
+       { "mfc", &spufs_mfc_fops, 0666, },
+       { "mss", &spufs_mss_fops, 0666, },
+       { "npc", &spufs_npc_ops, 0666, },
+       { "srr0", &spufs_srr0_ops, 0666, },
        { "decr", &spufs_decr_ops, 0666, },
        { "decr_status", &spufs_decr_status_ops, 0666, },
-       { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
        { "event_mask", &spufs_event_mask_ops, 0666, },
-       { "srr0", &spufs_srr0_ops, 0666, },
+       { "event_status", &spufs_event_status_ops, 0444, },
+       { "psmap", &spufs_psmap_fops, 0666, },
+       { "phys-id", &spufs_id_ops, 0666, },
+       { "object-id", &spufs_object_id_ops, 0666, },
+       { "mbox_info", &spufs_mbox_info_fops, 0444, },
+       { "ibox_info", &spufs_ibox_info_fops, 0444, },
+       { "wbox_info", &spufs_wbox_info_fops, 0444, },
+       { "dma_info", &spufs_dma_info_fops, 0444, },
+       { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
+       {},
+};
+
+struct tree_descr spufs_dir_nosched_contents[] = {
+       { "mem",  &spufs_mem_fops,  0666, },
+       { "mbox", &spufs_mbox_fops, 0444, },
+       { "ibox", &spufs_ibox_fops, 0444, },
+       { "wbox", &spufs_wbox_fops, 0222, },
+       { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+       { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+       { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+       { "signal1", &spufs_signal1_fops, 0666, },
+       { "signal2", &spufs_signal2_fops, 0666, },
+       { "signal1_type", &spufs_signal1_type, 0666, },
+       { "signal2_type", &spufs_signal2_type, 0666, },
+       { "mss", &spufs_mss_fops, 0666, },
+       { "mfc", &spufs_mfc_fops, 0666, },
+       { "cntl", &spufs_cntl_fops,  0666, },
+       { "npc", &spufs_npc_ops, 0666, },
        { "psmap", &spufs_psmap_fops, 0666, },
        { "phys-id", &spufs_id_ops, 0666, },
        { "object-id", &spufs_object_id_ops, 0666, },
        {},
 };
+
+struct spufs_coredump_reader spufs_coredump_read[] = {
+       { "regs", __spufs_regs_read, NULL, 128 * 16 },
+       { "fpcr", __spufs_fpcr_read, NULL, 16 },
+       { "lslr", NULL, __spufs_lslr_get, 11 },
+       { "decr", NULL, __spufs_decr_get, 11 },
+       { "decr_status", NULL, __spufs_decr_status_get, 11 },
+       { "mem", __spufs_mem_read, NULL, 256 * 1024, },
+       { "signal1", __spufs_signal1_read, NULL, 4 },
+       { "signal1_type", NULL, __spufs_signal1_type_get, 2 },
+       { "signal2", __spufs_signal2_read, NULL, 4 },
+       { "signal2_type", NULL, __spufs_signal2_type_get, 2 },
+       { "event_mask", NULL, __spufs_event_mask_get, 8 },
+       { "event_status", NULL, __spufs_event_status_get, 8 },
+       { "mbox_info", __spufs_mbox_info_read, NULL, 4 },
+       { "ibox_info", __spufs_ibox_info_read, NULL, 4 },
+       { "wbox_info", __spufs_wbox_info_read, NULL, 16 },
+       { "dma_info", __spufs_dma_info_read, NULL, 69 * 8 },
+       { "proxydma_info", __spufs_proxydma_info_read, NULL, 35 * 8 },
+       { "object-id", NULL, __spufs_object_id_get, 19 },
+       { },
+};
+int spufs_coredump_num_notes = ARRAY_SIZE(spufs_coredump_read) - 1;
+
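The file.c hunks above repeatedly split each read path into a locked spufs_*() wrapper and a lock-free __spufs_*() worker, so the coredump table at the end of the file can call the workers while the context is already acquired. A minimal user-space sketch of that wrapper/worker split, using a pthread mutex in place of spu_acquire(); all names below are illustrative, not kernel APIs:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct ctx {
        pthread_mutex_t lock;
        unsigned char regs[128];
};

/* worker: caller must already hold ctx->lock (compare __spufs_regs_read) */
static size_t __regs_read(struct ctx *c, void *buf, size_t len)
{
        if (len > sizeof(c->regs))
                len = sizeof(c->regs);
        memcpy(buf, c->regs, len);
        return len;
}

/* wrapper: takes the lock around the worker (compare spufs_regs_read) */
static size_t regs_read(struct ctx *c, void *buf, size_t len)
{
        size_t n;

        pthread_mutex_lock(&c->lock);
        n = __regs_read(c, buf, len);
        pthread_mutex_unlock(&c->lock);
        return n;
}

int main(void)
{
        struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };
        unsigned char buf[16];

        printf("copied %zu bytes\n", regs_read(&c, buf, sizeof(buf)));
        return 0;
}

Keeping the locking only in the wrapper lets a second caller that already holds the lock (here, a hypothetical dump routine) reuse the worker without deadlocking.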
index d805ffed892d4721db9a697720a31e097e90f974..ae42e03b8c8687f1b7ff183bc371f33164280e09 100644 (file)
@@ -135,21 +135,11 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
        return ret;
 }
 
-static u32 spu_hw_signal1_read(struct spu_context *ctx)
-{
-       return in_be32(&ctx->spu->problem->signal_notify1);
-}
-
 static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
 {
        out_be32(&ctx->spu->problem->signal_notify1, data);
 }
 
-static u32 spu_hw_signal2_read(struct spu_context *ctx)
-{
-       return in_be32(&ctx->spu->problem->signal_notify2);
-}
-
 static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
 {
        out_be32(&ctx->spu->problem->signal_notify2, data);
@@ -217,21 +207,42 @@ static char *spu_hw_get_ls(struct spu_context *ctx)
        return ctx->spu->local_store;
 }
 
-static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
+static u32 spu_hw_runcntl_read(struct spu_context *ctx)
 {
-       eieio();
-       out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+       return in_be32(&ctx->spu->problem->spu_runcntl_RW);
 }
 
-static void spu_hw_runcntl_stop(struct spu_context *ctx)
+static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
 {
        spin_lock_irq(&ctx->spu->register_lock);
-       out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
-       while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
-               cpu_relax();
+       if (val & SPU_RUNCNTL_ISOLATE)
+               out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
+       out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
        spin_unlock_irq(&ctx->spu->register_lock);
 }
 
+static void spu_hw_master_start(struct spu_context *ctx)
+{
+       struct spu *spu = ctx->spu;
+       u64 sr1;
+
+       spin_lock_irq(&spu->register_lock);
+       sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+       spu_mfc_sr1_set(spu, sr1);
+       spin_unlock_irq(&spu->register_lock);
+}
+
+static void spu_hw_master_stop(struct spu_context *ctx)
+{
+       struct spu *spu = ctx->spu;
+       u64 sr1;
+
+       spin_lock_irq(&spu->register_lock);
+       sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+       spu_mfc_sr1_set(spu, sr1);
+       spin_unlock_irq(&spu->register_lock);
+}
+
 static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode)
 {
        struct spu_problem __iomem *prob = ctx->spu->problem;
@@ -291,9 +302,7 @@ struct spu_context_ops spu_hw_ops = {
        .mbox_stat_poll = spu_hw_mbox_stat_poll,
        .ibox_read = spu_hw_ibox_read,
        .wbox_write = spu_hw_wbox_write,
-       .signal1_read = spu_hw_signal1_read,
        .signal1_write = spu_hw_signal1_write,
-       .signal2_read = spu_hw_signal2_read,
        .signal2_write = spu_hw_signal2_write,
        .signal1_type_set = spu_hw_signal1_type_set,
        .signal1_type_get = spu_hw_signal1_type_get,
@@ -303,8 +312,10 @@ struct spu_context_ops spu_hw_ops = {
        .npc_write = spu_hw_npc_write,
        .status_read = spu_hw_status_read,
        .get_ls = spu_hw_get_ls,
+       .runcntl_read = spu_hw_runcntl_read,
        .runcntl_write = spu_hw_runcntl_write,
-       .runcntl_stop = spu_hw_runcntl_stop,
+       .master_start = spu_hw_master_start,
+       .master_stop = spu_hw_master_stop,
        .set_mfc_query = spu_hw_set_mfc_query,
        .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
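The new master_start()/master_stop() callbacks above replace runcntl_stop() with a read-modify-write of the SR1 master-run-control bit under the register lock. A hedged user-space sketch of that set-or-clear-one-bit-under-a-lock pattern; the bit value and names are stand-ins, not the real MFC definitions:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MASTER_RUN_BIT (1ULL << 3)      /* stand-in for MFC_STATE1_MASTER_RUN_CONTROL_MASK */

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t sr1_shadow;             /* stands in for the hardware register */

static void master_set(int on)
{
        pthread_mutex_lock(&reg_lock);          /* compare spin_lock_irq(&spu->register_lock) */
        if (on)
                sr1_shadow |= MASTER_RUN_BIT;   /* master_start */
        else
                sr1_shadow &= ~MASTER_RUN_BIT;  /* master_stop */
        pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
        master_set(1);
        printf("sr1 = %#llx\n", (unsigned long long)sr1_shadow);
        master_set(0);
        printf("sr1 = %#llx\n", (unsigned long long)sr1_shadow);
        return 0;
}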
index 427d00a4f6a0884b22018d4f07929992f2370a7a..e3af9112c02620a794ce051a685417b9086cf1f5 100644 (file)
 #include <linux/slab.h>
 #include <linux/parser.h>
 
-#include <asm/io.h>
+#include <asm/prom.h>
 #include <asm/semaphore.h>
 #include <asm/spu.h>
 #include <asm/uaccess.h>
 
 #include "spufs.h"
 
-static kmem_cache_t *spufs_inode_cache;
+static struct kmem_cache *spufs_inode_cache;
+char *isolated_loader;
 
 static struct inode *
 spufs_alloc_inode(struct super_block *sb)
 {
        struct spufs_inode_info *ei;
 
-       ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+       ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
        if (!ei)
                return NULL;
 
@@ -64,7 +65,7 @@ spufs_destroy_inode(struct inode *inode)
 }
 
 static void
-spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
 {
        struct spufs_inode_info *ei = p;
 
@@ -231,6 +232,7 @@ struct file_operations spufs_context_fops = {
        .readdir        = dcache_readdir,
        .fsync          = simple_sync_file,
 };
+EXPORT_SYMBOL_GPL(spufs_context_fops);
 
 static int
 spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
@@ -255,10 +257,14 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
                goto out_iput;
 
        ctx->flags = flags;
-
        inode->i_op = &spufs_dir_inode_operations;
        inode->i_fop = &simple_dir_operations;
-       ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+       if (flags & SPU_CREATE_NOSCHED)
+               ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
+                                        mode, ctx);
+       else
+               ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+
        if (ret)
                goto out_free_ctx;
 
@@ -307,6 +313,20 @@ static int spufs_create_context(struct inode *inode,
 {
        int ret;
 
+       ret = -EPERM;
+       if ((flags & SPU_CREATE_NOSCHED) &&
+           !capable(CAP_SYS_NICE))
+               goto out_unlock;
+
+       ret = -EINVAL;
+       if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
+           == SPU_CREATE_ISOLATE)
+               goto out_unlock;
+
+       ret = -ENODEV;
+       if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
+               goto out_unlock;
+
        ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
        if (ret)
                goto out_unlock;
@@ -540,6 +560,30 @@ spufs_parse_options(char *options, struct inode *root)
        return 1;
 }
 
+static void
+spufs_init_isolated_loader(void)
+{
+       struct device_node *dn;
+       const char *loader;
+       int size;
+
+       dn = of_find_node_by_path("/spu-isolation");
+       if (!dn)
+               return;
+
+       loader = get_property(dn, "loader", &size);
+       if (!loader)
+               return;
+
+       /* kmalloc should align on a 16-byte boundary */
+       isolated_loader = kmalloc(size, GFP_KERNEL);
+       if (!isolated_loader)
+               return;
+
+       memcpy(isolated_loader, loader, size);
+       printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
+}
+
 static int
 spufs_create_root(struct super_block *sb, void *data)
 {
@@ -608,6 +652,7 @@ static struct file_system_type spufs_type = {
 static int __init spufs_init(void)
 {
        int ret;
+
        ret = -ENOMEM;
        spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
                        sizeof(struct spufs_inode_info), 0,
@@ -625,6 +670,12 @@ static int __init spufs_init(void)
        ret = register_spu_syscalls(&spufs_calls);
        if (ret)
                goto out_fs;
+       ret = register_arch_coredump_calls(&spufs_coredump_calls);
+       if (ret)
+               goto out_fs;
+
+       spufs_init_isolated_loader();
+
        return 0;
 out_fs:
        unregister_filesystem(&spufs_type);
@@ -638,6 +689,7 @@ module_init(spufs_init);
 static void __exit spufs_exit(void)
 {
        spu_sched_exit();
+       unregister_arch_coredump_calls(&spufs_coredump_calls);
        unregister_spu_syscalls(&spufs_calls);
        unregister_filesystem(&spufs_type);
        kmem_cache_destroy(spufs_inode_cache);
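spufs_create_context() now rejects flag combinations before creating the directory: NOSCHED needs CAP_SYS_NICE, ISOLATE is only valid together with NOSCHED, and isolation also requires the loader blob read by spufs_init_isolated_loader(). A small sketch of that validation order, with hypothetical flag values and a plain bool in place of capable():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define CREATE_NOSCHED 0x1      /* stand-ins for SPU_CREATE_NOSCHED / SPU_CREATE_ISOLATE */
#define CREATE_ISOLATE 0x2

static int check_create_flags(unsigned flags, bool privileged, bool have_loader)
{
        if ((flags & CREATE_NOSCHED) && !privileged)
                return -EPERM;          /* nosched contexts need privilege */
        if ((flags & (CREATE_NOSCHED | CREATE_ISOLATE)) == CREATE_ISOLATE)
                return -EINVAL;         /* ISOLATE without NOSCHED is rejected */
        if ((flags & CREATE_ISOLATE) && !have_loader)
                return -ENODEV;         /* no isolated loader available */
        return 0;
}

int main(void)
{
        printf("%d\n", check_create_flags(CREATE_ISOLATE, true, true));                  /* -EINVAL */
        printf("%d\n", check_create_flags(CREATE_NOSCHED | CREATE_ISOLATE, true, true)); /* 0 */
        return 0;
}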
index 63df8cf4ba1607e13ee8c01cdab8a392da12835b..1acc2ffef8c8ac67749be5dbf1a43ef176648304 100644 (file)
@@ -1,7 +1,11 @@
+#define DEBUG
+
 #include <linux/wait.h>
 #include <linux/ptrace.h>
 
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/io.h>
 #include <asm/unistd.h>
 
 #include "spufs.h"
@@ -24,6 +28,7 @@ void spufs_dma_callback(struct spu *spu, int type)
        } else {
                switch (type) {
                case SPE_EVENT_DMA_ALIGNMENT:
+               case SPE_EVENT_SPE_DATA_STORAGE:
                case SPE_EVENT_INVALID_DMA:
                        force_sig(SIGBUS, /* info, */ current);
                        break;
@@ -48,15 +53,122 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
        return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
 }
 
+static int spu_setup_isolated(struct spu_context *ctx)
+{
+       int ret;
+       u64 __iomem *mfc_cntl;
+       u64 sr1;
+       u32 status;
+       unsigned long timeout;
+       const u32 status_loading = SPU_STATUS_RUNNING
+               | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;
+
+       if (!isolated_loader)
+               return -ENODEV;
+
+       ret = spu_acquire_exclusive(ctx);
+       if (ret)
+               goto out;
+
+       mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
+
+       /* purge the MFC DMA queue to ensure no spurious accesses before we
+        * enter kernel mode */
+       timeout = jiffies + HZ;
+       out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
+       while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
+                       != MFC_CNTL_PURGE_DMA_COMPLETE) {
+               if (time_after(jiffies, timeout)) {
+                       printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
+                                       __FUNCTION__);
+                       ret = -EIO;
+                       goto out_unlock;
+               }
+               cond_resched();
+       }
+
+       /* put the SPE in kernel mode to allow access to the loader */
+       sr1 = spu_mfc_sr1_get(ctx->spu);
+       sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
+       spu_mfc_sr1_set(ctx->spu, sr1);
+
+       /* start the loader */
+       ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
+       ctx->ops->signal2_write(ctx,
+                       (unsigned long)isolated_loader & 0xffffffff);
+
+       ctx->ops->runcntl_write(ctx,
+                       SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+
+       ret = 0;
+       timeout = jiffies + HZ;
+       while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
+                               status_loading) {
+               if (time_after(jiffies, timeout)) {
+                       printk(KERN_ERR "%s: timeout waiting for loader\n",
+                                       __FUNCTION__);
+                       ret = -EIO;
+                       goto out_drop_priv;
+               }
+               cond_resched();
+       }
+
+       if (!(status & SPU_STATUS_RUNNING)) {
+               /* If the isolated LOAD has failed, run the SPU anyway; we
+                * will get a stop-and-signal later. */
+               pr_debug("%s: isolated LOAD failed\n", __FUNCTION__);
+               ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+               ret = -EACCES;
+
+       } else if (!(status & SPU_STATUS_ISOLATED_STATE)) {
+               /* This isn't allowed by the CBEA, but check anyway */
+               pr_debug("%s: SPU fell out of isolated mode?\n", __FUNCTION__);
+               ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
+               ret = -EINVAL;
+       }
+
+out_drop_priv:
+       /* Finished accessing the loader. Drop kernel mode */
+       sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
+       spu_mfc_sr1_set(ctx->spu, sr1);
+
+out_unlock:
+       spu_release_exclusive(ctx);
+out:
+       return ret;
+}
+
 static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
        int ret;
+       unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-       if ((ret = spu_acquire_runnable(ctx)) != 0)
+       ret = spu_acquire_runnable(ctx);
+       if (ret)
                return ret;
-       ctx->ops->npc_write(ctx, *npc);
-       ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
-       return 0;
+
+       if (ctx->flags & SPU_CREATE_ISOLATE) {
+               if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
+                       /* Need to release ctx, because spu_setup_isolated will
+                        * acquire it exclusively.
+                        */
+                       spu_release(ctx);
+                       ret = spu_setup_isolated(ctx);
+                       if (!ret)
+                               ret = spu_acquire_runnable(ctx);
+               }
+
+       /* if userspace has set the runcntl register (e.g. to issue an
+                * isolated exit), we need to re-set it here */
+               runcntl = ctx->ops->runcntl_read(ctx) &
+                       (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
+               if (runcntl == 0)
+                       runcntl = SPU_RUNCNTL_RUNNABLE;
+       } else
+               ctx->ops->npc_write(ctx, *npc);
+
+       ctx->ops->runcntl_write(ctx, runcntl);
+       return ret;
 }
 
 static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
@@ -70,13 +182,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
 
        if (signal_pending(current))
                ret = -ERESTARTSYS;
-       if (unlikely(current->ptrace & PT_PTRACED)) {
-               if ((*status & SPU_STATUS_STOPPED_BY_STOP)
-                   && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
-                       force_sig(SIGTRAP, current);
-                       ret = -ERESTARTSYS;
-               }
-       }
+
        return ret;
 }
 
@@ -204,6 +310,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
        if (down_interruptible(&ctx->run_sema))
                return -ERESTARTSYS;
 
+       ctx->ops->master_start(ctx);
        ctx->event_return = 0;
        ret = spu_run_init(ctx, npc);
        if (ret)
@@ -223,7 +330,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
                if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
                        ret = spu_reacquire_runnable(ctx, npc, &status);
                        if (ret)
-                               goto out;
+                               goto out2;
                        continue;
                }
                ret = spu_process_events(ctx);
@@ -231,12 +338,24 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
        } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
                                      SPU_STATUS_STOPPED_BY_HALT)));
 
-       ctx->ops->runcntl_stop(ctx);
+       ctx->ops->master_stop(ctx);
        ret = spu_run_fini(ctx, npc, &status);
-       if (!ret)
-               ret = status;
        spu_yield(ctx);
 
+out2:
+       if ((ret == 0) ||
+           ((ret == -ERESTARTSYS) &&
+            ((status & SPU_STATUS_STOPPED_BY_HALT) ||
+             ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+              (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
+               ret = status;
+
+       if ((status & SPU_STATUS_STOPPED_BY_STOP)
+           && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+               force_sig(SIGTRAP, current);
+               ret = -ERESTARTSYS;
+       }
+
 out:
        *event = ctx->event_return;
        up(&ctx->run_sema);
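spu_setup_isolated() in the run.c hunks above polls the MFC purge status and then the SPU status word against a one-second deadline (timeout = jiffies + HZ, cond_resched() between probes, -EIO on expiry). A user-space sketch of that bounded-poll loop; still_busy() is a hypothetical stand-in for the status reads:

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* hypothetical status probe; in spu_setup_isolated() this is
 * in_be64(mfc_cntl) / ctx->ops->status_read(ctx) */
static int still_busy(void)
{
        static int countdown = 3;
        return countdown-- > 0;
}

static int wait_ready(int timeout_ms)
{
        struct timespec now, deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);      /* compare timeout = jiffies + HZ */
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        while (still_busy()) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > deadline.tv_sec ||
                    (now.tv_sec == deadline.tv_sec && now.tv_nsec > deadline.tv_nsec))
                        return -ETIMEDOUT;              /* compare the -EIO timeout paths */
                /* yield briefly between probes, compare cond_resched() */
                nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
        }
        return 0;
}

int main(void)
{
        printf("wait_ready() = %d\n", wait_ready(1000));
        return 0;
}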
index a0f55ca2d488322c1165a82b647eb4196a13d872..70fb13395c04b47325e6940df9c089c921ac3a8a 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <asm/spu.h>
 #include <asm/spu_csa.h>
+#include <asm/spu_info.h>
 
 /* The magic number for our file system */
 enum {
@@ -114,13 +115,19 @@ struct spu_context_ops {
        void (*npc_write) (struct spu_context * ctx, u32 data);
         u32(*status_read) (struct spu_context * ctx);
        char*(*get_ls) (struct spu_context * ctx);
+        u32 (*runcntl_read) (struct spu_context * ctx);
        void (*runcntl_write) (struct spu_context * ctx, u32 data);
-       void (*runcntl_stop) (struct spu_context * ctx);
+       void (*master_start) (struct spu_context * ctx);
+       void (*master_stop) (struct spu_context * ctx);
        int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
        u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
        u32 (*get_mfc_free_elements)(struct spu_context *ctx);
-       int (*send_mfc_command)(struct spu_context *ctx,
-                                       struct mfc_dma_command *cmd);
+       int (*send_mfc_command)(struct spu_context * ctx,
+                               struct mfc_dma_command * cmd);
+       void (*dma_info_read) (struct spu_context * ctx,
+                              struct spu_dma_info * info);
+       void (*proxydma_info_read) (struct spu_context * ctx,
+                                   struct spu_proxydma_info * info);
 };
 
 extern struct spu_context_ops spu_hw_ops;
@@ -135,6 +142,7 @@ struct spufs_inode_info {
        container_of(inode, struct spufs_inode_info, vfs_inode)
 
 extern struct tree_descr spufs_dir_contents[];
+extern struct tree_descr spufs_dir_nosched_contents[];
 
 /* system call implementation */
 long spufs_run_spu(struct file *file,
@@ -162,6 +170,12 @@ void spu_acquire(struct spu_context *ctx);
 void spu_release(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx);
 void spu_acquire_saved(struct spu_context *ctx);
+int spu_acquire_exclusive(struct spu_context *ctx);
+
+static inline void spu_release_exclusive(struct spu_context *ctx)
+{
+       up_write(&ctx->state_sema);
+}
 
 int spu_activate(struct spu_context *ctx, u64 flags);
 void spu_deactivate(struct spu_context *ctx);
@@ -169,6 +183,8 @@ void spu_yield(struct spu_context *ctx);
 int __init spu_sched_init(void);
 void __exit spu_sched_exit(void);
 
+extern char *isolated_loader;
+
 /*
  * spufs_wait
  *     Same as wait_event_interruptible(), except that here
@@ -207,4 +223,15 @@ void spufs_stop_callback(struct spu *spu);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
+extern struct spu_coredump_calls spufs_coredump_calls;
+struct spufs_coredump_reader {
+       char *name;
+       ssize_t (*read)(struct spu_context *ctx,
+                       char __user *buffer, size_t size, loff_t *pos);
+       u64 (*get)(void *data);
+       size_t size;
+};
+extern struct spufs_coredump_reader spufs_coredump_read[];
+extern int spufs_coredump_num_notes;
+
 #endif
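The spufs_coredump_reader table declared above (and filled in at the end of file.c) is a sentinel-terminated descriptor list where each entry supplies either a bulk read() callback or a single-value get() callback. A sketch of how such a table can be walked; the structure shape mirrors the header, everything else is illustrative:

#include <stddef.h>
#include <stdio.h>

struct dump_reader {
        const char *name;
        size_t (*read)(void *ctx, char *buf, size_t len);
        unsigned long long (*get)(void *ctx);
        size_t size;
};

static unsigned long long get_decr(void *ctx) { (void)ctx; return 0x1234; }

static size_t read_mem(void *ctx, char *buf, size_t len)
{
        (void)ctx;
        for (size_t i = 0; i < len; i++)
                buf[i] = 0;
        return len;
}

static const struct dump_reader readers[] = {
        { "mem",  read_mem, NULL,     16 },
        { "decr", NULL,     get_decr, 11 },
        { NULL },       /* sentinel, like the trailing { } entry in spufs_coredump_read[] */
};

int main(void)
{
        char buf[32];

        for (const struct dump_reader *r = readers; r->name; r++) {
                if (r->read)
                        printf("%s: %zu bytes\n", r->name, r->read(NULL, buf, r->size));
                else
                        printf("%s: 0x%llx\n", r->name, r->get(NULL));
        }
        return 0;
}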
index 0f782ca662ba2cb6a844a173a90cca825728b644..c08981ff7fc6e283e5db7885b5abd8f42f73590f 100644 (file)
@@ -102,7 +102,7 @@ static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
         *     saved at this time.
         */
        isolate_state = SPU_STATUS_ISOLATED_STATE |
-           SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
+           SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
        return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
 }
 
@@ -1046,12 +1046,12 @@ static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
         */
        if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
                if (in_be32(&prob->spu_status_R) &
-                   SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+                   SPU_STATUS_ISOLATED_EXIT_STATUS) {
                        POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
-                    SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+                    SPU_STATUS_ISOLATED_LOAD_STATUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
@@ -1085,7 +1085,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
         */
        if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
                if (in_be32(&prob->spu_status_R) &
-                   SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+                   SPU_STATUS_ISOLATED_EXIT_STATUS) {
                        spu_mfc_sr1_set(spu,
                                        MFC_STATE1_MASTER_RUN_CONTROL_MASK);
                        eieio();
@@ -1095,7 +1095,7 @@ static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
                                        SPU_STATUS_RUNNING);
                }
                if ((in_be32(&prob->spu_status_R) &
-                    SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+                    SPU_STATUS_ISOLATED_LOAD_STATUS)
                    || (in_be32(&prob->spu_status_R) &
                        SPU_STATUS_ISOLATED_STATE)) {
                        spu_mfc_sr1_set(spu,
@@ -1916,6 +1916,51 @@ static void save_lscsa(struct spu_state *prev, struct spu *spu)
        wait_spu_stopped(prev, spu);    /* Step 57. */
 }
 
+static void force_spu_isolate_exit(struct spu *spu)
+{
+       struct spu_problem __iomem *prob = spu->problem;
+       struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+       /* Stop SPE execution and wait for completion. */
+       out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+       iobarrier_rw();
+       POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+
+       /* Restart SPE master runcntl. */
+       spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+       iobarrier_w();
+
+       /* Initiate isolate exit request and wait for completion. */
+       out_be64(&priv2->spu_privcntl_RW, 4LL);
+       iobarrier_w();
+       out_be32(&prob->spu_runcntl_RW, 2);
+       iobarrier_rw();
+       POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
+                               & SPU_STATUS_STOPPED_BY_STOP));
+
+       /* Reset load request to normal. */
+       out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
+       iobarrier_w();
+}
+
+/**
+ * stop_spu_isolate
+ *     Check the SPU run-control state and force an isolated
+ *     exit as necessary.
+ */
+static void stop_spu_isolate(struct spu *spu)
+{
+       struct spu_problem __iomem *prob = spu->problem;
+
+       if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
+               /* The SPU is in isolated state; the only way
+                * to get it out is to perform an isolated
+                * exit (clean) operation.
+                */
+               force_spu_isolate_exit(spu);
+       }
+}
+
 static void harvest(struct spu_state *prev, struct spu *spu)
 {
        /*
@@ -1928,6 +1973,7 @@ static void harvest(struct spu_state *prev, struct spu *spu)
        inhibit_user_access(prev, spu);         /* Step 3.  */
        terminate_spu_app(prev, spu);           /* Step 4.  */
        set_switch_pending(prev, spu);          /* Step 5.  */
+       stop_spu_isolate(spu);                  /* NEW.     */
        remove_other_spu_access(prev, spu);     /* Step 6.  */
        suspend_mfc(prev, spu);                 /* Step 7.  */
        wait_suspend_mfc_complete(prev, spu);   /* Step 8.  */
@@ -2096,11 +2142,11 @@ int spu_save(struct spu_state *prev, struct spu *spu)
        acquire_spu_lock(spu);          /* Step 1.     */
        rc = __do_spu_save(prev, spu);  /* Steps 2-53. */
        release_spu_lock(spu);
-       if (rc) {
+       if (rc != 0 && rc != 2 && rc != 6) {
                panic("%s failed on SPU[%d], rc=%d.\n",
                      __func__, spu->number, rc);
        }
-       return rc;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(spu_save);
 
@@ -2165,9 +2211,6 @@ static void init_priv1(struct spu_state *csa)
            MFC_STATE1_PROBLEM_STATE_MASK |
            MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
 
-       /* Set storage description.  */
-       csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
-
        /* Enable OS-specific set of interrupts. */
        csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
            CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
index 996c28744e96a6d206a5f778dd562b5f30b3cf44..63f0aee4c158e97df652e49b43d00551aaa21e03 100644 (file)
@@ -9,4 +9,3 @@ extern long chrp_time_init(void);
 
 extern void chrp_find_bridges(void);
 extern void chrp_event_scan(unsigned long);
-extern void chrp_pcibios_fixup(void);
index 0f4340506c758bda68b119dd22cba485c065b6fb..ddb4a116ea89b7d0be044c7531cd87aeb59d7093 100644 (file)
@@ -156,15 +156,6 @@ hydra_init(void)
        return 1;
 }
 
-void __init
-chrp_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       for_each_pci_dev(dev)
-               pci_read_irq_line(dev);
-}
-
 #define PRG_CL_RESET_VALID 0x00010000
 
 static void __init
index 49b8dabcbc992996db5a3c53dd00b9035f7e5a05..e1f51d45598445e00d6a3fbbfe0a445cd2fd01c7 100644 (file)
@@ -588,7 +588,6 @@ static int __init chrp_probe(void)
        ISA_DMA_THRESHOLD = ~0L;
        DMA_MODE_READ = 0x44;
        DMA_MODE_WRITE = 0x48;
-       isa_io_base = CHRP_ISA_IO_BASE;         /* default value */
 
        return 1;
 }
@@ -600,7 +599,6 @@ define_machine(chrp) {
        .init                   = chrp_init2,
        .show_cpuinfo           = chrp_show_cpuinfo,
        .init_IRQ               = chrp_init_IRQ,
-       .pcibios_fixup          = chrp_pcibios_fixup,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
index 234a861870a86672562ff07d3f37fb2f84fd5986..ddbe398fbd482d12dd02cff9df6c1f396b2e1b3d 100644 (file)
@@ -74,6 +74,18 @@ config SANDPOINT
          Select SANDPOINT if configuring for a Motorola Sandpoint X3
          (any flavor).
 
+config LINKSTATION
+       bool "Linkstation / Kurobox(HG) from Buffalo"
+       select MPIC
+       select FSL_SOC
+       select PPC_UDBG_16550 if SERIAL_8250
+       help
+         Select LINKSTATION if configuring for one of the PPC-based
+         (MPC8241) NAS systems from Buffalo Technology. So far only the
+         KuroboxHG has been tested. In the future the classical Kurobox,
+         the Linkstation-I HD-HLAN and HD-HGLAN versions, and the
+         PPC-based Terastation systems should be supported too.
+
 config MPC7448HPC2
        bool "Freescale MPC7448HPC2(Taiga)"
        select TSI108_BRIDGE
@@ -146,15 +158,6 @@ config PQ2FADS
          Select PQ2FADS if you wish to configure for a Freescale
          PQ2FADS board (-VR or -ZU).
 
-config LITE5200
-       bool "Freescale LITE5200 / (IceCube)"
-       select PPC_MPC52xx
-       help
-         Support for the LITE5200 dev board for the MPC5200 from Freescale.
-         This is for the LITE5200 version 2.0 board. Don't know if it changes
-         much but it's only been tested on this board version. I think this
-         board is also known as IceCube.
-
 config EV64360
        bool "Marvell-EV64360BP"
        help
@@ -172,9 +175,6 @@ config TQM8xxL
        depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
        default y
 
-config PPC_MPC52xx
-       bool
-
 config 8260
        bool "CPM2 Support" if WILLOW
        depends on 6xx
@@ -208,7 +208,7 @@ config PPC_GEN550
        depends on SANDPOINT || SPRUCE || PPLUS || \
                PRPMC750 || PRPMC800 || LOPEC || \
                (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
-               83xx
+               83xx || LINKSTATION
        default y
 
 config FORCE
@@ -282,13 +282,13 @@ config EPIC_SERIAL_MODE
 
 config MPC10X_BRIDGE
        bool
-       depends on POWERPMC250 || LOPEC || SANDPOINT
+       depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
        select PPC_INDIRECT_PCI
        default y
 
 config MPC10X_OPENPIC
        bool
-       depends on POWERPMC250 || LOPEC || SANDPOINT
+       depends on POWERPMC250 || LOPEC || SANDPOINT || LINKSTATION
        default y
 
 config MPC10X_STORE_GATHERING
index fa499fe59291b0c6d45d76b51a6585c070472beb..d3d11a3cd656f3cd0753fa9bbfcc7aa32fa4353d 100644 (file)
@@ -2,3 +2,4 @@
 # Makefile for the 6xx/7xx/7xxxx linux kernel.
 #
 obj-$(CONFIG_MPC7448HPC2)      += mpc7448_hpc2.o
+obj-$(CONFIG_LINKSTATION)      += linkstation.o ls_uart.o
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
new file mode 100644 (file)
index 0000000..61599d9
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Board setup routines for the Buffalo Linkstation / Kurobox Platform.
+ *
+ * Copyright (C) 2006 G. Liakhovetski (g.liakhovetski@gmx.de)
+ *
+ * Based on sandpoint.c by Mark A. Greer
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of
+ * any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/mtd/physmap.h>
+
+#include <asm/time.h>
+#include <asm/prom.h>
+#include <asm/mpic.h>
+#include <asm/mpc10x.h>
+#include <asm/pci-bridge.h>
+
+static struct mtd_partition linkstation_physmap_partitions[] = {
+       {
+               .name   = "mtd_firmimg",
+               .offset = 0x000000,
+               .size   = 0x300000,
+       },
+       {
+               .name   = "mtd_bootcode",
+               .offset = 0x300000,
+               .size   = 0x070000,
+       },
+       {
+               .name   = "mtd_status",
+               .offset = 0x370000,
+               .size   = 0x010000,
+       },
+       {
+               .name   = "mtd_conf",
+               .offset = 0x380000,
+               .size   = 0x080000,
+       },
+       {
+               .name   = "mtd_allflash",
+               .offset = 0x000000,
+               .size   = 0x400000,
+       },
+       {
+               .name   = "mtd_data",
+               .offset = 0x310000,
+               .size   = 0x0f0000,
+       },
+};
+
+static int __init add_bridge(struct device_node *dev)
+{
+       int len;
+       struct pci_controller *hose;
+       int *bus_range;
+
+       printk("Adding PCI host bridge %s\n", dev->full_name);
+
+       bus_range = (int *) get_property(dev, "bus-range", &len);
+       if (bus_range == NULL || len < 2 * sizeof(int))
+               printk(KERN_WARNING "Can't get bus-range for %s, assume"
+                               " bus 0\n", dev->full_name);
+
+       hose = pcibios_alloc_controller();
+       if (hose == NULL)
+               return -ENOMEM;
+       hose->first_busno = bus_range ? bus_range[0] : 0;
+       hose->last_busno = bus_range ? bus_range[1] : 0xff;
+       hose->arch_data = dev;
+       setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
+
+       /* Interpret the "ranges" property */
+       /* This also maps the I/O region and sets isa_io/mem_base */
+       pci_process_bridge_OF_ranges(hose, dev, 1);
+
+       return 0;
+}
+
+static void __init linkstation_setup_arch(void)
+{
+       struct device_node *np;
+#ifdef CONFIG_MTD_PHYSMAP
+       physmap_set_partitions(linkstation_physmap_partitions,
+                              ARRAY_SIZE(linkstation_physmap_partitions));
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (initrd_start)
+               ROOT_DEV = Root_RAM0;
+       else
+#endif
+#ifdef CONFIG_ROOT_NFS
+               ROOT_DEV = Root_NFS;
+#else
+               ROOT_DEV = Root_HDA1;
+#endif
+
+       /* Lookup PCI host bridges */
+       for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+               add_bridge(np);
+
+       printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
+       printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
+}
+
+/*
+ * Interrupt setup and service.  Interrupts on the linkstation come
+ * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
+ */
+static void __init linkstation_init_IRQ(void)
+{
+       struct mpic *mpic;
+       struct device_node *dnp;
+       void *prop;
+       int size;
+       phys_addr_t paddr;
+
+       dnp = of_find_node_by_type(NULL, "open-pic");
+       if (dnp == NULL)
+               return;
+
+       prop = (struct device_node *)get_property(dnp, "reg", &size);
+       paddr = (phys_addr_t)of_translate_address(dnp, prop);
+
+       mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET, 4, 32, " EPIC     ");
+       BUG_ON(mpic == NULL);
+
+       /* PCI IRQs */
+       mpic_assign_isu(mpic, 0, paddr + 0x10200);
+
+       /* I2C */
+       mpic_assign_isu(mpic, 1, paddr + 0x11000);
+
+       /* ttyS0, ttyS1 */
+       mpic_assign_isu(mpic, 2, paddr + 0x11100);
+
+       mpic_init(mpic);
+}
+
+extern void avr_uart_configure(void);
+extern void avr_uart_send(const char);
+
+static void linkstation_restart(char *cmd)
+{
+       local_irq_disable();
+
+       /* Reset system via AVR */
+       avr_uart_configure();
+       /* Send reboot command */
+       avr_uart_send('C');
+
+       for(;;)  /* Spin until reset happens */
+               avr_uart_send('G');     /* "kick" */
+}
+
+static void linkstation_power_off(void)
+{
+       local_irq_disable();
+
+       /* Power down system via AVR */
+       avr_uart_configure();
+       /* send shutdown command */
+       avr_uart_send('E');
+
+       for(;;)  /* Spin until power-off happens */
+               avr_uart_send('G');     /* "kick" */
+       /* NOTREACHED */
+}
+
+static void linkstation_halt(void)
+{
+       linkstation_power_off();
+       /* NOTREACHED */
+}
+
+static void linkstation_show_cpuinfo(struct seq_file *m)
+{
+       seq_printf(m, "vendor\t\t: Buffalo Technology\n");
+       seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n");
+}
+
+static int __init linkstation_probe(void)
+{
+       unsigned long root;
+
+       root = of_get_flat_dt_root();
+
+       if (!of_flat_dt_is_compatible(root, "linkstation"))
+               return 0;
+       return 1;
+}
+
+define_machine(linkstation){
+       .name                   = "Buffalo Linkstation",
+       .probe                  = linkstation_probe,
+       .setup_arch             = linkstation_setup_arch,
+       .init_IRQ               = linkstation_init_IRQ,
+       .show_cpuinfo           = linkstation_show_cpuinfo,
+       .get_irq                = mpic_get_irq,
+       .restart                = linkstation_restart,
+       .power_off              = linkstation_power_off,
+       .halt                   = linkstation_halt,
+       .calibrate_decr         = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
new file mode 100644 (file)
index 0000000..0e83776
--- /dev/null
@@ -0,0 +1,131 @@
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <asm/io.h>
+#include <asm/mpc10x.h>
+#include <asm/ppc_sys.h>
+#include <asm/prom.h>
+#include <asm/termbits.h>
+
+static void __iomem *avr_addr;
+static unsigned long avr_clock;
+
+static struct work_struct wd_work;
+
+static void wd_stop(struct work_struct *unused)
+{
+       const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
+       int i = 0, rescue = 8;
+       int len = strlen(string);
+
+       while (rescue--) {
+               int j;
+               char lsr = in_8(avr_addr + UART_LSR);
+
+               if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) {
+                       for (j = 0; j < 16 && i < len; j++, i++)
+                               out_8(avr_addr + UART_TX, string[i]);
+                       if (i == len) {
+                               /* Read "OK" back: 4ms for the last "KKKK"
+                                  plus a couple bytes back */
+                               msleep(7);
+                               printk("linkstation: disarming the AVR watchdog: ");
+                               while (in_8(avr_addr + UART_LSR) & UART_LSR_DR)
+                                       printk("%c", in_8(avr_addr + UART_RX));
+                               break;
+                       }
+               }
+               msleep(17);
+       }
+       printk("\n");
+}
+
+#define AVR_QUOT(clock) ((clock) + 8 * 9600) / (16 * 9600)
+
+void avr_uart_configure(void)
+{
+       unsigned char cval = UART_LCR_WLEN8;
+       unsigned int quot = AVR_QUOT(avr_clock);
+
+       if (!avr_addr || !avr_clock)
+               return;
+
+       out_8(avr_addr + UART_LCR, cval);                       /* initialise UART */
+       out_8(avr_addr + UART_MCR, 0);
+       out_8(avr_addr + UART_IER, 0);
+
+       cval |= UART_LCR_STOP | UART_LCR_PARITY | UART_LCR_EPAR;
+
+       out_8(avr_addr + UART_LCR, cval);                       /* Set character format */
+
+       out_8(avr_addr + UART_LCR, cval | UART_LCR_DLAB);       /* set DLAB */
+       out_8(avr_addr + UART_DLL, quot & 0xff);                /* LS of divisor */
+       out_8(avr_addr + UART_DLM, quot >> 8);                  /* MS of divisor */
+       out_8(avr_addr + UART_LCR, cval);                       /* reset DLAB */
+       out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO);       /* enable FIFO */
+}
+
+void avr_uart_send(const char c)
+{
+       if (!avr_addr || !avr_clock)
+               return;
+
+       out_8(avr_addr + UART_TX, c);
+       out_8(avr_addr + UART_TX, c);
+       out_8(avr_addr + UART_TX, c);
+       out_8(avr_addr + UART_TX, c);
+}
+
+static void __init ls_uart_init(void)
+{
+       local_irq_disable();
+
+#ifndef CONFIG_SERIAL_8250
+       out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO);       /* enable FIFO */
+       out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO |
+             UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);       /* clear FIFOs */
+       out_8(avr_addr + UART_FCR, 0);
+       out_8(avr_addr + UART_IER, 0);
+
+       /* Clear up interrupts */
+       (void) in_8(avr_addr + UART_LSR);
+       (void) in_8(avr_addr + UART_RX);
+       (void) in_8(avr_addr + UART_IIR);
+       (void) in_8(avr_addr + UART_MSR);
+#endif
+       avr_uart_configure();
+
+       local_irq_enable();
+}
+
+static int __init ls_uarts_init(void)
+{
+       struct device_node *avr;
+       phys_addr_t phys_addr;
+       int len;
+
+       avr = of_find_node_by_path("/soc10x/serial@80004500");
+       if (!avr)
+               return -EINVAL;
+
+       avr_clock = *(u32*)get_property(avr, "clock-frequency", &len);
+       phys_addr = ((u32*)get_property(avr, "reg", &len))[0];
+
+       if (!avr_clock || !phys_addr)
+               return -EINVAL;
+
+       avr_addr = ioremap(phys_addr, 32);
+       if (!avr_addr)
+               return -EFAULT;
+
+       ls_uart_init();
+
+       INIT_WORK(&wd_work, wd_stop);
+       schedule_work(&wd_work);
+
+       return 0;
+}
+
+late_initcall(ls_uarts_init);
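
For reference, AVR_QUOT() above is the usual 16550-style divisor calculation for a fixed 9600 baud link, with half a divisor step added so the result is rounded rather than truncated. Below is a minimal stand-alone sketch of the same arithmetic; the 1.8432 MHz clock is only an assumed example figure, not necessarily what the Linkstation device tree reports in its clock-frequency property.

#include <stdio.h>

/* Same rounding-divisor formula as AVR_QUOT() in ls_uart.c,
 * generalised to an arbitrary baud rate. */
static unsigned int uart_quot(unsigned int clock, unsigned int baud)
{
        return (clock + 8 * baud) / (16 * baud);
}

int main(void)
{
        unsigned int clock = 1843200;   /* assumed example clock, in Hz */

        /* 1843200 / (16 * 9600) rounds to 12 */
        printf("divisor for 9600 baud: %u\n", uart_quot(clock, 9600));
        return 0;
}

In the driver itself the computed divisor is then split across UART_DLL and UART_DLM, exactly as avr_uart_configure() does above.
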
index bdb475c65cba267bc039bb8efc2b30368a284b21..3fcc85f60fbf2d78b7ee8ba29c8733026e951fcd 100644 (file)
@@ -60,7 +60,7 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
 
 extern int tsi108_setup_pci(struct device_node *dev);
 extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
-extern void tsi108_pci_int_init(void);
+extern void tsi108_pci_int_init(struct device_node *node);
 extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
 
 int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
@@ -71,65 +71,6 @@ int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
                return PCIBIOS_SUCCESSFUL;
 }
 
-/*
- * find pci slot by devfn in interrupt map of OF tree
- */
-u8 find_slot_by_devfn(unsigned int *interrupt_map, unsigned int devfn)
-{
-       int i;
-       unsigned int tmp;
-       for (i = 0; i < 4; i++){
-               tmp = interrupt_map[i*4*7];
-               if ((tmp >> 11) == (devfn >> 3))
-                       return i;
-       }
-       return i;
-}
-
-/*
- * Scans the interrupt map for pci device
- */
-void mpc7448_hpc2_fixup_irq(struct pci_dev *dev)
-{
-       struct pci_controller *hose;
-       struct device_node *node;
-       const unsigned int *interrupt;
-       int busnr;
-       int len;
-       u8 slot;
-       u8 pin;
-
-       /* Lookup the hose */
-       busnr = dev->bus->number;
-       hose = pci_bus_to_hose(busnr);
-       if (!hose)
-               printk(KERN_ERR "No pci hose found\n");
-
-       /* Check it has an OF node associated */
-       node = (struct device_node *) hose->arch_data;
-       if (!node)
-               printk(KERN_ERR "No pci node found\n");
-
-       interrupt = get_property(node, "interrupt-map", &len);
-       slot = find_slot_by_devfn(interrupt, dev->devfn);
-       pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
-       if (pin == 0 || pin > 4)
-               pin = 1;
-       pin--;
-       dev->irq  = interrupt[slot*4*7 + pin*7 + 5];
-       DBG("TSI_PCI: dev->irq = 0x%x\n", dev->irq);
-}
-/* temporary pci irq map fixup*/
-
-void __init mpc7448_hpc2_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-       for_each_pci_dev(dev) {
-               mpc7448_hpc2_fixup_irq(dev);
-               pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
-       }
-}
-
 static void __init mpc7448_hpc2_setup_arch(void)
 {
        struct device_node *cpu;
@@ -192,9 +133,12 @@ static void __init mpc7448_hpc2_init_IRQ(void)
 {
        struct mpic *mpic;
        phys_addr_t mpic_paddr = 0;
+       struct device_node *tsi_pic;
+#ifdef CONFIG_PCI
        unsigned int cascade_pci_irq;
        struct device_node *tsi_pci;
-       struct device_node *tsi_pic;
+       struct device_node *cascade_node = NULL;
+#endif
 
        tsi_pic = of_find_node_by_type(NULL, "open-pic");
        if (tsi_pic) {
@@ -208,31 +152,41 @@ static void __init mpc7448_hpc2_init_IRQ(void)
                return;
        }
 
-       DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__,
+       DBG("%s: tsi108 pic phys_addr = 0x%x\n", __FUNCTION__,
            (u32) mpic_paddr);
 
        mpic = mpic_alloc(tsi_pic, mpic_paddr,
                        MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
                        MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
-                       0, /* num_sources used */
-                       0, /* num_sources used */
+                       24,
+                       NR_IRQS-4, /* num_sources used */
                        "Tsi108_PIC");
 
-       BUG_ON(mpic == NULL); /* XXXX */
+       BUG_ON(mpic == NULL);
+
+       mpic_assign_isu(mpic, 0, mpic_paddr + 0x100);
+
        mpic_init(mpic);
 
+#ifdef CONFIG_PCI
        tsi_pci = of_find_node_by_type(NULL, "pci");
-       if (tsi_pci == 0) {
+       if (tsi_pci == NULL) {
                printk("%s: No tsi108 pci node found !\n", __FUNCTION__);
                return;
        }
+       cascade_node = of_find_node_by_type(NULL, "pic-router");
+       if (cascade_node == NULL) {
+               printk("%s: No tsi108 pci cascade node found !\n", __FUNCTION__);
+               return;
+       }
 
        cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
+       DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __FUNCTION__,
+           (u32) cascade_pci_irq);
+       tsi108_pci_int_init(cascade_node);
        set_irq_data(cascade_pci_irq, mpic);
        set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
-
-       tsi108_pci_int_init();
-
+#endif
        /* Configure MPIC outputs to CPU0 */
        tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
        of_node_put(tsi_pic);
@@ -290,7 +244,6 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
                return 1;
        }
        return 0;
-
 }
 
 define_machine(mpc7448_hpc2){
@@ -300,7 +253,6 @@ define_machine(mpc7448_hpc2){
        .init_IRQ               = mpc7448_hpc2_init_IRQ,
        .show_cpuinfo           = mpc7448_hpc2_show_cpuinfo,
        .get_irq                = mpic_get_irq,
-       .pcibios_fixup          = mpc7448_hpc2_pcibios_fixup,
        .restart                = mpc7448_hpc2_restart,
        .calibrate_decr         = generic_calibrate_decr,
        .machine_check_exception= mpc7448_machine_check_exception,
index dee4eb4d8bec05082e30510f813c956e1d6ff9a3..13ac3015d91c95be313e33f344bbc0ac8d4b2a7d 100644 (file)
@@ -1,5 +1,7 @@
 EXTRA_CFLAGS   += -mno-minimal-toc
 
+extra-y += dt.o
+
 obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt_mod.o mf.o lpevents.o \
        hvcall.o proc.o htab.o iommu.o misc.o irq.o
 obj-$(CONFIG_PCI) += pci.o vpdinfo.o
@@ -7,5 +9,9 @@ obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_VIOPATH) += viopath.o
 obj-$(CONFIG_MODULES) += ksyms.o
 
+quiet_cmd_dt_strings = DT_STR  $@
+      cmd_dt_strings = $(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings \
+                               $< $@
+
 $(obj)/dt_mod.o:       $(obj)/dt.o
-       @$(OBJCOPY) --rename-section .rodata.str1.8=.dt_strings $(obj)/dt.o $(obj)/dt_mod.o
+       $(call if_changed,dt_strings)
index e305deee7f44e19d08d41922f581836d66a743d4..9e8a334a518a8cc1dd9f9224c4e88399d1a45a59 100644 (file)
@@ -41,6 +41,7 @@
 #include "call_pci.h"
 #include "pci.h"
 #include "it_exp_vpd_panel.h"
+#include "naca.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -205,13 +206,11 @@ static void __init dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
        dt_prop(dt, name, &data, sizeof(u32));
 }
 
-#ifdef notyet
 static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
                u64 data)
 {
        dt_prop(dt, name, &data, sizeof(u64));
 }
-#endif
 
 static void __init dt_prop_u64_list(struct iseries_flat_dt *dt,
                const char *name, u64 *data, int n)
@@ -306,6 +305,17 @@ static void __init dt_model(struct iseries_flat_dt *dt)
        dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
 }
 
+static void __init dt_initrd(struct iseries_flat_dt *dt)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       if (naca.xRamDisk) {
+               dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
+               dt_prop_u64(dt, "linux,initrd-end",
+                       (u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
+       }
+#endif
+}
+
 static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
                const char *name, u32 reg, int unit,
                const char *type, const char *compat, int end)
@@ -641,6 +651,7 @@ void * __init build_flat_dt(unsigned long phys_mem_size)
        /* /chosen */
        dt_start_node(iseries_dt, "chosen");
        dt_prop_str(iseries_dt, "bootargs", cmd_line);
+       dt_initrd(iseries_dt);
        dt_end_node(iseries_dt);
 
        dt_cpus(iseries_dt);
index 218817d13c5cd9943b5bdea6d76ce1f993286117..d7a756d5135cf4ee0ba6366106f69a2b639658db 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
+#include <linux/pci.h>
 
 #include <asm/iommu.h>
 #include <asm/tce.h>
@@ -114,12 +115,10 @@ void iommu_table_getparms_iSeries(unsigned long busno,
 {
        struct iommu_table_cb *parms;
 
-       parms = kmalloc(sizeof(*parms), GFP_KERNEL);
+       parms = kzalloc(sizeof(*parms), GFP_KERNEL);
        if (parms == NULL)
                panic("PCI_DMA: TCE Table Allocation failed.");
 
-       memset(parms, 0, sizeof(*parms));
-
        parms->itc_busno = busno;
        parms->itc_slotno = slotno;
        parms->itc_virtbus = virtbus;
@@ -168,7 +167,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
 }
 
 
-void iommu_devnode_init_iSeries(struct device_node *dn)
+void iommu_devnode_init_iSeries(struct pci_dev *pdev, struct device_node *dn)
 {
        struct iommu_table *tbl;
        struct pci_dn *pdn = PCI_DN(dn);
@@ -186,19 +185,14 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
                pdn->iommu_table = iommu_init_table(tbl, -1);
        else
                kfree(tbl);
+       pdev->dev.archdata.dma_data = pdn->iommu_table;
 }
 #endif
 
-static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
-static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
-
 void iommu_init_early_iSeries(void)
 {
        ppc_md.tce_build = tce_build_iSeries;
        ppc_md.tce_free  = tce_free_iSeries;
 
-       ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
-       ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
-
-       pci_iommu_init();
+       pci_dma_ops = &dma_iommu_ops;
 }
index a2200842f4e59a47075282d65b798497689657c8..2430848b98e7835fb7a52acb9d0200f8fa1d9e54 100644 (file)
@@ -19,9 +19,3 @@ EXPORT_SYMBOL(HvCall4);
 EXPORT_SYMBOL(HvCall5);
 EXPORT_SYMBOL(HvCall6);
 EXPORT_SYMBOL(HvCall7);
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_get_flags);
-EXPORT_SYMBOL(local_irq_disable);
-EXPORT_SYMBOL(local_irq_restore);
-#endif
index 7641fc7e550a0ec1603dabab9cc917a8dfecdb72..2c6ff0fdac9807079fe48de4f787748a25d0afd5 100644 (file)
 
        .text
 
-/* unsigned long local_save_flags(void) */
-_GLOBAL(local_get_flags)
-       lbz     r3,PACAPROCENABLED(r13)
-       blr
-
-/* unsigned long local_irq_disable(void) */
-_GLOBAL(local_irq_disable)
-       lbz     r3,PACAPROCENABLED(r13)
-       li      r4,0
-       stb     r4,PACAPROCENABLED(r13)
-       blr                     /* Done */
-
-/* void local_irq_restore(unsigned long flags) */
-_GLOBAL(local_irq_restore)
-       lbz     r5,PACAPROCENABLED(r13)
-        /* Check if things are setup the way we want _already_. */
-       cmpw    0,r3,r5
-       beqlr
-       /* are we enabling interrupts? */
-       cmpdi   0,r3,0
-       stb     r3,PACAPROCENABLED(r13)
-       beqlr
-       /* Check pending interrupts */
-       /*   A decrementer, IPI or PMC interrupt may have occurred
-        *   while we were in the hypervisor (which enables) */
-       ld      r4,PACALPPACAPTR(r13)
-       ld      r4,LPPACAANYINT(r4)
-       cmpdi   r4,0
-       beqlr
-
-       /*
-        * Handle pending interrupts in interrupt context
-        */
+/* Handle pending interrupts in interrupt context */
+_GLOBAL(iseries_handle_interrupts)
        li      r0,0x5555
        sc
        blr
index 4aa165e010d91966214dc01774ef5037f6044b2d..4a06d9c349869cdcb1fda45c516034b8cf992867 100644 (file)
@@ -155,53 +155,6 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
               Error_Text, Bus, SubBus, AgentId, HvRc);
 }
 
-/*
- * iSeries_pcibios_init
- *
- * Description:
- *   This function checks for all possible system PCI host bridges that connect
- *   PCI buses.  The system hypervisor is queried as to the guest partition
- *   ownership status.  A pci_controller is built for any bus which is partially
- *   owned or fully owned by this guest partition.
- */
-void iSeries_pcibios_init(void)
-{
-       struct pci_controller *phb;
-       struct device_node *root = of_find_node_by_path("/");
-       struct device_node *node = NULL;
-
-       if (root == NULL) {
-               printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
-                               "of device tree\n");
-               return;
-       }
-       while ((node = of_get_next_child(root, node)) != NULL) {
-               HvBusNumber bus;
-               const u32 *busp;
-
-               if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
-                       continue;
-
-               busp = get_property(node, "bus-range", NULL);
-               if (busp == NULL)
-                       continue;
-               bus = *busp;
-               printk("bus %d appears to exist\n", bus);
-               phb = pcibios_alloc_controller(node);
-               if (phb == NULL)
-                       continue;
-
-               phb->pci_mem_offset = phb->local_number = bus;
-               phb->first_busno = bus;
-               phb->last_busno = bus;
-               phb->ops = &iSeries_pci_ops;
-       }
-
-       of_node_put(root);
-
-       pci_devs_phb_init();
-}
-
 /*
  * iSeries_pci_final_fixup(void)
  */
@@ -253,7 +206,7 @@ void __init iSeries_pci_final_fixup(void)
                        PCI_DN(node)->pcidev = pdev;
                        allocate_device_bars(pdev);
                        iSeries_Device_Information(pdev, DeviceCount);
-                       iommu_devnode_init_iSeries(node);
+                       iommu_devnode_init_iSeries(pdev, node);
                } else
                        printk("PCI: Device Tree not found for 0x%016lX\n",
                                        (unsigned long)pdev);
@@ -438,11 +391,7 @@ static inline struct device_node *xlate_iomm_address(
 /*
  * Read MM I/O Instructions for the iSeries
 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called
- * else, data is returned in big Endian format.
- *
- * iSeries_Read_Byte = Read Byte  ( 8 bit)
- * iSeries_Read_Word = Read Word  (16 bit)
- * iSeries_Read_Long = Read Long  (32 bit)
+ * else, data is returned in Big Endian format.
  */
 static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
 {
@@ -462,14 +411,15 @@ static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
                        num_printed = 0;
                }
                if (num_printed++ < 10)
-                       printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
+                       printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n",
+                              IoAddress);
                return 0xff;
        }
        do {
                HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
        } while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
 
-       return (u8)ret.value;
+       return ret.value;
 }
 
 static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
@@ -490,7 +440,8 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
                        num_printed = 0;
                }
                if (num_printed++ < 10)
-                       printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
+                       printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n",
+                              IoAddress);
                return 0xffff;
        }
        do {
@@ -498,7 +449,7 @@ static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
                                BarOffset, 0);
        } while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
 
-       return swab16((u16)ret.value);
+       return ret.value;
 }
 
 static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
@@ -519,7 +470,8 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
                        num_printed = 0;
                }
                if (num_printed++ < 10)
-                       printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
+                       printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n",
+                              IoAddress);
                return 0xffffffff;
        }
        do {
@@ -527,15 +479,12 @@ static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
                                BarOffset, 0);
        } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
 
-       return swab32((u32)ret.value);
+       return ret.value;
 }
 
 /*
  * Write MM I/O Instructions for the iSeries
  *
- * iSeries_Write_Byte = Write Byte (8 bit)
- * iSeries_Write_Word = Write Word(16 bit)
- * iSeries_Write_Long = Write Long(32 bit)
  */
 static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
 {
@@ -581,11 +530,12 @@ static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
                        num_printed = 0;
                }
                if (num_printed++ < 10)
-                       printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
+                       printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n",
+                              IoAddress);
                return;
        }
        do {
-               rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
+               rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, data, 0);
        } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
 }
 
@@ -607,231 +557,221 @@ static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
                        num_printed = 0;
                }
                if (num_printed++ < 10)
-                       printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
+                       printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n",
+                              IoAddress);
                return;
        }
        do {
-               rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
+               rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, data, 0);
        } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
 }
 
-extern unsigned char __raw_readb(const volatile void __iomem *addr)
+static u8 iseries_readb(const volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       return *(volatile unsigned char __force *)addr;
+       return iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(__raw_readb);
 
-extern unsigned short __raw_readw(const volatile void __iomem *addr)
+static u16 iseries_readw(const volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       return *(volatile unsigned short __force *)addr;
+       return le16_to_cpu(iSeries_Read_Word(addr));
 }
-EXPORT_SYMBOL(__raw_readw);
 
-extern unsigned int __raw_readl(const volatile void __iomem *addr)
+static u32 iseries_readl(const volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       return *(volatile unsigned int __force *)addr;
+       return le32_to_cpu(iSeries_Read_Long(addr));
 }
-EXPORT_SYMBOL(__raw_readl);
 
-extern unsigned long __raw_readq(const volatile void __iomem *addr)
+static u16 iseries_readw_be(const volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       return *(volatile unsigned long __force *)addr;
+       return iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(__raw_readq);
 
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr)
+static u32 iseries_readl_be(const volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       *(volatile unsigned char __force *)addr = v;
+       return iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(__raw_writeb);
 
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr)
+static void iseries_writeb(u8 data, volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       *(volatile unsigned short __force *)addr = v;
+       iSeries_Write_Byte(data, addr);
 }
-EXPORT_SYMBOL(__raw_writew);
 
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr)
+static void iseries_writew(u16 data, volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       *(volatile unsigned int __force *)addr = v;
+       iSeries_Write_Word(cpu_to_le16(data), addr);
 }
-EXPORT_SYMBOL(__raw_writel);
 
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr)
+static void iseries_writel(u32 data, volatile void __iomem *addr)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       *(volatile unsigned long __force *)addr = v;
+       iSeries_Write_Long(cpu_to_le32(data), addr);
 }
-EXPORT_SYMBOL(__raw_writeq);
 
-int in_8(const volatile unsigned char __iomem *addr)
+static void iseries_writew_be(u16 data, volatile void __iomem *addr)
 {
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               return iSeries_Read_Byte(addr);
-       return __in_8(addr);
+       iSeries_Write_Word(data, addr);
 }
-EXPORT_SYMBOL(in_8);
 
-void out_8(volatile unsigned char __iomem *addr, int val)
+static void iseries_writel_be(u32 data, volatile void __iomem *addr)
 {
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               iSeries_Write_Byte(val, addr);
-       else
-               __out_8(addr, val);
+       iSeries_Write_Long(data, addr);
 }
-EXPORT_SYMBOL(out_8);
 
-int in_le16(const volatile unsigned short __iomem *addr)
+static void iseries_readsb(const volatile void __iomem *addr, void *buf,
+                          unsigned long count)
 {
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               return iSeries_Read_Word(addr);
-       return __in_le16(addr);
+       u8 *dst = buf;
+       while(count-- > 0)
+               *(dst++) = iSeries_Read_Byte(addr);
 }
-EXPORT_SYMBOL(in_le16);
 
-int in_be16(const volatile unsigned short __iomem *addr)
+static void iseries_readsw(const volatile void __iomem *addr, void *buf,
+                          unsigned long count)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       return __in_be16(addr);
+       u16 *dst = buf;
+       while(count-- > 0)
+               *(dst++) = iSeries_Read_Word(addr);
 }
-EXPORT_SYMBOL(in_be16);
 
-void out_le16(volatile unsigned short __iomem *addr, int val)
+static void iseries_readsl(const volatile void __iomem *addr, void *buf,
+                          unsigned long count)
 {
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               iSeries_Write_Word(val, addr);
-       else
-               __out_le16(addr, val);
+       u32 *dst = buf;
+       while(count-- > 0)
+               *(dst++) = iSeries_Read_Long(addr);
 }
-EXPORT_SYMBOL(out_le16);
 
-void out_be16(volatile unsigned short __iomem *addr, int val)
+static void iseries_writesb(volatile void __iomem *addr, const void *buf,
+                           unsigned long count)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       __out_be16(addr, val);
+       const u8 *src = buf;
+       while(count-- > 0)
+               iSeries_Write_Byte(*(src++), addr);
 }
-EXPORT_SYMBOL(out_be16);
 
-unsigned in_le32(const volatile unsigned __iomem *addr)
+static void iseries_writesw(volatile void __iomem *addr, const void *buf,
+                           unsigned long count)
 {
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               return iSeries_Read_Long(addr);
-       return __in_le32(addr);
+       const u16 *src = buf;
+       while(count-- > 0)
+               iSeries_Write_Word(*(src++), addr);
 }
-EXPORT_SYMBOL(in_le32);
 
-unsigned in_be32(const volatile unsigned __iomem *addr)
+static void iseries_writesl(volatile void __iomem *addr, const void *buf,
+                           unsigned long count)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       return __in_be32(addr);
+       const u32 *src = buf;
+       while(count-- > 0)
+               iSeries_Write_Long(*(src++), addr);
 }
-EXPORT_SYMBOL(in_be32);
 
-void out_le32(volatile unsigned __iomem *addr, int val)
+static void iseries_memset_io(volatile void __iomem *addr, int c,
+                             unsigned long n)
 {
-       if (firmware_has_feature(FW_FEATURE_ISERIES))
-               iSeries_Write_Long(val, addr);
-       else
-               __out_le32(addr, val);
-}
-EXPORT_SYMBOL(out_le32);
+       volatile char __iomem *d = addr;
 
-void out_be32(volatile unsigned __iomem *addr, int val)
-{
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
-
-       __out_be32(addr, val);
+       while (n-- > 0)
+               iSeries_Write_Byte(c, d++);
 }
-EXPORT_SYMBOL(out_be32);
 
-unsigned long in_le64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
+                                 unsigned long n)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+       char *d = dest;
+       const volatile char __iomem *s = src;
 
-       return __in_le64(addr);
+       while (n-- > 0)
+               *d++ = iSeries_Read_Byte(s++);
 }
-EXPORT_SYMBOL(in_le64);
 
-unsigned long in_be64(const volatile unsigned long __iomem *addr)
+static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
+                               unsigned long n)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+       const char *s = src;
+       volatile char __iomem *d = dest;
 
-       return __in_be64(addr);
+       while (n-- > 0)
+               iSeries_Write_Byte(*s++, d++);
 }
-EXPORT_SYMBOL(in_be64);
-
-void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
 
-       __out_le64(addr, val);
-}
-EXPORT_SYMBOL(out_le64);
+/* We only set MMIO ops. The default PIO ops will default
+ * to the MMIO ops + pci_io_base, which is 0 on iSeries as
+ * expected, so both should work.
+ *
+ * Note that we don't implement the readq/writeq versions as
+ * I don't know of an HV call for doing so. Thus, the default
+ * operation will be used instead, which will fault, as the value
+ * returned by iSeries for MMIO addresses always hits a non-mapped
+ * area. This is as good as the BUG() we used to have there.
+ */
+static struct ppc_pci_io __initdata iseries_pci_io = {
+       .readb = iseries_readb,
+       .readw = iseries_readw,
+       .readl = iseries_readl,
+       .readw_be = iseries_readw_be,
+       .readl_be = iseries_readl_be,
+       .writeb = iseries_writeb,
+       .writew = iseries_writew,
+       .writel = iseries_writel,
+       .writew_be = iseries_writew_be,
+       .writel_be = iseries_writel_be,
+       .readsb = iseries_readsb,
+       .readsw = iseries_readsw,
+       .readsl = iseries_readsl,
+       .writesb = iseries_writesb,
+       .writesw = iseries_writesw,
+       .writesl = iseries_writesl,
+       .memset_io = iseries_memset_io,
+       .memcpy_fromio = iseries_memcpy_fromio,
+       .memcpy_toio = iseries_memcpy_toio,
+};
 
-void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
+/*
+ * iSeries_pcibios_init
+ *
+ * Description:
+ *   This function checks for all possible system PCI host bridges that connect
+ *   PCI buses.  The system hypervisor is queried as to the guest partition
+ *   ownership status.  A pci_controller is built for any bus which is partially
+ *   owned or fully owned by this guest partition.
+ */
+void __init iSeries_pcibios_init(void)
 {
-       BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES));
+       struct pci_controller *phb;
+       struct device_node *root = of_find_node_by_path("/");
+       struct device_node *node = NULL;
 
-       __out_be64(addr, val);
-}
-EXPORT_SYMBOL(out_be64);
+       /* Install IO hooks */
+       ppc_pci_io = iseries_pci_io;
 
-void memset_io(volatile void __iomem *addr, int c, unsigned long n)
-{
-       if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-               volatile char __iomem *d = addr;
+       if (root == NULL) {
+               printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
+                               "of device tree\n");
+               return;
+       }
+       while ((node = of_get_next_child(root, node)) != NULL) {
+               HvBusNumber bus;
+               const u32 *busp;
 
-               while (n-- > 0) {
-                       iSeries_Write_Byte(c, d++);
-               }
-       } else
-               eeh_memset_io(addr, c, n);
-}
-EXPORT_SYMBOL(memset_io);
+               if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
+                       continue;
 
-void memcpy_fromio(void *dest, const volatile void __iomem *src,
-                                 unsigned long n)
-{
-       if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-               char *d = dest;
-               const volatile char __iomem *s = src;
+               busp = get_property(node, "bus-range", NULL);
+               if (busp == NULL)
+                       continue;
+               bus = *busp;
+               printk("bus %d appears to exist\n", bus);
+               phb = pcibios_alloc_controller(node);
+               if (phb == NULL)
+                       continue;
 
-               while (n-- > 0) {
-                       *d++ = iSeries_Read_Byte(s++);
-               }
-       } else
-               eeh_memcpy_fromio(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy_fromio);
+               phb->pci_mem_offset = phb->local_number = bus;
+               phb->first_busno = bus;
+               phb->last_busno = bus;
+               phb->ops = &iSeries_pci_ops;
+       }
 
-void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
-{
-       if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-               const char *s = src;
-               volatile char __iomem *d = dest;
+       of_node_put(root);
 
-               while (n-- > 0) {
-                       iSeries_Write_Byte(*s++, d++);
-               }
-       } else
-               eeh_memcpy_toio(dest, src, n);
+       pci_devs_phb_init();
 }
-EXPORT_SYMBOL(memcpy_toio);
+
index 6f73469fd3b0c64b1a753003af5e3a93a1fef072..bdf2afbb60c1c23556e0f4c38a6e7a082ce2e580 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/smp.h>
 #include <linux/param.h>
 #include <linux/string.h>
-#include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
 #include <linux/major.h>
@@ -80,8 +79,6 @@ extern void iSeries_pci_final_fixup(void);
 static void iSeries_pci_final_fixup(void) { }
 #endif
 
-extern int rd_size;            /* Defined in drivers/block/rd.c */
-
 extern unsigned long iSeries_recal_tb;
 extern unsigned long iSeries_recal_titan;
 
@@ -295,24 +292,6 @@ static void __init iSeries_init_early(void)
 {
        DBG(" -> iSeries_init_early()\n");
 
-#if defined(CONFIG_BLK_DEV_INITRD)
-       /*
-        * If the init RAM disk has been configured and there is
-        * a non-zero starting address for it, set it up
-        */
-       if (naca.xRamDisk) {
-               initrd_start = (unsigned long)__va(naca.xRamDisk);
-               initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
-               initrd_below_start_ok = 1;      // ramdisk in kernel space
-               ROOT_DEV = Root_RAM0;
-               if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
-                       rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
-       } else
-#endif /* CONFIG_BLK_DEV_INITRD */
-       {
-           /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
-       }
-
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
 
@@ -331,17 +310,6 @@ static void __init iSeries_init_early(void)
 
        mf_init();
 
-       /* If we were passed an initrd, set the ROOT_DEV properly if the values
-        * look sensible. If not, clear initrd reference.
-        */
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
-           initrd_end > initrd_start)
-               ROOT_DEV = Root_RAM0;
-       else
-               initrd_start = initrd_end = 0;
-#endif /* CONFIG_BLK_DEV_INITRD */
-
        DBG(" <- iSeries_init_early()\n");
 }
 
@@ -649,6 +617,16 @@ static void iseries_dedicated_idle(void)
 void __init iSeries_init_IRQ(void) { }
 #endif
 
+static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
+                                    unsigned long flags)
+{
+       return (void __iomem *)address;
+}
+
+static void iseries_iounmap(volatile void __iomem *token)
+{
+}
+
 /*
  * iSeries has no legacy IO, anything calling this function has to
  * fail or bad things will happen
@@ -665,6 +643,8 @@ static int __init iseries_probe(void)
                return 0;
 
        hpte_init_iSeries();
+       /* iSeries does not support 16M pages */
+       cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE;
 
        return 1;
 }
@@ -687,6 +667,8 @@ define_machine(iseries) {
        .progress       = iSeries_progress,
        .probe          = iseries_probe,
        .check_legacy_ioport    = iseries_check_legacy_ioport,
+       .ioremap        = iseries_ioremap,
+       .iounmap        = iseries_iounmap,
        /* XXX Implement enable_pmcs for iSeries */
 };
 
@@ -697,7 +679,7 @@ void * __init iSeries_early_setup(void)
        /* Identify CPU type. This is done again by the common code later
         * on but calling this function multiple times is fine.
         */
-       identify_cpu(0);
+       identify_cpu(0, mfspr(SPRN_PVR));
 
        powerpc_firmware_features |= FW_FEATURE_ISERIES;
        powerpc_firmware_features |= FW_FEATURE_LPAR;
index 04e07e5da0c10010d7e2c458fc4adb9f921b6779..84e7ee2c086f59f21bec9a99b0ce368b07b11676 100644 (file)
@@ -119,10 +119,9 @@ static int proc_viopath_show(struct seq_file *m, void *v)
        struct device_node *node;
        const char *sysid;
 
-       buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
+       buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return 0;
-       memset(buf, 0, HW_PAGE_SIZE);
 
        handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
                                DMA_FROM_DEVICE);
index 0657c579b8406081007ba83136aa4651d33b0877..c6911ddc479fc309bec1066a77fc044b6af7cd04 100644 (file)
@@ -8,5 +8,5 @@ extern void maple_get_rtc_time(struct rtc_time *tm);
 extern unsigned long maple_get_boot_time(void);
 extern void maple_calibrate_decr(void);
 extern void maple_pci_init(void);
-extern void maple_pcibios_fixup(void);
+extern void maple_pci_irq_fixup(struct pci_dev *dev);
 extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
index 63b4d1bff359b226a4dd882efbe8757dfcdd2e07..3a32deda765dab3487e1174467f923ceede19fc6 100644 (file)
@@ -502,38 +502,29 @@ static int __init add_bridge(struct device_node *dev)
 }
 
 
-void __init maple_pcibios_fixup(void)
+void __devinit maple_pci_irq_fixup(struct pci_dev *dev)
 {
-       struct pci_dev *dev = NULL;
-
-       DBG(" -> maple_pcibios_fixup\n");
-
-       for_each_pci_dev(dev) {
-               /* Fixup IRQ for PCIe host */
-               if (u4_pcie != NULL && dev->bus->number == 0 &&
-                   pci_bus_to_host(dev->bus) == u4_pcie) {
-                       printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
-                       dev->irq = irq_create_mapping(NULL, 1);
-                       if (dev->irq != NO_IRQ)
-                               set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
-                       continue;
-               }
-
-               /* Hide AMD8111 IDE interrupt when in legacy mode so
-                * the driver calls pci_get_legacy_ide_irq()
-                */
-               if (dev->vendor == PCI_VENDOR_ID_AMD &&
-                   dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
-                   (dev->class & 5) != 5) {
-                       dev->irq = NO_IRQ;
-                       continue;
-               }
+       DBG(" -> maple_pci_irq_fixup\n");
+
+       /* Fixup IRQ for PCIe host */
+       if (u4_pcie != NULL && dev->bus->number == 0 &&
+           pci_bus_to_host(dev->bus) == u4_pcie) {
+               printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
+               dev->irq = irq_create_mapping(NULL, 1);
+               if (dev->irq != NO_IRQ)
+                       set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+       }
 
-               /* For all others, map the interrupt from the device-tree */
-               pci_read_irq_line(dev);
+       /* Hide AMD8111 IDE interrupt when in legacy mode so
+        * the driver calls pci_get_legacy_ide_irq()
+        */
+       if (dev->vendor == PCI_VENDOR_ID_AMD &&
+           dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
+           (dev->class & 5) != 5) {
+               dev->irq = NO_IRQ;
        }
 
-       DBG(" <- maple_pcibios_fixup\n");
+       DBG(" <- maple_pci_irq_fixup\n");
 }
 
 static void __init maple_fixup_phb_resources(void)
index fe6b9bff61b9754cc968491df3e0362bae50ebb7..094989d50babaf0bdfe46db5e4b3ab1cd1531924 100644 (file)
@@ -312,7 +312,7 @@ define_machine(maple_md) {
        .setup_arch             = maple_setup_arch,
        .init_early             = maple_init_early,
        .init_IRQ               = maple_init_IRQ,
-       .pcibios_fixup          = maple_pcibios_fixup,
+       .pci_irq_fixup          = maple_pci_irq_fixup,
        .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
        .restart                = maple_restart,
        .power_off              = maple_power_off,
index fd71d72736b27a27d64aad78190d01396d717cb4..51c2a2397ecf100b8caba82c46c6b3af827f8309 100644 (file)
@@ -3,6 +3,5 @@
 
 extern unsigned long pas_get_boot_time(void);
 extern void pas_pci_init(void);
-extern void pas_pcibios_fixup(void);
 
 #endif /* _PASEMI_PASEMI_H */
index 39020c1fa13db6f609035d4d5d96af59c047e3a9..faa618e0404767217430eaa86d5a03f075efc8b8 100644 (file)
@@ -148,14 +148,6 @@ static int __init add_bridge(struct device_node *dev)
 }
 
 
-void __init pas_pcibios_fixup(void)
-{
-       struct pci_dev *dev = NULL;
-
-       for_each_pci_dev(dev)
-               pci_read_irq_line(dev);
-}
-
 static void __init pas_fixup_phb_resources(void)
 {
        struct pci_controller *hose, *tmp;
index 106896c3b60a56c0a119222444ea1895238bc8de..89d6e295dbf7171011ffd278fbf93efc06f45eb7 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/console.h>
+#include <linux/pci.h>
 
 #include <asm/prom.h>
 #include <asm/system.h>
@@ -71,6 +72,9 @@ void __init pas_setup_arch(void)
        /* Setup SMP callback */
        smp_ops = &pas_smp_ops;
 #endif
+       /* no iommu yet */
+       pci_dma_ops = &dma_direct_ops;
+
        /* Lookup PCI hosts */
        pas_pci_init();
 
@@ -81,17 +85,6 @@ void __init pas_setup_arch(void)
        printk(KERN_DEBUG "Using default idle loop\n");
 }
 
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
-static void __init pas_init_early(void)
-{
-       /* No iommu code yet */
-       ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-       ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-       pci_direct_iommu_init();
-}
-
 /* No legacy IO on our parts */
 static int pas_check_legacy_ioport(unsigned int baseport)
 {
@@ -173,10 +166,8 @@ define_machine(pas) {
        .name                   = "PA Semi PA6T-1682M",
        .probe                  = pas_probe,
        .setup_arch             = pas_setup_arch,
-       .init_early             = pas_init_early,
        .init_IRQ               = pas_init_IRQ,
        .get_irq                = mpic_get_irq,
-       .pcibios_fixup          = pas_pcibios_fixup,
        .restart                = pas_restart,
        .power_off              = pas_power_off,
        .halt                   = pas_halt,
index afa593a8544a82a82b09922d4f2c6795939b6b6f..c3a89414ddc099d1baf91fe28234fe87b75b02b5 100644 (file)
 
 #define OLD_BACKLIGHT_MAX 15
 
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
 
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 
 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
        return level;
 }
 
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
        if (atomic_read(&kernel_backlight_disabled))
                return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
        return error;
 }
 
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
        if (atomic_read(&kernel_backlight_disabled))
                return;
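
The backlight hunk above follows the workqueue API rework merged around this time: a work handler now receives the struct work_struct pointer itself instead of an opaque data argument, and DECLARE_WORK() loses its third parameter. Where per-instance data is still needed, the usual idiom is to embed the work item in the owning structure and recover it with container_of(); the fragment below is only an illustrative sketch with invented names (struct demo_dev, demo_worker), not code from this commit.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_dev {                       /* hypothetical driver state */
        int brightness;
        struct work_struct work;        /* embedded work item */
};

/* New-style handler: the work_struct itself is the argument. */
static void demo_worker(struct work_struct *work)
{
        struct demo_dev *dev = container_of(work, struct demo_dev, work);

        dev->brightness = 0;            /* act on the containing object */
}

/* INIT_WORK() likewise takes only the work item and the handler now. */
static void demo_setup(struct demo_dev *dev)
{
        INIT_WORK(&dev->work, demo_worker);
        schedule_work(&dev->work);
}
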
index e49621be66400103b0e69c0f22d1e0d380c4e72f..c29a6a064d2244e70d23464ddae9302d3b3afd25 100644 (file)
@@ -486,10 +486,6 @@ static long heathrow_sound_enable(struct device_node *node, long param,
 
 static u32 save_fcr[6];
 static u32 save_mbcr;
-static u32 save_gpio_levels[2];
-static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
-static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
-static u32 save_unin_clock_ctl;
 static struct dbdma_regs save_dbdma[13];
 static struct dbdma_regs save_alt_dbdma[13];
 
@@ -1548,6 +1544,10 @@ void g5_phy_disable_cpu1(void)
 
 
 #ifdef CONFIG_PM
+static u32 save_gpio_levels[2];
+static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
+static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
+static u32 save_unin_clock_ctl;
 
 static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
 {
index 257dc9068468c2b97de3a0c466ace797ee845014..f42475b27c153c324f150d9e4ff3a352c4cafba3 100644 (file)
@@ -984,30 +984,23 @@ static int __init add_bridge(struct device_node *dev)
        return 0;
 }
 
-void __init pmac_pcibios_fixup(void)
+void __devinit pmac_pci_irq_fixup(struct pci_dev *dev)
 {
-       struct pci_dev* dev = NULL;
-
-       for_each_pci_dev(dev) {
-               /* Read interrupt from the device-tree */
-               pci_read_irq_line(dev);
-
 #ifdef CONFIG_PPC32
-               /* Fixup interrupt for the modem/ethernet combo controller.
-                * on machines with a second ohare chip.
-                * The number in the device tree (27) is bogus (correct for
-                * the ethernet-only board but not the combo ethernet/modem
-                * board). The real interrupt is 28 on the second controller
-                * -> 28+32 = 60.
-                */
-               if (has_second_ohare &&
-                   dev->vendor == PCI_VENDOR_ID_DEC &&
-                   dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
-                       dev->irq = irq_create_mapping(NULL, 60);
-                       set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
-               }
-#endif /* CONFIG_PPC32 */
+       /* Fixup interrupt for the modem/ethernet combo controller.
+        * on machines with a second ohare chip.
+        * The number in the device tree (27) is bogus (correct for
+        * the ethernet-only board but not the combo ethernet/modem
+        * board). The real interrupt is 28 on the second controller
+        * -> 28+32 = 60.
+        */
+       if (has_second_ohare &&
+           dev->vendor == PCI_VENDOR_ID_DEC &&
+           dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) {
+               dev->irq = irq_create_mapping(NULL, 60);
+               set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
        }
+#endif /* CONFIG_PPC32 */
 }
 
 #ifdef CONFIG_PPC64
index 94e7b24b840b82df1cbc32d8f173e3f856bfca7d..6e090a7dea83b3428d73ff05d9b0c09f942b0d53 100644 (file)
@@ -20,7 +20,7 @@ extern void pmac_get_rtc_time(struct rtc_time *);
 extern int pmac_set_rtc_time(struct rtc_time *);
 extern void pmac_read_rtc_time(void);
 extern void pmac_calibrate_decr(void);
-extern void pmac_pcibios_fixup(void);
+extern void pmac_pci_irq_fixup(struct pci_dev *);
 extern void pmac_pci_init(void);
 extern unsigned long pmac_ide_get_base(int index);
 extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
index 824a618396ab17bf34eb84a02cea1509efff64c7..d949e9df41ef65b8ffa9d836d0a98f425da325e3 100644 (file)
@@ -70,6 +70,7 @@
 #include <asm/pmac_feature.h>
 #include <asm/time.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 #include <asm/mmu_context.h>
 #include <asm/iommu.h>
 #include <asm/smu.h>
@@ -361,7 +362,7 @@ char *bootdevice;
 void *boot_host;
 int boot_target;
 int boot_part;
-extern dev_t boot_dev;
+static dev_t boot_dev;
 
 #ifdef CONFIG_SCSI
 void __init note_scsi_host(struct device_node *node, void *host)
@@ -676,8 +677,6 @@ static int __init pmac_probe(void)
 
 #ifdef CONFIG_PPC32
        /* isa_io_base gets set in pmac_pci_init */
-       isa_mem_base = PMAC_ISA_MEM_BASE;
-       pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
        ISA_DMA_THRESHOLD = ~0L;
        DMA_MODE_READ = 1;
        DMA_MODE_WRITE = 2;
@@ -727,7 +726,7 @@ define_machine(powermac) {
        .show_cpuinfo           = pmac_show_cpuinfo,
        .init_IRQ               = pmac_pic_init,
        .get_irq                = NULL, /* changed later */
-       .pcibios_fixup          = pmac_pcibios_fixup,
+       .pci_irq_fixup          = pmac_pci_irq_fixup,
        .restart                = pmac_restart,
        .power_off              = pmac_power_off,
        .halt                   = pmac_halt,
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
new file mode 100644 (file)
index 0000000..451bfcd
--- /dev/null
@@ -0,0 +1,43 @@
+menu "PS3 Platform Options"
+       depends on PPC_PS3
+
+config PS3_HTAB_SIZE
+       depends on PPC_PS3
+       int "PS3 Platform pagetable size"
+       range 18 20
+       default 20
+       help
+         This option is only for experts who may have the desire to fine
+         tune the pagetable size on their system.  The value here is
+         expressed as the log2 of the page table size.  Valid values are
+         18, 19, and 20, corresponding to 256KB, 512KB and 1MB respectively.
+
+         If unsure, choose the default (20) with the confidence that your
+         system will have optimal runtime performance.
+
+config PS3_DYNAMIC_DMA
+       depends on PPC_PS3 && EXPERIMENTAL
+       bool "PS3 Platform dynamic DMA page table management"
+       default n
+       help
+         This option will enable kernel support to take advantage of the
+         per device dynamic DMA page table management provided by the Cell
+         processor's IO Controller.  This support incurs some runtime
+         overhead and also slightly increases kernel memory usage.  The
+         current implementation should be considered experimental.
+
+         This support is mainly for Linux kernel development.  If unsure,
+         say N.
+
+config PS3_USE_LPAR_ADDR
+       depends on PPC_PS3 && EXPERIMENTAL
+       bool "PS3 use lpar address space"
+       default y
+       help
+         This option is solely for experimentation by experts.  Disables
+         translation of lpar addresses.  SPE support currently won't work
+         without this set to y.
+
+         If you have any doubt, choose the default y.
+
+endmenu
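
The PS3_HTAB_SIZE help text expresses the hash page table size as a log2 value; the throwaway snippet below is just a quick way to see how the permitted range maps onto the sizes quoted there, and is purely illustrative.

#include <stdio.h>

int main(void)
{
        unsigned int log2_size;

        /* CONFIG_PS3_HTAB_SIZE is log2(bytes): 18, 19 or 20 */
        for (log2_size = 18; log2_size <= 20; log2_size++)
                printf("PS3_HTAB_SIZE=%u -> %lu KiB\n",
                       log2_size, (1UL << log2_size) / 1024);
        return 0;
}
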
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
new file mode 100644 (file)
index 0000000..3757cfa
--- /dev/null
@@ -0,0 +1,4 @@
+obj-y += setup.o mm.o smp.o time.o hvcall.o htab.o repository.o
+obj-y += interrupt.o exports.o os-area.o
+
+obj-$(CONFIG_SPU_BASE) += spu.o
diff --git a/arch/powerpc/platforms/ps3/exports.c b/arch/powerpc/platforms/ps3/exports.c
new file mode 100644 (file)
index 0000000..a7e8ffd
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *  PS3 hvcall exports for modules.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+
+#define LV1_CALL(name, in, out, num)                          \
+  extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
+  EXPORT_SYMBOL(_lv1_##name);
+
+#include <asm/lv1call.h>
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
new file mode 100644 (file)
index 0000000..8fe1769
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ *  PS3 pagetable management routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/machdep.h>
+#include <asm/lmb.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+static hpte_t *htab;
+static unsigned long htab_addr;
+static unsigned char *bolttab;
+static unsigned char *inusetab;
+
+static spinlock_t ps3_bolttab_lock = SPIN_LOCK_UNLOCKED;
+
+#define debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g) \
+       _debug_dump_hpte(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+static void _debug_dump_hpte(unsigned long pa, unsigned long va,
+       unsigned long group, unsigned long bitmap, hpte_t lhpte, int psize,
+       unsigned long slot, const char* func, int line)
+{
+       DBG("%s:%d: pa     = %lxh\n", func, line, pa);
+       DBG("%s:%d: lpar   = %lxh\n", func, line,
+               ps3_mm_phys_to_lpar(pa));
+       DBG("%s:%d: va     = %lxh\n", func, line, va);
+       DBG("%s:%d: group  = %lxh\n", func, line, group);
+       DBG("%s:%d: bitmap = %lxh\n", func, line, bitmap);
+       DBG("%s:%d: hpte.v = %lxh\n", func, line, lhpte.v);
+       DBG("%s:%d: hpte.r = %lxh\n", func, line, lhpte.r);
+       DBG("%s:%d: psize  = %xh\n", func, line, psize);
+       DBG("%s:%d: slot   = %lxh\n", func, line, slot);
+}
+
+static long ps3_hpte_insert(unsigned long hpte_group, unsigned long va,
+       unsigned long pa, unsigned long rflags, unsigned long vflags, int psize)
+{
+       unsigned long slot;
+       hpte_t lhpte;
+       int secondary = 0;
+       unsigned long result;
+       unsigned long bitmap;
+       unsigned long flags;
+       unsigned long p_pteg, s_pteg, b_index, b_mask, cb, ci;
+
+       vflags &= ~HPTE_V_SECONDARY; /* this bit is ignored */
+
+       lhpte.v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+       lhpte.r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize) | rflags;
+
+       p_pteg = hpte_group / HPTES_PER_GROUP;
+       s_pteg = ~p_pteg & htab_hash_mask;
+
+       spin_lock_irqsave(&ps3_bolttab_lock, flags);
+
+       BUG_ON(bolttab[p_pteg] == 0xff && bolttab[s_pteg] == 0xff);
+
+       bitmap = (inusetab[p_pteg] << 8) | inusetab[s_pteg];
+
+       if (bitmap == 0xffff) {
+               /*
+                * PTEG is full. Search for victim.
+                */
+               bitmap &= ~((bolttab[p_pteg] << 8) | bolttab[s_pteg]);
+               do {
+                       ci = mftb() & 15;
+                       cb = 0x8000UL >> ci;
+               } while ((cb & bitmap) == 0);
+       } else {
+               /*
+                * search free slot in hardware order
+                *      [primary]       0, 2, 4, 6, 1, 3, 5, 7
+                *      [secondary]     0, 2, 4, 6, 1, 3, 5, 7
+                */
+               for (ci = 0; ci < HPTES_PER_GROUP; ci += 2) {
+                       cb = 0x8000UL >> ci;
+                       if ((cb & bitmap) == 0)
+                               goto found;
+               }
+               for (ci = 1; ci < HPTES_PER_GROUP; ci += 2) {
+                       cb = 0x8000UL >> ci;
+                       if ((cb & bitmap) == 0)
+                               goto found;
+               }
+               for (ci = HPTES_PER_GROUP; ci < HPTES_PER_GROUP*2; ci += 2) {
+                       cb = 0x8000UL >> ci;
+                       if ((cb & bitmap) == 0)
+                               goto found;
+               }
+               for (ci = HPTES_PER_GROUP+1; ci < HPTES_PER_GROUP*2; ci += 2) {
+                       cb = 0x8000UL >> ci;
+                       if ((cb & bitmap) == 0)
+                               goto found;
+               }
+       }
+
+found:
+       if (ci < HPTES_PER_GROUP) {
+               slot = p_pteg * HPTES_PER_GROUP + ci;
+       } else {
+               slot = s_pteg * HPTES_PER_GROUP + (ci & 7);
+               /* lhpte.dw0.dw0.h = 1; */
+               vflags |= HPTE_V_SECONDARY;
+               lhpte.v |= HPTE_V_SECONDARY;
+       }
+
+       result = lv1_write_htab_entry(0, slot, lhpte.v, lhpte.r);
+
+       if (result) {
+               debug_dump_hpte(pa, va, hpte_group, bitmap, lhpte, psize, slot);
+               BUG();
+       }
+
+       /*
+        * If the slot we used is not in the primary HPTE group,
+        * it must be in the secondary HPTE group.
+        */
+
+       if ((hpte_group ^ slot) & ~(HPTES_PER_GROUP - 1)) {
+               secondary = 1;
+               b_index = s_pteg;
+       } else {
+               secondary = 0;
+               b_index = p_pteg;
+       }
+
+       b_mask = (lhpte.v & HPTE_V_BOLTED) ? 1 << 7 : 0 << 7;
+       bolttab[b_index] |= b_mask >> (slot & 7);
+       b_mask = 1 << 7;
+       inusetab[b_index] |= b_mask >> (slot & 7);
+       spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+       return (slot & 7) | (secondary << 3);
+}
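+
+/*
+ * Worked example of the value returned by ps3_hpte_insert() (an
+ * illustrative sketch, not part of the original code): bits 0..2 hold
+ * the slot within its PTEG and bit 3 flags the secondary group, so a
+ * caller holding hpte_group could recover the absolute slot with:
+ *
+ *     pteg = (rc & 0x8) ? (~(hpte_group / HPTES_PER_GROUP) & htab_hash_mask)
+ *                       : hpte_group / HPTES_PER_GROUP;
+ *     slot = pteg * HPTES_PER_GROUP + (rc & 7);
+ */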
+
+static long ps3_hpte_remove(unsigned long hpte_group)
+{
+       panic("ps3_hpte_remove() not implemented");
+       return 0;
+}
+
+static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
+       unsigned long va, int psize, int local)
+{
+       unsigned long flags;
+       unsigned long result;
+       unsigned long pteg, bit;
+       unsigned long hpte_v, want_v;
+
+       want_v = hpte_encode_v(va, psize);
+
+       spin_lock_irqsave(&ps3_bolttab_lock, flags);
+
+       hpte_v = htab[slot].v;
+       if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+               spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+               /* ps3_hpte_insert() will be used to update PTE */
+               return -1;
+       }
+
+       result = lv1_write_htab_entry(0, slot, 0, 0);
+
+       if (result) {
+               DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
+                      __func__, va, slot, psize, result, result);
+               BUG();
+       }
+
+       pteg = slot / HPTES_PER_GROUP;
+       bit = slot % HPTES_PER_GROUP;
+       inusetab[pteg] &= ~(0x80 >> bit);
+
+       spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+
+       /* ps3_hpte_insert() will be used to update PTE */
+       return -1;
+}
+
+static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+       int psize)
+{
+       panic("ps3_hpte_updateboltedpp() not implemented");
+}
+
+static void ps3_hpte_invalidate(unsigned long slot, unsigned long va,
+       int psize, int local)
+{
+       unsigned long flags;
+       unsigned long result;
+       unsigned long pteg, bit;
+
+       spin_lock_irqsave(&ps3_bolttab_lock, flags);
+       result = lv1_write_htab_entry(0, slot, 0, 0);
+
+       if (result) {
+               DBG("%s: va=%lx slot=%lx psize=%d result = %ld (0x%lx)\n",
+                      __func__, va, slot, psize, result, result);
+               BUG();
+       }
+
+       pteg = slot / HPTES_PER_GROUP;
+       bit = slot % HPTES_PER_GROUP;
+       inusetab[pteg] &= ~(0x80 >> bit);
+       spin_unlock_irqrestore(&ps3_bolttab_lock, flags);
+}
+
+static void ps3_hpte_clear(void)
+{
+       lv1_unmap_htab(htab_addr);
+}
+
+void __init ps3_hpte_init(unsigned long htab_size)
+{
+       long bitmap_size;
+
+       DBG(" -> %s:%d\n", __func__, __LINE__);
+
+       ppc_md.hpte_invalidate = ps3_hpte_invalidate;
+       ppc_md.hpte_updatepp = ps3_hpte_updatepp;
+       ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
+       ppc_md.hpte_insert = ps3_hpte_insert;
+       ppc_md.hpte_remove = ps3_hpte_remove;
+       ppc_md.hpte_clear_all = ps3_hpte_clear;
+
+       ppc64_pft_size = __ilog2(htab_size);
+
+       bitmap_size = htab_size / sizeof(hpte_t) / 8;
+
+       bolttab = __va(lmb_alloc(bitmap_size, 1));
+       inusetab = __va(lmb_alloc(bitmap_size, 1));
+
+       memset(bolttab, 0, bitmap_size);
+       memset(inusetab, 0, bitmap_size);
+
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+}
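+
+/*
+ * Sizing sketch for the bitmaps above (illustrative, assuming hpte_t is
+ * a 16 byte v/r pair): a 16 MiB hash table holds 16 MiB / 16 = 1M HPTEs,
+ * so bitmap_size = 1M / 8 = 128 KiB, i.e. one byte per 8-entry PTEG,
+ * which matches the per-PTEG byte indexing of bolttab[] and inusetab[].
+ */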
+
+void __init ps3_map_htab(void)
+{
+       long result;
+       unsigned long htab_size = (1UL << ppc64_pft_size);
+
+       result = lv1_map_htab(0, &htab_addr);
+
+       htab = (hpte_t *)__ioremap(htab_addr, htab_size, PAGE_READONLY_X);
+
+       DBG("%s:%d: lpar %016lxh, virt %016lxh\n", __func__, __LINE__,
+               htab_addr, (unsigned long)htab);
+}
diff --git a/arch/powerpc/platforms/ps3/hvcall.S b/arch/powerpc/platforms/ps3/hvcall.S
new file mode 100644 (file)
index 0000000..54be652
--- /dev/null
@@ -0,0 +1,804 @@
+/*
+ *  PS3 hvcall interface.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *  Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+#define lv1call .long 0x44000022; extsw r3, r3
+
+#define LV1_N_IN_0_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_0_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_1_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_2_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_3_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_4_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_5_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_6_IN_0_OUT LV1_N_IN_0_OUT
+#define LV1_7_IN_0_OUT LV1_N_IN_0_OUT
+
+#define LV1_0_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r3, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_0_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r3, -8(r1);                     \
+       stdu    r4, -16(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_0_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r3, -8(r1);                     \
+       std     r4, -16(r1);                    \
+       stdu    r5, -24(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 24;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_0_IN_7_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r3, -8(r1);                     \
+       std     r4, -16(r1);                    \
+       std     r5, -24(r1);                    \
+       std     r6, -32(r1);                    \
+       std     r7, -40(r1);                    \
+       std     r8, -48(r1);                    \
+       stdu    r9, -56(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 56;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+       ld      r11, -40(r1);                   \
+       std     r8, 0(r11);                     \
+       ld      r11, -48(r1);                   \
+       std     r9, 0(r11);                     \
+       ld      r11, -56(r1);                   \
+       std     r10, 0(r11);                    \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r4, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r4, -8(r1);                     \
+       stdu    r5, -16(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r4, -8(r1);                     \
+       std     r5, -16(r1);                    \
+       stdu    r6, -24(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 24;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_4_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r4, -8(r1);                     \
+       std     r5, -16(r1);                    \
+       std     r6, -24(r1);                    \
+       stdu    r7, -32(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 32;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_5_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r4, -8(r1);                     \
+       std     r5, -16(r1);                    \
+       std     r6, -24(r1);                    \
+       std     r7, -32(r1);                    \
+       stdu    r8, -40(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 40;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+       ld      r11, -40(r1);                   \
+       std     r8, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_6_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r4, -8(r1);                     \
+       std     r5, -16(r1);                    \
+       std     r6, -24(r1);                    \
+       std     r7, -32(r1);                    \
+       std     r8, -40(r1);                    \
+       stdu    r9, -48(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 48;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+       ld      r11, -40(r1);                   \
+       std     r8, 0(r11);                     \
+       ld      r11, -48(r1);                   \
+       std     r9, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_1_IN_7_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r4, -8(r1);                     \
+       std     r5, -16(r1);                    \
+       std     r6, -24(r1);                    \
+       std     r7, -32(r1);                    \
+       std     r8, -40(r1);                    \
+       std     r9, -48(r1);                    \
+       stdu    r10, -56(r1);                   \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 56;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+       ld      r11, -40(r1);                   \
+       std     r8, 0(r11);                     \
+       ld      r11, -48(r1);                   \
+       std     r9, 0(r11);                     \
+       ld      r11, -56(r1);                   \
+       std     r10, 0(r11);                    \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_2_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r5, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_2_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r5, -8(r1);                     \
+       stdu    r6, -16(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_2_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r5, -8(r1);                     \
+       std     r6, -16(r1);                    \
+       stdu    r7, -24(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 24;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_2_IN_4_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r5, -8(r1);                     \
+       std     r6, -16(r1);                    \
+       std     r7, -24(r1);                    \
+       stdu    r8, -32(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 32;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_2_IN_5_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r5, -8(r1);                     \
+       std     r6, -16(r1);                    \
+       std     r7, -24(r1);                    \
+       std     r8, -32(r1);                    \
+       stdu    r9, -40(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 40;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+       ld      r11, -32(r1);                   \
+       std     r7, 0(r11);                     \
+       ld      r11, -40(r1);                   \
+       std     r8, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_3_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r6, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_3_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r6, -8(r1);                     \
+       stdu    r7, -16(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_3_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r6, -8(r1);                     \
+       std     r7, -16(r1);                    \
+       stdu    r8, -24(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 24;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_4_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r7, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_4_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r7, -8(r1);                     \
+       stdu    r8, -16(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_4_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r7, -8(r1);                     \
+       std     r8, -16(r1);                    \
+       stdu    r9, -24(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 24;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_5_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r8, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_5_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r8, -8(r1);                     \
+       stdu    r9, -16(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_5_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r8, -8(r1);                     \
+       std     r9, -16(r1);                    \
+       stdu    r10, -24(r1);                   \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 24;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, -24(r1);                   \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_6_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r9, -8(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_6_IN_2_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r9, -8(r1);                     \
+       stdu    r10, -16(r1);                   \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_6_IN_3_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r9, -8(r1);                     \
+       stdu    r10, -16(r1);                   \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 16;                     \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+       ld      r11, -16(r1);                   \
+       std     r5, 0(r11);                     \
+       ld      r11, 48+8*8(r1);                \
+       std     r6, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_7_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       stdu    r10, -8(r1);                    \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       addi    r1, r1, 8;                      \
+       ld      r11, -8(r1);                    \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_7_IN_6_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       std     r10, 48+8*7(r1);                \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       ld      r11, 48+8*7(r1);                \
+       std     r4, 0(r11);                     \
+       ld      r11, 48+8*8(r1);                \
+       std     r5, 0(r11);                     \
+       ld      r11, 48+8*9(r1);                \
+       std     r6, 0(r11);                     \
+       ld      r11, 48+8*10(r1);               \
+       std     r7, 0(r11);                     \
+       ld      r11, 48+8*11(r1);               \
+       std     r8, 0(r11);                     \
+       ld      r11, 48+8*12(r1);               \
+       std     r9, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+#define LV1_8_IN_1_OUT(API_NAME, API_NUMBER)   \
+_GLOBAL(_##API_NAME)                           \
+                                               \
+       mflr    r0;                             \
+       std     r0, 16(r1);                     \
+                                               \
+       li      r11, API_NUMBER;                \
+       lv1call;                                \
+                                               \
+       ld      r11, 48+8*8(r1);                \
+       std     r4, 0(r11);                     \
+                                               \
+       ld      r0, 16(r1);                     \
+       mtlr    r0;                             \
+       blr
+
+       .text
+
+/* the lv1 underscored call definitions expand here */
+
+#define LV1_CALL(name, in, out, num) LV1_##in##_IN_##out##_OUT(lv1_##name, num)
+#include <asm/lv1call.h>
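To see how the stubs above come into existence (a sketch using a hypothetical hypercall, not an entry from lv1call.h): each LV1_CALL() line in asm/lv1call.h selects one of the LV1_<in>_IN_<out>_OUT macros defined in this file, so

    LV1_CALL(example_call, 1, 1, 99)
        => LV1_1_IN_1_OUT(lv1_example_call, 99)
        => a _GLOBAL(_lv1_example_call) stub that saves the link register,
           spills the output pointer (r4) to the stack, loads r11 with the
           hypercall number 99, issues the lv1call, stores the value the HV
           returns in r4 through the saved pointer, then restores lr and
           returns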
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
new file mode 100644 (file)
index 0000000..056c1e4
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+ *  PS3 interrupt routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+/**
+ * ps3_alloc_io_irq - Assign a virq to a system bus device.
+ * @interrupt_id: The device interrupt id read from the system repository.
+ * @virq: The assigned Linux virq.
+ *
+ * An io irq represents a non-virtualized device interrupt.  @interrupt_id
+ * corresponds to the interrupt number of the interrupt controller.
+ */
+
+int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq)
+{
+       int result;
+       unsigned long outlet;
+
+       result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       *virq = irq_create_mapping(NULL, outlet);
+
+       pr_debug("%s:%d: interrupt_id %u => outlet %lu, virq %u\n",
+               __func__, __LINE__, interrupt_id, outlet, *virq);
+
+       return 0;
+}
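+
+/*
+ * Usage sketch (illustrative only; the interrupt id, handler and device
+ * cookie names below are hypothetical, not from this commit):
+ *
+ *     unsigned int virq;
+ *
+ *     if (ps3_alloc_io_irq(interrupt_id, &virq) == 0 &&
+ *         request_irq(virq, example_handler, IRQF_DISABLED, "example", dev))
+ *             ps3_free_io_irq(virq);
+ */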
+
+int ps3_free_io_irq(unsigned int virq)
+{
+       int result;
+
+       result = lv1_destruct_io_irq_outlet(virq_to_hw(virq));
+
+       if (result)
+               pr_debug("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       irq_dispose_mapping(virq);
+
+       return result;
+}
+
+/**
+ * ps3_alloc_event_irq - Allocate a virq for use with a system event.
+ * @virq: The assigned Linux virq.
+ *
+ * The virq can be used with lv1_connect_interrupt_event_receive_port() to
+ * arrange to receive events, or with ps3_send_event_locally() to signal
+ * events.
+ */
+
+int ps3_alloc_event_irq(unsigned int *virq)
+{
+       int result;
+       unsigned long outlet;
+
+       result = lv1_construct_event_receive_port(&outlet);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_construct_event_receive_port failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               *virq = NO_IRQ;
+               return result;
+       }
+
+       *virq = irq_create_mapping(NULL, outlet);
+
+       pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__, outlet,
+               *virq);
+
+       return 0;
+}
+
+int ps3_free_event_irq(unsigned int virq)
+{
+       int result;
+
+       pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+       result = lv1_destruct_event_receive_port(virq_to_hw(virq));
+
+       if (result)
+               pr_debug("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       irq_dispose_mapping(virq);
+
+       pr_debug(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+
+int ps3_send_event_locally(unsigned int virq)
+{
+       return lv1_send_event_locally(virq_to_hw(virq));
+}
+
+/**
+ * ps3_connect_event_irq - Assign a virq to a system bus device.
+ * @did: The HV device identifier read from the system repository.
+ * @interrupt_id: The device interrupt id read from the system repository.
+ * @virq: The assigned Linux virq.
+ *
+ * An event irq represents a virtual device interrupt.  The interrupt_id
+ * corresponds to the software interrupt number.
+ */
+
+int ps3_connect_event_irq(const struct ps3_device_id *did,
+       unsigned int interrupt_id, unsigned int *virq)
+{
+       int result;
+
+       result = ps3_alloc_event_irq(virq);
+
+       if (result)
+               return result;
+
+       result = lv1_connect_interrupt_event_receive_port(did->bus_id,
+               did->dev_id, virq_to_hw(*virq), interrupt_id);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_connect_interrupt_event_receive_port"
+                       " failed: %s\n", __func__, __LINE__,
+                       ps3_result(result));
+               ps3_free_event_irq(*virq);
+               *virq = NO_IRQ;
+               return result;
+       }
+
+       pr_debug("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+               interrupt_id, *virq);
+
+       return 0;
+}
+
+int ps3_disconnect_event_irq(const struct ps3_device_id *did,
+       unsigned int interrupt_id, unsigned int virq)
+{
+       int result;
+
+       pr_debug(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
+               interrupt_id, virq);
+
+       result = lv1_disconnect_interrupt_event_receive_port(did->bus_id,
+               did->dev_id, virq_to_hw(virq), interrupt_id);
+
+       if (result)
+               pr_debug("%s:%d: lv1_disconnect_interrupt_event_receive_port"
+                       " failed: %s\n", __func__, __LINE__,
+                       ps3_result(result));
+
+       ps3_free_event_irq(virq);
+
+       pr_debug(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+
+/**
+ * ps3_alloc_vuart_irq - Configure the system virtual uart virq.
+ * @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
+ * @virq: The assigned Linux virq.
+ *
+ * The system supports only a single virtual uart, so multiple calls without
+ * freeing the interrupt will return a wrong state error.
+ */
+
+int ps3_alloc_vuart_irq(void* virt_addr_bmp, unsigned int *virq)
+{
+       int result;
+       unsigned long outlet;
+       unsigned long lpar_addr;
+
+       BUG_ON(!is_kernel_addr((unsigned long)virt_addr_bmp));
+
+       lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
+
+       result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       *virq = irq_create_mapping(NULL, outlet);
+
+       pr_debug("%s:%d: outlet %lu, virq %u\n", __func__, __LINE__,
+               outlet, *virq);
+
+       return 0;
+}
+
+int ps3_free_vuart_irq(unsigned int virq)
+{
+       int result;
+
+       result = lv1_deconfigure_virtual_uart_irq();
+
+       if (result) {
+               pr_debug("%s:%d: lv1_deconfigure_virtual_uart_irq failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       irq_dispose_mapping(virq);
+
+       return result;
+}
+
+/**
+ * ps3_alloc_spe_irq - Configure an spe virq.
+ * @spe_id: The spe_id returned from lv1_construct_logical_spe().
+ * @class: The spe interrupt class {0,1,2}.
+ * @virq: The assigned Linux virq.
+ *
+ */
+
+int ps3_alloc_spe_irq(unsigned long spe_id, unsigned int class,
+       unsigned int *virq)
+{
+       int result;
+       unsigned long outlet;
+
+       BUG_ON(class > 2);
+
+       result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       *virq = irq_create_mapping(NULL, outlet);
+
+       pr_debug("%s:%d: spe_id %lu, class %u, outlet %lu, virq %u\n",
+               __func__, __LINE__, spe_id, class, outlet, *virq);
+
+       return 0;
+}
+
+int ps3_free_spe_irq(unsigned int virq)
+{
+       irq_dispose_mapping(virq);
+       return 0;
+}
+
+#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
+#define PS3_PLUG_MAX 63
+
+/**
+ * struct bmp - a per cpu irq status and mask bitmap structure
+ * @status: 256 bit status bitmap indexed by plug
+ * @unused_1: pad @status out to the 256 bits the HV expects
+ * @mask: 256 bit mask bitmap indexed by plug
+ * @unused_2: pad @mask out to the 256 bits the HV expects
+ * @lock: protects @status and @mask
+ * @ipi_debug_brk_mask: the bit corresponding to the ipi debug break plug
+ *
+ * The HV maintains per SMT thread mappings of HV outlet to HV plug on
+ * behalf of the guest.  These mappings are implemented as 256 bit guest
+ * supplied bitmaps indexed by plug number.  The addresses of the bitmaps
+ * are registered with the HV through lv1_configure_irq_state_bitmap().
+ *
+ * The HV supports 256 plugs per thread, assigned as {0..255}, for a total
+ * of 512 plugs supported on a processor.  To simplify the logic this
+ * implementation equates HV plug value to Linux virq value, constrains each
+ * interrupt to have a system wide unique plug number, and limits the range
+ * of the plug values to map into the first dword of the bitmaps.  This
+ * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}.  Note
+ * that there is no constraint on how many in this set an individual thread
+ * can acquire.
+ */
+
+struct bmp {
+       struct {
+               unsigned long status;
+               unsigned long unused_1[3];
+               unsigned long mask;
+               unsigned long unused_2[3];
+       } __attribute__ ((packed));
+       spinlock_t lock;
+       unsigned long ipi_debug_brk_mask;
+};
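+
+/*
+ * Plug-to-bit mapping sketch (illustrative, not part of the original
+ * code): plug/virq p maps to bit (0x8000000000000000UL >> p) of the
+ * first 64 bit word of @status and @mask, so plug 0 would be the most
+ * significant bit and, for example, plug 18 occupies 0x0000200000000000.
+ */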
+
+/**
+ * struct private - a per cpu data structure
+ * @node: HV node id
+ * @cpu: HV thread id
+ * @bmp: an HV bmp structure
+ */
+
+struct private {
+       unsigned long node;
+       unsigned int cpu;
+       struct bmp bmp;
+};
+
+#if defined(DEBUG)
+static void _dump_64_bmp(const char *header, const unsigned long *p, unsigned cpu,
+       const char* func, int line)
+{
+       pr_debug("%s:%d: %s %u {%04lx_%04lx_%04lx_%04lx}\n",
+               func, line, header, cpu,
+               *p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
+               *p & 0xffff);
+}
+
+static void __attribute__ ((unused)) _dump_256_bmp(const char *header,
+       const unsigned long *p, unsigned cpu, const char* func, int line)
+{
+       pr_debug("%s:%d: %s %u {%016lx:%016lx:%016lx:%016lx}\n",
+               func, line, header, cpu, p[0], p[1], p[2], p[3]);
+}
+
+#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
+static void _dump_bmp(struct private* pd, const char* func, int line)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&pd->bmp.lock, flags);
+       _dump_64_bmp("stat", &pd->bmp.status, pd->cpu, func, line);
+       _dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+       spin_unlock_irqrestore(&pd->bmp.lock, flags);
+}
+
+#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_mask(struct private* pd,
+       const char* func, int line)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&pd->bmp.lock, flags);
+       _dump_64_bmp("mask", &pd->bmp.mask, pd->cpu, func, line);
+       spin_unlock_irqrestore(&pd->bmp.lock, flags);
+}
+#else
+static void dump_bmp(struct private* pd) {}
+#endif /* defined(DEBUG) */
+
+static void chip_mask(unsigned int virq)
+{
+       unsigned long flags;
+       struct private *pd = get_irq_chip_data(virq);
+
+       pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
+
+       BUG_ON(virq < NUM_ISA_INTERRUPTS);
+       BUG_ON(virq > PS3_PLUG_MAX);
+
+       spin_lock_irqsave(&pd->bmp.lock, flags);
+       pd->bmp.mask &= ~(0x8000000000000000UL >> virq);
+       spin_unlock_irqrestore(&pd->bmp.lock, flags);
+
+       lv1_did_update_interrupt_mask(pd->node, pd->cpu);
+}
+
+static void chip_unmask(unsigned int virq)
+{
+       unsigned long flags;
+       struct private *pd = get_irq_chip_data(virq);
+
+       pr_debug("%s:%d: cpu %u, virq %d\n", __func__, __LINE__, pd->cpu, virq);
+
+       BUG_ON(virq < NUM_ISA_INTERRUPTS);
+       BUG_ON(virq > PS3_PLUG_MAX);
+
+       spin_lock_irqsave(&pd->bmp.lock, flags);
+       pd->bmp.mask |= (0x8000000000000000UL >> virq);
+       spin_unlock_irqrestore(&pd->bmp.lock, flags);
+
+       lv1_did_update_interrupt_mask(pd->node, pd->cpu);
+}
+
+static void chip_eoi(unsigned int virq)
+{
+       lv1_end_of_interrupt(virq);
+}
+
+static struct irq_chip irq_chip = {
+       .typename = "ps3",
+       .mask = chip_mask,
+       .unmask = chip_unmask,
+       .eoi = chip_eoi,
+};
+
+static void host_unmap(struct irq_host *h, unsigned int virq)
+{
+       int result;
+
+       pr_debug("%s:%d: virq %d\n", __func__, __LINE__, virq);
+
+       lv1_disconnect_irq_plug(virq);
+
+       result = set_irq_chip_data(virq, NULL);
+       BUG_ON(result);
+}
+
+static DEFINE_PER_CPU(struct private, private);
+
+static int host_map(struct irq_host *h, unsigned int virq,
+       irq_hw_number_t hwirq)
+{
+       int result;
+       unsigned int cpu;
+
+       pr_debug(" -> %s:%d\n", __func__, __LINE__);
+       pr_debug("%s:%d: hwirq %lu => virq %u\n", __func__, __LINE__, hwirq,
+               virq);
+
+       /* bind this virq to a cpu */
+
+       preempt_disable();
+       cpu = smp_processor_id();
+       result = lv1_connect_irq_plug(virq, hwirq);
+       preempt_enable();
+
+       if (result) {
+               pr_info("%s:%d: lv1_connect_irq_plug failed:"
+                       " %s\n", __func__, __LINE__, ps3_result(result));
+               return -EPERM;
+       }
+
+       result = set_irq_chip_data(virq, &per_cpu(private, cpu));
+       BUG_ON(result);
+
+       set_irq_chip_and_handler(virq, &irq_chip, handle_fasteoi_irq);
+
+       pr_debug(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+
+static struct irq_host_ops host_ops = {
+       .map = host_map,
+       .unmap = host_unmap,
+};
+
+void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
+{
+       struct private *pd = &per_cpu(private, cpu);
+
+       pd->bmp.ipi_debug_brk_mask = 0x8000000000000000UL >> virq;
+
+       pr_debug("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
+               cpu, virq, pd->bmp.ipi_debug_brk_mask);
+}
+
+static int bmp_get_and_clear_status_bit(struct bmp *m)
+{
+       unsigned long flags;
+       unsigned int bit;
+       unsigned long x;
+
+       spin_lock_irqsave(&m->lock, flags);
+
+       /* check for ipi break first to stop this cpu ASAP */
+
+       if (m->status & m->ipi_debug_brk_mask) {
+               m->status &= ~m->ipi_debug_brk_mask;
+               spin_unlock_irqrestore(&m->lock, flags);
+               return __ilog2(m->ipi_debug_brk_mask);
+       }
+
+       x = (m->status & m->mask);
+
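+       /*
+        * Scan the pending-and-unmasked bits from the most significant end,
+        * starting at plug NUM_ISA_INTERRUPTS; x is pre-shifted so that bit
+        * 63 of x always corresponds to the plug number held in 'bit'.
+        */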
+       for (bit = NUM_ISA_INTERRUPTS, x <<= bit; x; bit++, x <<= 1)
+               if (x & 0x8000000000000000UL) {
+                       m->status &= ~(0x8000000000000000UL >> bit);
+                       spin_unlock_irqrestore(&m->lock, flags);
+                       return bit;
+               }
+
+       spin_unlock_irqrestore(&m->lock, flags);
+
+       pr_debug("%s:%d: not found\n", __func__, __LINE__);
+       return -1;
+}
+
+unsigned int ps3_get_irq(void)
+{
+       int plug;
+
+       struct private *pd = &__get_cpu_var(private);
+
+       plug = bmp_get_and_clear_status_bit(&pd->bmp);
+
+       if (plug < 1) {
+               pr_debug("%s:%d: no plug found: cpu %u\n", __func__, __LINE__,
+                       pd->cpu);
+               dump_bmp(&per_cpu(private, 0));
+               dump_bmp(&per_cpu(private, 1));
+               return NO_IRQ;
+       }
+
+#if defined(DEBUG)
+       if (plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX) {
+               dump_bmp(&per_cpu(private, 0));
+               dump_bmp(&per_cpu(private, 1));
+               BUG();
+       }
+#endif
+       return plug;
+}
+
+void __init ps3_init_IRQ(void)
+{
+       int result;
+       unsigned long node;
+       unsigned cpu;
+       struct irq_host *host;
+
+       lv1_get_logical_ppe_id(&node);
+
+       host = irq_alloc_host(IRQ_HOST_MAP_NOMAP, 0, &host_ops,
+               PS3_INVALID_OUTLET);
+       irq_set_default_host(host);
+       irq_set_virq_count(PS3_PLUG_MAX + 1);
+
+       for_each_possible_cpu(cpu) {
+               struct private *pd = &per_cpu(private, cpu);
+
+               pd->node = node;
+               pd->cpu = cpu;
+               spin_lock_init(&pd->bmp.lock);
+
+               result = lv1_configure_irq_state_bitmap(node, cpu,
+                       ps3_mm_phys_to_lpar(__pa(&pd->bmp.status)));
+
+               if (result)
+                       pr_debug("%s:%d: lv1_configure_irq_state_bitmap failed:"
+                               " %s\n", __func__, __LINE__,
+                               ps3_result(result));
+       }
+
+       ppc_md.get_irq = ps3_get_irq;
+}
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
new file mode 100644 (file)
index 0000000..49c0d01
--- /dev/null
@@ -0,0 +1,831 @@
+/*
+ *  PS3 address space management.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memory_hotplug.h>
+
+#include <asm/firmware.h>
+#include <asm/lmb.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do { if (0) printk(fmt); } while (0)
+#endif
+
+enum {
+#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+       USE_LPAR_ADDR = 1,
+#else
+       USE_LPAR_ADDR = 0,
+#endif
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+       USE_DYNAMIC_DMA = 1,
+#else
+       USE_DYNAMIC_DMA = 0,
+#endif
+};
+
+enum {
+       PAGE_SHIFT_4K = 12U,
+       PAGE_SHIFT_64K = 16U,
+       PAGE_SHIFT_16M = 24U,
+};
+
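+/*
+ * Pack two page-shift values into the 64-bit page-sizes argument used below
+ * with lv1_construct_virtual_address_space(): the first size occupies bits
+ * 63..56, the second bits 55..48 (layout as implied by this helper).
+ */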
+static unsigned long make_page_sizes(unsigned long a, unsigned long b)
+{
+       return (a << 56) | (b << 48);
+}
+
+enum {
+       ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
+       ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
+};
+
+/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
+
+enum {
+       HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
+       HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
+};
+
+/*============================================================================*/
+/* virtual address space routines                                             */
+/*============================================================================*/
+
+/**
+ * struct mem_region - memory region structure
+ * @base: base address
+ * @size: size in bytes
+ * @offset: difference between base and rm.size
+ */
+
+struct mem_region {
+       unsigned long base;
+       unsigned long size;
+       unsigned long offset;
+};
+
+/**
+ * struct map - address space state variables holder
+ * @total: total memory available as reported by HV
+ * @vas_id: HV virtual address space id
+ * @htab_size: htab size in bytes
+ *
+ * The HV virtual address space (vas) allows for hotplug memory regions.
+ * Memory regions can be created and destroyed in the vas at runtime.
+ * @rm: real mode (bootmem) region
+ * @r1: hotplug memory region(s)
+ *
+ * ps3 addresses
+ * virt_addr: a cpu 'translated' effective address
+ * phys_addr: an address in what Linux thinks is the physical address space
+ * lpar_addr: an address in the HV virtual address space
+ * bus_addr: an io controller 'translated' address on a device bus
+ */
+
+struct map {
+       unsigned long total;
+       unsigned long vas_id;
+       unsigned long htab_size;
+       struct mem_region rm;
+       struct mem_region r1;
+};
+
+#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
+static void _debug_dump_map(const struct map* m, const char* func, int line)
+{
+       DBG("%s:%d: map.total     = %lxh\n", func, line, m->total);
+       DBG("%s:%d: map.rm.size   = %lxh\n", func, line, m->rm.size);
+       DBG("%s:%d: map.vas_id    = %lu\n", func, line, m->vas_id);
+       DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
+       DBG("%s:%d: map.r1.base   = %lxh\n", func, line, m->r1.base);
+       DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
+       DBG("%s:%d: map.r1.size   = %lxh\n", func, line, m->r1.size);
+}
+
+static struct map map;
+
+/**
+ * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
+ * @phys_addr: linux physical address
+ */
+
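+/*
+ * Sketch of the layout this translation assumes (non-lpar-address config):
+ * linux physical addresses [0, rm.size) map 1:1 to lpar addresses, while
+ * [rm.size, total) lie in the hotplug region and are shifted up by
+ * r1.offset (r1.base - rm.size), so e.g. phys rm.size maps to lpar r1.base.
+ */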
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
+{
+       BUG_ON(is_kernel_addr(phys_addr));
+       if (USE_LPAR_ADDR)
+               return phys_addr;
+       else
+               return (phys_addr < map.rm.size || phys_addr >= map.total)
+                       ? phys_addr : phys_addr + map.r1.offset;
+}
+
+EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
+
+/**
+ * ps3_mm_vas_create - create the virtual address space
+ * @htab_size: A pointer to return the size in bytes of the constructed htab.
+ */
+
+void __init ps3_mm_vas_create(unsigned long* htab_size)
+{
+       int result;
+       unsigned long start_address;
+       unsigned long size;
+       unsigned long access_right;
+       unsigned long max_page_size;
+       unsigned long flags;
+
+       result = lv1_query_logical_partition_address_region_info(0,
+               &start_address, &size, &access_right, &max_page_size,
+               &flags);
+
+       if (result) {
+               DBG("%s:%d: lv1_query_logical_partition_address_region_info "
+                       "failed: %s\n", __func__, __LINE__,
+                       ps3_result(result));
+               goto fail;
+       }
+
+       if (max_page_size < PAGE_SHIFT_16M) {
+               DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
+                       max_page_size);
+               goto fail;
+       }
+
+       BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
+       BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
+
+       result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
+                       2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
+                       &map.vas_id, &map.htab_size);
+
+       if (result) {
+               DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               goto fail;
+       }
+
+       result = lv1_select_virtual_address_space(map.vas_id);
+
+       if (result) {
+               DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               goto fail;
+       }
+
+       *htab_size = map.htab_size;
+
+       debug_dump_map(&map);
+
+       return;
+
+fail:
+       panic("ps3_mm_vas_create failed");
+}
+
+/**
+ * ps3_mm_vas_destroy - destroy the virtual address space
+ */
+
+void ps3_mm_vas_destroy(void)
+{
+       if (map.vas_id) {
+               lv1_select_virtual_address_space(0);
+               lv1_destruct_virtual_address_space(map.vas_id);
+               map.vas_id = 0;
+       }
+}
+
+/*============================================================================*/
+/* memory hotplug routines                                                    */
+/*============================================================================*/
+
+/**
+ * ps3_mm_region_create - create a memory region in the vas
+ * @r: pointer to a struct mem_region to accept initialized values
+ * @size: requested region size
+ *
+ * This implementation creates the region with the vas large page size.
+ * @size is rounded down to a multiple of the vas large page size.
+ */
+
+int ps3_mm_region_create(struct mem_region *r, unsigned long size)
+{
+       int result;
+       unsigned long muid;
+
+       r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
+
+       DBG("%s:%d requested  %lxh\n", __func__, __LINE__, size);
+       DBG("%s:%d actual     %lxh\n", __func__, __LINE__, r->size);
+       DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
+               (unsigned long)(size - r->size),
+               (size - r->size) / 1024 / 1024);
+
+       if (r->size == 0) {
+               DBG("%s:%d: size == 0\n", __func__, __LINE__);
+               result = -1;
+               goto zero_region;
+       }
+
+       result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
+               ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
+
+       if (result || r->base < map.rm.size) {
+               DBG("%s:%d: lv1_allocate_memory failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               goto zero_region;
+       }
+
+       r->offset = r->base - map.rm.size;
+       return result;
+
+zero_region:
+       r->size = r->base = r->offset = 0;
+       return result;
+}
+
+/**
+ * ps3_mm_region_destroy - destroy a memory region
+ * @r: pointer to struct mem_region
+ */
+
+void ps3_mm_region_destroy(struct mem_region *r)
+{
+       if (r->base) {
+               lv1_release_memory(r->base);
+               r->size = r->base = r->offset = 0;
+               map.total = map.rm.size;
+       }
+}
+
+/**
+ * ps3_mm_add_memory - hot add memory
+ */
+
+static int __init ps3_mm_add_memory(void)
+{
+       int result;
+       unsigned long start_addr;
+       unsigned long start_pfn;
+       unsigned long nr_pages;
+
+       if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+               return 0;
+
+       BUG_ON(!mem_init_done);
+
+       start_addr = USE_LPAR_ADDR ? map.r1.base : map.rm.size;
+       start_pfn = start_addr >> PAGE_SHIFT;
+       nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
+               __func__, __LINE__, start_addr, start_pfn, nr_pages);
+
+       result = add_memory(0, start_addr, map.r1.size);
+
+       if (result) {
+               DBG("%s:%d: add_memory failed: (%d)\n",
+                       __func__, __LINE__, result);
+               return result;
+       }
+
+       result = online_pages(start_pfn, nr_pages);
+
+       if (result)
+               DBG("%s:%d: online_pages failed: (%d)\n",
+                       __func__, __LINE__, result);
+
+       return result;
+}
+
+core_initcall(ps3_mm_add_memory);
+
+/*============================================================================*/
+/* dma routines                                                               */
+/*============================================================================*/
+
+/**
+ * dma_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
+ * @r: pointer to dma region structure
+ * @lpar_addr: HV lpar address
+ */
+
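+/*
+ * Informal note: this is the inverse of ps3_mm_phys_to_lpar() added to the
+ * region's bus base, i.e. bus_addr = r->bus_addr + phys_addr for any lpar
+ * address derived from a linux physical address.
+ */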
+static unsigned long dma_lpar_to_bus(struct ps3_dma_region *r,
+       unsigned long lpar_addr)
+{
+       BUG_ON(lpar_addr >= map.r1.base + map.r1.size);
+       return r->bus_addr + (lpar_addr <= map.rm.size ? lpar_addr
+               : lpar_addr - map.r1.offset);
+}
+
+#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
+static void _dma_dump_region(const struct ps3_dma_region *r, const char* func,
+       int line)
+{
+       DBG("%s:%d: dev        %u:%u\n", func, line, r->did.bus_id,
+               r->did.dev_id);
+       DBG("%s:%d: page_size  %u\n", func, line, r->page_size);
+       DBG("%s:%d: bus_addr   %lxh\n", func, line, r->bus_addr);
+       DBG("%s:%d: len        %lxh\n", func, line, r->len);
+}
+
+/**
+ * dma_chunk - A chunk of dma pages mapped by the io controller.
+ * @region - The dma region that owns this chunk.
+ * @lpar_addr: Starting lpar address of the area to map.
+ * @bus_addr: Starting ioc bus address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
+ * list of all chunks owned by the region.
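+ * @usage_count: Reference count; taken in dma_map_area() and dropped in
+ * dma_unmap_area().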
+ *
+ * This implementation uses a very simple dma page manager
+ * based on the dma_chunk structure.  This scheme assumes
+ * that all drivers use very well behaved dma ops.
+ */
+
+struct dma_chunk {
+       struct ps3_dma_region *region;
+       unsigned long lpar_addr;
+       unsigned long bus_addr;
+       unsigned long len;
+       struct list_head link;
+       unsigned int usage_count;
+};
+
+#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
+static void _dma_dump_chunk (const struct dma_chunk* c, const char* func,
+       int line)
+{
+       DBG("%s:%d: r.dev        %u:%u\n", func, line,
+               c->region->did.bus_id, c->region->did.dev_id);
+       DBG("%s:%d: r.bus_addr   %lxh\n", func, line, c->region->bus_addr);
+       DBG("%s:%d: r.page_size  %u\n", func, line, c->region->page_size);
+       DBG("%s:%d: r.len        %lxh\n", func, line, c->region->len);
+       DBG("%s:%d: c.lpar_addr  %lxh\n", func, line, c->lpar_addr);
+       DBG("%s:%d: c.bus_addr   %lxh\n", func, line, c->bus_addr);
+       DBG("%s:%d: c.len        %lxh\n", func, line, c->len);
+}
+
+static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
+       unsigned long bus_addr, unsigned long len)
+{
+       struct dma_chunk *c;
+       unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
+       unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+
+       list_for_each_entry(c, &r->chunk_list.head, link) {
+               /* intersection */
+               if (aligned_bus >= c->bus_addr
+                       && aligned_bus < c->bus_addr + c->len
+                       && aligned_bus + aligned_len <= c->bus_addr + c->len) {
+                       return c;
+               }
+               /* below */
+               if (aligned_bus + aligned_len <= c->bus_addr) {
+                       continue;
+               }
+               /* above */
+               if (aligned_bus >= c->bus_addr + c->len) {
+                       continue;
+               }
+
+               /* we don't handle the multi-chunk case for now */
+
+               dma_dump_chunk(c);
+               BUG();
+       }
+       return NULL;
+}
+
+static int dma_free_chunk(struct dma_chunk *c)
+{
+       int result = 0;
+
+       if (c->bus_addr) {
+               result = lv1_unmap_device_dma_region(c->region->did.bus_id,
+                       c->region->did.dev_id, c->bus_addr, c->len);
+               BUG_ON(result);
+       }
+
+       kfree(c);
+       return result;
+}
+
+/**
+ * dma_map_pages - Maps dma pages into the io controller bus address space.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @phys_addr: Starting physical address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
+ *
+ * This is the lowest level dma mapping routine, and is the one that will
+ * make the HV call to add the pages into the io controller address space.
+ */
+
+static int dma_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
+       unsigned long len, struct dma_chunk **c_out)
+{
+       int result;
+       struct dma_chunk *c;
+
+       c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);
+
+       if (!c) {
+               result = -ENOMEM;
+               goto fail_alloc;
+       }
+
+       c->region = r;
+       c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+       c->bus_addr = dma_lpar_to_bus(r, c->lpar_addr);
+       c->len = len;
+
+       result = lv1_map_device_dma_region(c->region->did.bus_id,
+               c->region->did.dev_id, c->lpar_addr, c->bus_addr, c->len,
+               0xf800000000000000UL);
+
+       if (result) {
+               DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               goto fail_map;
+       }
+
+       list_add(&c->link, &r->chunk_list.head);
+
+       *c_out = c;
+       return 0;
+
+fail_map:
+       kfree(c);
+fail_alloc:
+       *c_out = NULL;
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+
+/**
+ * dma_region_create - Create a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This is the lowest level dma region create routine, and is the one that
+ * will make the HV call to create the region.
+ */
+
+static int dma_region_create(struct ps3_dma_region* r)
+{
+       int result;
+
+       r->len = _ALIGN_UP(map.total, 1 << r->page_size);
+       INIT_LIST_HEAD(&r->chunk_list.head);
+       spin_lock_init(&r->chunk_list.lock);
+
+       result = lv1_allocate_device_dma_region(r->did.bus_id, r->did.dev_id,
+               r->len, r->page_size, r->region_type, &r->bus_addr);
+
+       dma_dump_region(r);
+
+       if (result) {
+               DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               r->len = r->bus_addr = 0;
+       }
+
+       return result;
+}
+
+/**
+ * dma_region_free - Free a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This is the lowest level dma region free routine, and is the one that
+ * will make the HV call to free the region.
+ */
+
+static int dma_region_free(struct ps3_dma_region* r)
+{
+       int result;
+       struct dma_chunk *c;
+       struct dma_chunk *tmp;
+
+       list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
+               list_del(&c->link);
+               dma_free_chunk(c);
+       }
+
+       result = lv1_free_device_dma_region(r->did.bus_id, r->did.dev_id,
+               r->bus_addr);
+
+       if (result)
+               DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       r->len = r->bus_addr = 0;
+
+       return result;
+}
+
+/**
+ * dma_map_area - Map an area of memory into a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @virt_addr: Starting virtual address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @bus_addr: A pointer to return the starting ioc bus address of the area to
+ * map.
+ *
+ * This is the common dma mapping routine.
+ */
+
+static int dma_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
+       unsigned long len, unsigned long *bus_addr)
+{
+       int result;
+       unsigned long flags;
+       struct dma_chunk *c;
+       unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+               : virt_addr;
+
+       *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+
+       if (!USE_DYNAMIC_DMA) {
+               unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
+               DBG(" -> %s:%d\n", __func__, __LINE__);
+               DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
+                       virt_addr);
+               DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
+                       phys_addr);
+               DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
+                       lpar_addr);
+               DBG("%s:%d len       %lxh\n", __func__, __LINE__, len);
+               DBG("%s:%d bus_addr  %lxh (%lxh)\n", __func__, __LINE__,
+               *bus_addr, len);
+       }
+
+       spin_lock_irqsave(&r->chunk_list.lock, flags);
+       c = dma_find_chunk(r, *bus_addr, len);
+
+       if (c) {
+               c->usage_count++;
+               spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+               return 0;
+       }
+
+       result = dma_map_pages(r, _ALIGN_DOWN(phys_addr, 1 << r->page_size),
+               _ALIGN_UP(len, 1 << r->page_size), &c);
+
+       if (result) {
+               *bus_addr = 0;
+               DBG("%s:%d: dma_map_pages failed (%d)\n",
+                       __func__, __LINE__, result);
+               spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+               return result;
+       }
+
+       c->usage_count = 1;
+
+       spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+       return result;
+}
+
+/**
+ * dma_unmap_area - Unmap an area of memory from a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @bus_addr: The starting ioc bus address of the area to unmap.
+ * @len: Length in bytes of the area to unmap.
+ *
+ * This is the common dma unmap routine.
+ */
+
+int dma_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
+       unsigned long len)
+{
+       unsigned long flags;
+       struct dma_chunk *c;
+
+       spin_lock_irqsave(&r->chunk_list.lock, flags);
+       c = dma_find_chunk(r, bus_addr, len);
+
+       if (!c) {
+               unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+                       1 << r->page_size);
+               unsigned long aligned_len = _ALIGN_UP(len, 1 << r->page_size);
+               DBG("%s:%d: not found: bus_addr %lxh\n",
+                       __func__, __LINE__, bus_addr);
+               DBG("%s:%d: not found: len %lxh\n",
+                       __func__, __LINE__, len);
+               DBG("%s:%d: not found: aligned_bus %lxh\n",
+                       __func__, __LINE__, aligned_bus);
+               DBG("%s:%d: not found: aligned_len %lxh\n",
+                       __func__, __LINE__, aligned_len);
+               BUG();
+       }
+
+       c->usage_count--;
+
+       if (!c->usage_count) {
+               list_del(&c->link);
+               dma_free_chunk(c);
+       }
+
+       spin_unlock_irqrestore(&r->chunk_list.lock, flags);
+       return 0;
+}
+
+/**
+ * dma_region_create_linear - Set up a linear dma mapping for a device.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This routine creates an HV dma region for the device and maps all available
+ * ram into the io controller bus address space.
+ */
+
+static int dma_region_create_linear(struct ps3_dma_region *r)
+{
+       int result;
+       unsigned long tmp;
+
+       /* force 16M dma pages for linear mapping */
+
+       if (r->page_size != PS3_DMA_16M) {
+               pr_info("%s:%d: forcing 16M pages for linear map\n",
+                       __func__, __LINE__);
+               r->page_size = PS3_DMA_16M;
+       }
+
+       result = dma_region_create(r);
+       BUG_ON(result);
+
+       result = dma_map_area(r, map.rm.base, map.rm.size, &tmp);
+       BUG_ON(result);
+
+       if (USE_LPAR_ADDR)
+               result = dma_map_area(r, map.r1.base, map.r1.size,
+                       &tmp);
+       else
+               result = dma_map_area(r, map.rm.size, map.r1.size,
+                       &tmp);
+
+       BUG_ON(result);
+
+       return result;
+}
+
+/**
+ * dma_region_free_linear - Free a linear dma mapping for a device.
+ * @r: Pointer to a struct ps3_dma_region.
+ *
+ * This routine will unmap all mapped areas and free the HV dma region.
+ */
+
+static int dma_region_free_linear(struct ps3_dma_region *r)
+{
+       int result;
+
+       result = dma_unmap_area(r, dma_lpar_to_bus(r, 0), map.rm.size);
+       BUG_ON(result);
+
+       result = dma_unmap_area(r, dma_lpar_to_bus(r, map.r1.base),
+               map.r1.size);
+       BUG_ON(result);
+
+       result = dma_region_free(r);
+       BUG_ON(result);
+
+       return result;
+}
+
+/**
+ * dma_map_area_linear - Map an area of memory into a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @virt_addr: Starting virtual address of the area to map.
+ * @len: Length in bytes of the area to map.
+ * @bus_addr: A pointer to return the starting ioc bus address of the area to
+ * map.
+ *
+ * This routine just returns the corresponding bus address.  Actual mapping
+ * occurs in dma_region_create_linear().
+ */
+
+static int dma_map_area_linear(struct ps3_dma_region *r,
+       unsigned long virt_addr, unsigned long len, unsigned long *bus_addr)
+{
+       unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
+               : virt_addr;
+       *bus_addr = dma_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
+       return 0;
+}
+
+/**
+ * dma_unmap_area_linear - Unmap an area of memory from a device dma region.
+ * @r: Pointer to a struct ps3_dma_region.
+ * @bus_addr: The starting ioc bus address of the area to unmap.
+ * @len: Length in bytes of the area to unmap.
+ *
+ * This routine does nothing.  Unmapping occurs in dma_region_free_linear().
+ */
+
+static int dma_unmap_area_linear(struct ps3_dma_region *r,
+       unsigned long bus_addr, unsigned long len)
+{
+       return 0;
+}
+
+int ps3_dma_region_create(struct ps3_dma_region *r)
+{
+       return (USE_DYNAMIC_DMA)
+               ? dma_region_create(r)
+               : dma_region_create_linear(r);
+}
+
+int ps3_dma_region_free(struct ps3_dma_region *r)
+{
+       return (USE_DYNAMIC_DMA)
+               ? dma_region_free(r)
+               : dma_region_free_linear(r);
+}
+
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+       unsigned long len, unsigned long *bus_addr)
+{
+       return (USE_DYNAMIC_DMA)
+               ? dma_map_area(r, virt_addr, len, bus_addr)
+               : dma_map_area_linear(r, virt_addr, len, bus_addr);
+}
+
+int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+       unsigned long len)
+{
+       return (USE_DYNAMIC_DMA) ? dma_unmap_area(r, bus_addr, len)
+               : dma_unmap_area_linear(r, bus_addr, len);
+}
+
+/*============================================================================*/
+/* system startup routines                                                    */
+/*============================================================================*/
+
+/**
+ * ps3_mm_init - initialize the address space state variables
+ */
+
+void __init ps3_mm_init(void)
+{
+       int result;
+
+       DBG(" -> %s:%d\n", __func__, __LINE__);
+
+       result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
+               &map.total);
+
+       if (result)
+               panic("ps3_repository_read_mm_info() failed");
+
+       map.rm.offset = map.rm.base;
+       map.vas_id = map.htab_size = 0;
+
+       /* this implementation assumes map.rm.base is zero */
+
+       BUG_ON(map.rm.base);
+       BUG_ON(!map.rm.size);
+
+       lmb_add(map.rm.base, map.rm.size);
+       lmb_analyze();
+
+       /* arrange to do this in ps3_mm_add_memory */
+       ps3_mm_region_create(&map.r1, map.total - map.rm.size);
+
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+/**
+ * ps3_mm_shutdown - final cleanup of address space
+ */
+
+void ps3_mm_shutdown(void)
+{
+       ps3_mm_region_destroy(&map.r1);
+       map.total = map.rm.size;
+}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
new file mode 100644 (file)
index 0000000..5835830
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ *  PS3 'Other OS' area data.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include <asm/lmb.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+enum {
+       OS_AREA_SEGMENT_SIZE = 0X200,
+};
+
+enum {
+       HEADER_LDR_FORMAT_RAW = 0,
+       HEADER_LDR_FORMAT_GZIP = 1,
+};
+
+/**
+ * struct os_area_header - os area header segment.
+ * @magic_num: Always 'cell_ext_os_area'.
+ * @hdr_version: Header format version number.
+ * @os_area_offset: Starting segment number of os image area.
+ * @ldr_area_offset: Starting segment number of bootloader image area.
+ * @ldr_format: HEADER_LDR_FORMAT flag.
+ * @ldr_size: Size of bootloader image in bytes.
+ *
+ * Note that the docs refer to area offsets.  These are offsets in units of
+ * segments from the start of the os area (top of the header).  These are
+ * better thought of as segment numbers.  The area starting at
+ * @os_area_offset is reserved for the os image.
+ */
+
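+/*
+ * For illustration: a segment is OS_AREA_SEGMENT_SIZE (0x200) bytes, so a
+ * segment number N corresponds to byte offset N * 0x200 from the start of
+ * the os area; the params segment read in ps3_os_area_init() sits at
+ * segment 1 (byte offset 0x200).
+ */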
+struct os_area_header {
+       s8 magic_num[16];
+       u32 hdr_version;
+       u32 os_area_offset;
+       u32 ldr_area_offset;
+       u32 _reserved_1;
+       u32 ldr_format;
+       u32 ldr_size;
+       u32 _reserved_2[6];
+} __attribute__ ((packed));
+
+enum {
+       PARAM_BOOT_FLAG_GAME_OS = 0,
+       PARAM_BOOT_FLAG_OTHER_OS = 1,
+};
+
+enum {
+       PARAM_AV_MULTI_OUT_NTSC = 0,
+       PARAM_AV_MULTI_OUT_PAL_RGB = 1,
+       PARAM_AV_MULTI_OUT_PAL_YCBCR = 2,
+       PARAM_AV_MULTI_OUT_SECAM = 3,
+};
+
+enum {
+       PARAM_CTRL_BUTTON_O_IS_YES = 0,
+       PARAM_CTRL_BUTTON_X_IS_YES = 1,
+};
+
+/**
+ * struct os_area_params - os area params segment.
+ * @boot_flag: User preference of operating system, PARAM_BOOT_FLAG flag.
+ * @num_params: Number of params in this (params) segment.
+ * @rtc_diff: Difference in seconds between 1970 and the ps3 rtc value.
+ * @av_multi_out: User preference of AV output, PARAM_AV_MULTI_OUT flag.
+ * @ctrl_button: User preference of controller button config, PARAM_CTRL_BUTTON
+ *     flag.
+ * @static_ip_addr: User preference of static IP address.
+ * @network_mask: User preference of static network mask.
+ * @default_gateway: User preference of static default gateway.
+ * @dns_primary: User preference of static primary dns server.
+ * @dns_secondary: User preference of static secondary dns server.
+ *
+ * User preference of zero for static_ip_addr means use dhcp.
+ */
+
+struct os_area_params {
+       u32 boot_flag;
+       u32 _reserved_1[3];
+       u32 num_params;
+       u32 _reserved_2[3];
+       /* param 0 */
+       s64 rtc_diff;
+       u8 av_multi_out;
+       u8 ctrl_button;
+       u8 _reserved_3[6];
+       /* param 1 */
+       u8 static_ip_addr[4];
+       u8 network_mask[4];
+       u8 default_gateway[4];
+       u8 _reserved_4[4];
+       /* param 2 */
+       u8 dns_primary[4];
+       u8 dns_secondary[4];
+       u8 _reserved_5[8];
+} __attribute__ ((packed));
+
+/**
+ * struct saved_params - Static working copies of data from the 'Other OS' area.
+ *
+ * For the convenience of the guest, the HV makes a copy of the 'Other OS' area
+ * in flash to a high address in the boot memory region and then puts that RAM
+ * address and the byte count into the repository for retrieval by the guest.
+ * We copy the data we want into a static variable and allow the memory setup
+ * by the HV to be claimed by the lmb manager.
+ */
+
+static struct saved_params {
+       /* param 0 */
+       s64 rtc_diff;
+       unsigned int av_multi_out;
+       unsigned int ctrl_button;
+       /* param 1 */
+       u8 static_ip_addr[4];
+       u8 network_mask[4];
+       u8 default_gateway[4];
+       /* param 2 */
+       u8 dns_primary[4];
+       u8 dns_secondary[4];
+} saved_params;
+
+#define dump_header(_a) _dump_header(_a, __func__, __LINE__)
+static void _dump_header(const struct os_area_header __iomem *h, const char* func,
+       int line)
+{
+       pr_debug("%s:%d: h.magic_num:         '%s'\n", func, line,
+               h->magic_num);
+       pr_debug("%s:%d: h.hdr_version:       %u\n", func, line,
+               h->hdr_version);
+       pr_debug("%s:%d: h.os_area_offset:   %u\n", func, line,
+               h->os_area_offset);
+       pr_debug("%s:%d: h.ldr_area_offset: %u\n", func, line,
+               h->ldr_area_offset);
+       pr_debug("%s:%d: h.ldr_format:        %u\n", func, line,
+               h->ldr_format);
+       pr_debug("%s:%d: h.ldr_size:          %xh\n", func, line,
+               h->ldr_size);
+}
+
+#define dump_params(_a) _dump_params(_a, __func__, __LINE__)
+static void _dump_params(const struct os_area_params __iomem *p, const char* func,
+       int line)
+{
+       pr_debug("%s:%d: p.boot_flag:       %u\n", func, line, p->boot_flag);
+       pr_debug("%s:%d: p.num_params:      %u\n", func, line, p->num_params);
+       pr_debug("%s:%d: p.rtc_diff         %ld\n", func, line, p->rtc_diff);
+       pr_debug("%s:%d: p.av_multi_out     %u\n", func, line, p->av_multi_out);
+       pr_debug("%s:%d: p.ctrl_button:     %u\n", func, line, p->ctrl_button);
+       pr_debug("%s:%d: p.static_ip_addr:  %u.%u.%u.%u\n", func, line,
+               p->static_ip_addr[0], p->static_ip_addr[1],
+               p->static_ip_addr[2], p->static_ip_addr[3]);
+       pr_debug("%s:%d: p.network_mask:    %u.%u.%u.%u\n", func, line,
+               p->network_mask[0], p->network_mask[1],
+               p->network_mask[2], p->network_mask[3]);
+       pr_debug("%s:%d: p.default_gateway: %u.%u.%u.%u\n", func, line,
+               p->default_gateway[0], p->default_gateway[1],
+               p->default_gateway[2], p->default_gateway[3]);
+       pr_debug("%s:%d: p.dns_primary:     %u.%u.%u.%u\n", func, line,
+               p->dns_primary[0], p->dns_primary[1],
+               p->dns_primary[2], p->dns_primary[3]);
+       pr_debug("%s:%d: p.dns_secondary:   %u.%u.%u.%u\n", func, line,
+               p->dns_secondary[0], p->dns_secondary[1],
+               p->dns_secondary[2], p->dns_secondary[3]);
+}
+
+static int __init verify_header(const struct os_area_header *header)
+{
+       if (memcmp(header->magic_num, "cell_ext_os_area", 16)) {
+               pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
+               return -1;
+       }
+
+       if (header->hdr_version < 1) {
+               pr_debug("%s:%d hdr_version failed\n", __func__, __LINE__);
+               return -1;
+       }
+
+       if (header->os_area_offset > header->ldr_area_offset) {
+               pr_debug("%s:%d offsets failed\n", __func__, __LINE__);
+               return -1;
+       }
+
+       return 0;
+}
+
+int __init ps3_os_area_init(void)
+{
+       int result;
+       u64 lpar_addr;
+       unsigned int size;
+       struct os_area_header *header;
+       struct os_area_params *params;
+
+       result = ps3_repository_read_boot_dat_info(&lpar_addr, &size);
+
+       if (result) {
+               pr_debug("%s:%d ps3_repository_read_boot_dat_info failed\n",
+                       __func__, __LINE__);
+               return result;
+       }
+
+       header = (struct os_area_header *)__va(lpar_addr);
+       params = (struct os_area_params *)__va(lpar_addr + OS_AREA_SEGMENT_SIZE);
+
+       result = verify_header(header);
+
+       if (result) {
+               pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
+               dump_header(header);
+               return -EIO;
+       }
+
+       dump_header(header);
+       dump_params(params);
+
+       saved_params.rtc_diff = params->rtc_diff;
+       saved_params.av_multi_out = params->av_multi_out;
+       saved_params.ctrl_button = params->ctrl_button;
+       memcpy(saved_params.static_ip_addr, params->static_ip_addr, 4);
+       memcpy(saved_params.network_mask, params->network_mask, 4);
+       memcpy(saved_params.default_gateway, params->default_gateway, 4);
+       memcpy(saved_params.dns_primary, params->dns_primary, 4);
+       memcpy(saved_params.dns_secondary, params->dns_secondary, 4);
+
+       return result;
+}
+
+/**
+ * ps3_os_area_rtc_diff - Returns the ps3 rtc diff value.
+ *
+ * The ps3 rtc maintains a value that approximates seconds since
+ * 2000-01-01 00:00:00 UTC.  Returns the exact number of seconds from 1970 to
+ * 2000 when saved_params.rtc_diff has not been properly set up.
+ */
+
+u64 ps3_os_area_rtc_diff(void)
+{
+       return saved_params.rtc_diff ? saved_params.rtc_diff : 946684800UL;
+}
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
new file mode 100644 (file)
index 0000000..23b111b
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ *  PS3 platform declarations.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_PS3_PLATFORM_H)
+#define _PS3_PLATFORM_H
+
+#include <linux/rtc.h>
+
+/* htab */
+
+void __init ps3_hpte_init(unsigned long htab_size);
+void __init ps3_map_htab(void);
+
+/* mm */
+
+void __init ps3_mm_init(void);
+void __init ps3_mm_vas_create(unsigned long* htab_size);
+void ps3_mm_vas_destroy(void);
+void ps3_mm_shutdown(void);
+
+/* irq */
+
+void ps3_init_IRQ(void);
+void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+
+/* smp */
+
+void smp_init_ps3(void);
+void ps3_smp_cleanup_cpu(int cpu);
+
+/* time */
+
+void __init ps3_calibrate_decr(void);
+unsigned long __init ps3_get_boot_time(void);
+void ps3_get_rtc_time(struct rtc_time *time);
+int ps3_set_rtc_time(struct rtc_time *time);
+
+/* os area */
+
+int __init ps3_os_area_init(void);
+u64 ps3_os_area_rtc_diff(void);
+
+/* spu */
+
+#if defined(CONFIG_SPU_BASE)
+void ps3_spu_set_platform (void);
+#else
+static inline void ps3_spu_set_platform (void) {}
+#endif
+
+#endif
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
new file mode 100644 (file)
index 0000000..273a0d6
--- /dev/null
@@ -0,0 +1,840 @@
+/*
+ *  PS3 repository routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+enum ps3_vendor_id {
+       PS3_VENDOR_ID_NONE = 0,
+       PS3_VENDOR_ID_SONY = 0x8000000000000000UL,
+};
+
+enum ps3_lpar_id {
+       PS3_LPAR_ID_CURRENT = 0,
+       PS3_LPAR_ID_PME = 1,
+};
+
+#define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)
+static void _dump_field(const char *hdr, u64 n, const char* func, int line)
+{
+#if defined(DEBUG)
+       char s[16];
+       const char *const in = (const char *)&n;
+       unsigned int i;
+
+       for (i = 0; i < 8; i++)
+               s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
+       s[i] = 0;
+
+       pr_debug("%s:%d: %s%016lx : %s\n", func, line, hdr, n, s);
+#endif
+}
+
+#define dump_node_name(_a, _b, _c, _d, _e) \
+       _dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)
+static void _dump_node_name (unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
+       u64 n4, const char* func, int line)
+{
+       pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+       _dump_field("n1: ", n1, func, line);
+       _dump_field("n2: ", n2, func, line);
+       _dump_field("n3: ", n3, func, line);
+       _dump_field("n4: ", n4, func, line);
+}
+
+#define dump_node(_a, _b, _c, _d, _e, _f, _g) \
+       _dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
+static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+       u64 v1, u64 v2, const char* func, int line)
+{
+       pr_debug("%s:%d: lpar: %u\n", func, line, lpar_id);
+       _dump_field("n1: ", n1, func, line);
+       _dump_field("n2: ", n2, func, line);
+       _dump_field("n3: ", n3, func, line);
+       _dump_field("n4: ", n4, func, line);
+       pr_debug("%s:%d: v1: %016lx\n", func, line, v1);
+       pr_debug("%s:%d: v2: %016lx\n", func, line, v2);
+}
+
+/**
+ * make_first_field - Make the first field of a repository node name.
+ * @text: Text portion of the field.
+ * @index: Numeric index portion of the field.  Use zero for 'don't care'.
+ *
+ * This routine sets the vendor id to zero (non-vendor specific).
+ * Returns field value.
+ */
+
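+/*
+ * Worked example (big-endian PPE assumed): make_first_field("bus", 1) copies
+ * "bus" into the high bytes of a u64, giving 0x6275730000000000, keeps the
+ * upper 32 bits shifted down (0x62757300), and adds the index, yielding a
+ * field value of 0x0000000062757301.
+ */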
+static u64 make_first_field(const char *text, u64 index)
+{
+       u64 n;
+
+       strncpy((char *)&n, text, 8);
+       return PS3_VENDOR_ID_NONE + (n >> 32) + index;
+}
+
+/**
+ * make_field - Make subsequent fields of a repository node name.
+ * @text: Text portion of the field.  Use "" for 'don't care'.
+ * @index: Numeric index portion of the field.  Use zero for 'don't care'.
+ *
+ * Returns field value.
+ */
+
+static u64 make_field(const char *text, u64 index)
+{
+       u64 n;
+
+       strncpy((char *)&n, text, 8);
+       return n + index;
+}
+
+/**
+ * read_node - Read a repository node from raw fields.
+ * @lpar_id: The logical partition id, or PS3_LPAR_ID_CURRENT for the
+ *      calling partition.
+ * @n1: First field of node name.
+ * @n2: Second field of node name.  Use zero for 'don't care'.
+ * @n3: Third field of node name.  Use zero for 'don't care'.
+ * @n4: Fourth field of node name.  Use zero for 'don't care'.
+ * @_v1: Pointer to receive the first repository value (high word).
+ * @_v2: Pointer to receive the second repository value (low word).  Optional,
+ *      use zero for 'don't care'.
+ */
+
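+/*
+ * Example of how the helpers below compose node names:
+ * ps3_repository_read_bus_id(0) reads the node
+ * (make_first_field("bus", 0), make_field("id", 0), 0, 0) from the PME lpar
+ * and returns the bus id in v1; v2 is unused.
+ */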
+static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
+       u64 *_v1, u64 *_v2)
+{
+       int result;
+       u64 v1;
+       u64 v2;
+
+       if (lpar_id == PS3_LPAR_ID_CURRENT) {
+               u64 id;
+               lv1_get_logical_partition_id(&id);
+               lpar_id = id;
+       }
+
+       result = lv1_get_repository_node_value(lpar_id, n1, n2, n3, n4, &v1,
+               &v2);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_get_repository_node_value failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               dump_node_name(lpar_id, n1, n2, n3, n4);
+               return result;
+       }
+
+       dump_node(lpar_id, n1, n2, n3, n4, v1, v2);
+
+       if (_v1)
+               *_v1 = v1;
+       if (_v2)
+               *_v2 = v2;
+
+       if (v1 && !_v1)
+               pr_debug("%s:%d: warning: discarding non-zero v1: %016lx\n",
+                       __func__, __LINE__, v1);
+       if (v2 && !_v2)
+               pr_debug("%s:%d: warning: discarding non-zero v2: %016lx\n",
+                       __func__, __LINE__, v2);
+
+       return result;
+}
+
+int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+       u64 *value)
+{
+       return read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field(bus_str, 0),
+               0, 0,
+               value, 0);
+}
+
+int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id)
+{
+       int result;
+       u64 v1;
+       u64 v2; /* unused */
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("id", 0),
+               0, 0,
+               &v1, &v2);
+       *bus_id = v1;
+       return result;
+}
+
+int ps3_repository_read_bus_type(unsigned int bus_index,
+       enum ps3_bus_type *bus_type)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("type", 0),
+               0, 0,
+               &v1, 0);
+       *bus_type = v1;
+       return result;
+}
+
+int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+       unsigned int *num_dev)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("num_dev", 0),
+               0, 0,
+               &v1, 0);
+       *num_dev = v1;
+       return result;
+}
+
+int ps3_repository_read_dev_str(unsigned int bus_index,
+       unsigned int dev_index, const char *dev_str, u64 *value)
+{
+       return read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("dev", dev_index),
+               make_field(dev_str, 0),
+               0,
+               value, 0);
+}
+
+int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+       unsigned int *dev_id)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("dev", dev_index),
+               make_field("id", 0),
+               0,
+               &v1, 0);
+       *dev_id = v1;
+       return result;
+}
+
+int ps3_repository_read_dev_type(unsigned int bus_index,
+       unsigned int dev_index, enum ps3_dev_type *dev_type)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("dev", dev_index),
+               make_field("type", 0),
+               0,
+               &v1, 0);
+       *dev_type = v1;
+       return result;
+}
+
+int ps3_repository_read_dev_intr(unsigned int bus_index,
+       unsigned int dev_index, unsigned int intr_index,
+       unsigned int *intr_type, unsigned int* interrupt_id)
+{
+       int result;
+       u64 v1;
+       u64 v2;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("dev", dev_index),
+               make_field("intr", intr_index),
+               0,
+               &v1, &v2);
+       *intr_type = v1;
+       *interrupt_id = v2;
+       return result;
+}
+
+int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+       unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("dev", dev_index),
+               make_field("reg", reg_index),
+               make_field("type", 0),
+               &v1, 0);
+       *reg_type = v1;
+       return result;
+}
+
+int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
+       unsigned int dev_index, unsigned int reg_index, u64 *bus_addr, u64 *len)
+{
+       return read_node(PS3_LPAR_ID_PME,
+               make_first_field("bus", bus_index),
+               make_field("dev", dev_index),
+               make_field("reg", reg_index),
+               make_field("data", 0),
+               bus_addr, len);
+}
+
+int ps3_repository_read_dev_reg(unsigned int bus_index,
+       unsigned int dev_index, unsigned int reg_index, unsigned int *reg_type,
+       u64 *bus_addr, u64 *len)
+{
+       int result = ps3_repository_read_dev_reg_type(bus_index, dev_index,
+               reg_index, reg_type);
+       return result ? result
+               : ps3_repository_read_dev_reg_addr(bus_index, dev_index,
+               reg_index, bus_addr, len);
+}
+
+#if defined(DEBUG)
+int ps3_repository_dump_resource_info(unsigned int bus_index,
+       unsigned int dev_index)
+{
+       int result = 0;
+       unsigned int res_index;
+
+       pr_debug(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
+               bus_index, dev_index);
+
+       for (res_index = 0; res_index < 10; res_index++) {
+               enum ps3_interrupt_type intr_type;
+               unsigned int interrupt_id;
+
+               result = ps3_repository_read_dev_intr(bus_index, dev_index,
+                       res_index, &intr_type, &interrupt_id);
+
+               if (result) {
+                       if (result !=  LV1_NO_ENTRY)
+                               pr_debug("%s:%d ps3_repository_read_dev_intr"
+                                       " (%u:%u) failed\n", __func__, __LINE__,
+                                       bus_index, dev_index);
+                       break;
+               }
+
+               pr_debug("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
+                       __func__, __LINE__, bus_index, dev_index, intr_type,
+                       interrupt_id);
+       }
+
+       for (res_index = 0; res_index < 10; res_index++) {
+               enum ps3_region_type reg_type;
+               u64 bus_addr;
+               u64 len;
+
+               result = ps3_repository_read_dev_reg(bus_index, dev_index,
+                       res_index, &reg_type, &bus_addr, &len);
+
+               if (result) {
+                       if (result !=  LV1_NO_ENTRY)
+                               pr_debug("%s:%d ps3_repository_read_dev_reg"
+                                       " (%u:%u) failed\n", __func__, __LINE__,
+                                       bus_index, dev_index);
+                       break;
+               }
+
+               pr_debug("%s:%d (%u:%u) reg_type %u, bus_addr %lxh, len %lxh\n",
+                       __func__, __LINE__, bus_index, dev_index, reg_type,
+                       bus_addr, len);
+       }
+
+       pr_debug(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+
+static int dump_device_info(unsigned int bus_index, unsigned int num_dev)
+{
+       int result = 0;
+       unsigned int dev_index;
+
+       pr_debug(" -> %s:%d: bus_%u\n", __func__, __LINE__, bus_index);
+
+       for (dev_index = 0; dev_index < num_dev; dev_index++) {
+               enum ps3_dev_type dev_type;
+               unsigned int dev_id;
+
+               result = ps3_repository_read_dev_type(bus_index, dev_index,
+                       &dev_type);
+
+               if (result) {
+                       pr_debug("%s:%d ps3_repository_read_dev_type"
+                               " (%u:%u) failed\n", __func__, __LINE__,
+                               bus_index, dev_index);
+                       break;
+               }
+
+               result = ps3_repository_read_dev_id(bus_index, dev_index,
+                       &dev_id);
+
+               if (result) {
+                       pr_debug("%s:%d ps3_repository_read_dev_id"
+                               " (%u:%u) failed\n", __func__, __LINE__,
+                               bus_index, dev_index);
+                       continue;
+               }
+
+               pr_debug("%s:%d  (%u:%u): dev_type %u, dev_id %u\n", __func__,
+                       __LINE__, bus_index, dev_index, dev_type, dev_id);
+
+               ps3_repository_dump_resource_info(bus_index, dev_index);
+       }
+
+       pr_debug(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+
+int ps3_repository_dump_bus_info(void)
+{
+       int result = 0;
+       unsigned int bus_index;
+
+       pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+       for (bus_index = 0; bus_index < 10; bus_index++) {
+               enum ps3_bus_type bus_type;
+               unsigned int bus_id;
+               unsigned int num_dev;
+
+               result = ps3_repository_read_bus_type(bus_index, &bus_type);
+
+               if (result) {
+                       pr_debug("%s:%d read_bus_type(%u) failed\n",
+                               __func__, __LINE__, bus_index);
+                       break;
+               }
+
+               result = ps3_repository_read_bus_id(bus_index, &bus_id);
+
+               if (result) {
+                       pr_debug("%s:%d read_bus_id(%u) failed\n",
+                               __func__, __LINE__, bus_index);
+                       continue;
+               }
+
+               if (bus_index != bus_id)
+                       pr_debug("%s:%d bus_index != bus_id\n",
+                               __func__, __LINE__);
+
+               result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+
+               if (result) {
+                       pr_debug("%s:%d read_bus_num_dev(%u) failed\n",
+                               __func__, __LINE__, bus_index);
+                       continue;
+               }
+
+               pr_debug("%s:%d bus_%u: bus_type %u, bus_id %u, num_dev %u\n",
+                       __func__, __LINE__, bus_index, bus_type, bus_id,
+                       num_dev);
+
+               dump_device_info(bus_index, num_dev);
+       }
+
+       pr_debug(" <- %s:%d\n", __func__, __LINE__);
+       return result;
+}
+#endif /* defined(DEBUG) */
+
+static int find_device(unsigned int bus_index, unsigned int num_dev,
+       unsigned int start_dev_index, enum ps3_dev_type dev_type,
+       struct ps3_repository_device *dev)
+{
+       int result = 0;
+       unsigned int dev_index;
+
+       pr_debug("%s:%d: find dev_type %u\n", __func__, __LINE__, dev_type);
+
+       dev->dev_index = UINT_MAX;
+
+       for (dev_index = start_dev_index; dev_index < num_dev; dev_index++) {
+               enum ps3_dev_type x;
+
+               result = ps3_repository_read_dev_type(bus_index, dev_index,
+                       &x);
+
+               if (result) {
+                       pr_debug("%s:%d read_dev_type failed\n",
+                               __func__, __LINE__);
+                       return result;
+               }
+
+               if (x == dev_type)
+                       break;
+       }
+
+       BUG_ON(dev_index == num_dev);
+
+       pr_debug("%s:%d: found dev_type %u at dev_index %u\n",
+               __func__, __LINE__, dev_type, dev_index);
+
+       result = ps3_repository_read_dev_id(bus_index, dev_index,
+               &dev->did.dev_id);
+
+       if (result) {
+               pr_debug("%s:%d read_dev_id failed\n",
+                       __func__, __LINE__);
+               return result;
+       }
+
+       dev->dev_index = dev_index;
+
+       pr_debug("%s:%d found: dev_id %u\n", __func__, __LINE__,
+               dev->did.dev_id);
+
+       return result;
+}
+
+int ps3_repository_find_device(enum ps3_bus_type bus_type,
+       enum ps3_dev_type dev_type,
+       const struct ps3_repository_device *start_dev,
+       struct ps3_repository_device *dev)
+{
+       int result = 0;
+       unsigned int bus_index;
+       unsigned int num_dev;
+
+       pr_debug("%s:%d: find bus_type %u, dev_type %u\n", __func__, __LINE__,
+               bus_type, dev_type);
+
+       dev->bus_index = UINT_MAX;
+
+       for (bus_index = start_dev ? start_dev->bus_index : 0; bus_index < 10;
+               bus_index++) {
+               enum ps3_bus_type x;
+
+               result = ps3_repository_read_bus_type(bus_index, &x);
+
+               if (result) {
+                       pr_debug("%s:%d read_bus_type failed\n",
+                               __func__, __LINE__);
+                       return result;
+               }
+               if (x == bus_type)
+                       break;
+       }
+
+       BUG_ON(bus_index == 10);
+
+       pr_debug("%s:%d: found bus_type %u at bus_index %u\n",
+               __func__, __LINE__, bus_type, bus_index);
+
+       result = ps3_repository_read_bus_num_dev(bus_index, &num_dev);
+
+       if (result) {
+               pr_debug("%s:%d read_bus_num_dev failed\n",
+                       __func__, __LINE__);
+               return result;
+       }
+
+       result = find_device(bus_index, num_dev, start_dev
+               ? start_dev->dev_index + 1 : 0, dev_type, dev);
+
+       if (result) {
+               pr_debug("%s:%d find_device failed\n", __func__, __LINE__);
+               return result;
+       }
+
+       result = ps3_repository_read_bus_id(bus_index, &dev->did.bus_id);
+
+       if (result) {
+               pr_debug("%s:%d read_bus_id failed\n",
+                       __func__, __LINE__);
+               return result;
+       }
+
+       dev->bus_index = bus_index;
+
+       pr_debug("%s:%d found: bus_id %u, dev_id %u\n",
+               __func__, __LINE__, dev->did.bus_id, dev->did.dev_id);
+
+       return result;
+}
+
+int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+       enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
+{
+       int result = 0;
+       unsigned int res_index;
+
+       pr_debug("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
+
+       *interrupt_id = UINT_MAX;
+
+       for (res_index = 0; res_index < 10; res_index++) {
+               enum ps3_interrupt_type t;
+               unsigned int id;
+
+               result = ps3_repository_read_dev_intr(dev->bus_index,
+                       dev->dev_index, res_index, &t, &id);
+
+               if (result) {
+                       pr_debug("%s:%d read_dev_intr failed\n",
+                               __func__, __LINE__);
+                       return result;
+               }
+
+               if (t == intr_type) {
+                       *interrupt_id = id;
+                       break;
+               }
+       }
+
+       BUG_ON(res_index == 10);
+
+       pr_debug("%s:%d: found intr_type %u at res_index %u\n",
+               __func__, __LINE__, intr_type, res_index);
+
+       return result;
+}
+
+int ps3_repository_find_region(const struct ps3_repository_device *dev,
+       enum ps3_region_type reg_type, u64 *bus_addr, u64 *len)
+{
+       int result = 0;
+       unsigned int res_index;
+
+       pr_debug("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
+
+       *bus_addr = *len = 0;
+
+       for (res_index = 0; res_index < 10; res_index++) {
+               enum ps3_region_type t;
+               u64 a;
+               u64 l;
+
+               result = ps3_repository_read_dev_reg(dev->bus_index,
+                       dev->dev_index, res_index, &t, &a, &l);
+
+               if (result) {
+                       pr_debug("%s:%d read_dev_reg failed\n",
+                               __func__, __LINE__);
+                       return result;
+               }
+
+               if (t == reg_type) {
+                       *bus_addr = a;
+                       *len = l;
+                       break;
+               }
+       }
+
+       BUG_ON(res_index == 10);
+
+       pr_debug("%s:%d: found reg_type %u at res_index %u\n",
+               __func__, __LINE__, reg_type, res_index);
+
+       return result;
+}
+
+int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
+{
+       return read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("pu", 0),
+               ppe_id,
+               make_field("rm_size", 0),
+               rm_size, 0);
+}
+
+int ps3_repository_read_region_total(u64 *region_total)
+{
+       return read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("rgntotal", 0),
+               0, 0,
+               region_total, 0);
+}
+
+/**
+ * ps3_repository_read_mm_info - Read mm info for single pu system.
+ * @rm_base: Real mode memory base address.
+ * @rm_size: Real mode memory size.
+ * @region_total: Maximum memory region size.
+ */
+
+int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
+{
+       int result;
+       u64 ppe_id;
+
+       lv1_get_logical_ppe_id(&ppe_id);
+       *rm_base = 0;
+       result = ps3_repository_read_rm_size(ppe_id, rm_size);
+       return result ? result
+               : ps3_repository_read_region_total(region_total);
+}
+
+/**
+ * ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
+ * @num_spu_reserved: Number of physical spus reserved.
+ */
+
+int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("spun", 0),
+               0, 0,
+               &v1, 0);
+       *num_spu_reserved = v1;
+       return result;
+}
+
+/**
+ * ps3_repository_read_num_spu_resource_id - Number of spu resource reservations.
+ * @num_resource_id: Number of spu resource ids.
+ */
+
+int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("spursvn", 0),
+               0, 0,
+               &v1, 0);
+       *num_resource_id = v1;
+       return result;
+}
+
+/**
+ * ps3_repository_read_spu_resource_id - spu resource reservation id value.
+ * @res_index: Resource reservation index.
+ * @resource_type: Resource reservation type.
+ * @resource_id: Resource reservation id.
+ */
+
+int ps3_repository_read_spu_resource_id(unsigned int res_index,
+       enum ps3_spu_resource_type* resource_type, unsigned int *resource_id)
+{
+       int result;
+       u64 v1;
+       u64 v2;
+
+       result = read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("spursv", 0),
+               res_index,
+               0,
+               &v1, &v2);
+       *resource_type = v1;
+       *resource_id = v2;
+       return result;
+}
+
+int ps3_repository_read_boot_dat_address(u64 *address)
+{
+       return read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("boot_dat", 0),
+               make_field("address", 0),
+               0,
+               address, 0);
+}
+
+int ps3_repository_read_boot_dat_size(unsigned int *size)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_CURRENT,
+               make_first_field("bi", 0),
+               make_field("boot_dat", 0),
+               make_field("size", 0),
+               0,
+               &v1, 0);
+       *size = v1;
+       return result;
+}
+
+/**
+ * ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
+ * @lpar_addr: lpar address of cell_ext_os_area.
+ * @size: size of cell_ext_os_area.
+ */
+
+int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
+{
+       int result;
+
+       *size = 0;
+       result = ps3_repository_read_boot_dat_address(lpar_addr);
+       return result ? result
+               : ps3_repository_read_boot_dat_size(size);
+}
+
+int ps3_repository_read_num_be(unsigned int *num_be)
+{
+       int result;
+       u64 v1;
+
+       result = read_node(PS3_LPAR_ID_PME,
+               make_first_field("ben", 0),
+               0,
+               0,
+               0,
+               &v1, 0);
+       *num_be = v1;
+       return result;
+}
+
+int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
+{
+       return read_node(PS3_LPAR_ID_PME,
+               make_first_field("be", be_index),
+               0,
+               0,
+               0,
+               node_id, 0);
+}
+
+int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+{
+       return read_node(PS3_LPAR_ID_PME,
+               make_first_field("be", 0),
+               node_id,
+               make_field("clock", 0),
+               0,
+               tb_freq, 0);
+}
+
+int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+{
+       int result;
+       u64 node_id;
+
+       *tb_freq = 0;
+       result = ps3_repository_read_be_node_id(0, &node_id);
+       return result ? result
+               : ps3_repository_read_tb_freq(node_id, tb_freq);
+}
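
A minimal usage sketch of the repository lookup helpers above (illustrative only, not part of this commit). The helper name probe_one_device is hypothetical; it assumes the declarations from this file and the enum/struct definitions in asm/ps3.h are in scope, and it takes the type values as parameters rather than assuming particular constants.

static int probe_one_device(enum ps3_bus_type bus_type,
        enum ps3_dev_type dev_type, enum ps3_interrupt_type intr_type,
        enum ps3_region_type reg_type)
{
        struct ps3_repository_device dev;
        unsigned int interrupt_id;
        u64 bus_addr, len;
        int result;

        /* A NULL start_dev starts the search at bus_index 0, dev_index 0. */
        result = ps3_repository_find_device(bus_type, dev_type, NULL, &dev);
        if (result)
                return result;

        result = ps3_repository_find_interrupt(&dev, intr_type, &interrupt_id);
        if (result)
                return result;

        result = ps3_repository_find_region(&dev, reg_type, &bus_addr, &len);
        if (result)
                return result;

        pr_debug("%s: (%u:%u) interrupt_id %u, bus_addr %lxh, len %lxh\n",
                __func__, dev.did.bus_id, dev.did.dev_id, interrupt_id,
                (unsigned long)bus_addr, (unsigned long)len);

        return 0;
}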
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
new file mode 100644 (file)
index 0000000..d8b5cad
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ *  PS3 platform setup routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/kexec.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/time.h>
+#include <asm/iommu.h>
+#include <asm/udbg.h>
+#include <asm/prom.h>
+#include <asm/lv1call.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+static void ps3_show_cpuinfo(struct seq_file *m)
+{
+       seq_printf(m, "machine\t\t: %s\n", ppc_md.name);
+}
+
+static void ps3_power_save(void)
+{
+       /*
+        * lv1_pause() puts the PPE thread into inactive state until an
+        * irq on an unmasked plug exists. MSR[EE] has no effect.
+        * flags: 0 = wake on DEC interrupt, 1 = ignore DEC interrupt.
+        */
+
+       lv1_pause(0);
+}
+
+static void ps3_panic(char *str)
+{
+       DBG("%s:%d %s\n", __func__, __LINE__, str);
+
+#ifdef CONFIG_SMP
+       smp_send_stop();
+#endif
+       printk("\n");
+       printk("   System does not reboot automatically.\n");
+       printk("   Please press POWER button.\n");
+       printk("\n");
+
+       for (;;) ;
+}
+
+static void __init ps3_setup_arch(void)
+{
+       DBG(" -> %s:%d\n", __func__, __LINE__);
+
+       ps3_spu_set_platform();
+       ps3_map_htab();
+
+#ifdef CONFIG_SMP
+       smp_init_ps3();
+#endif
+
+#ifdef CONFIG_DUMMY_CONSOLE
+       conswitchp = &dummy_con;
+#endif
+
+       ppc_md.power_save = ps3_power_save;
+
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+static void __init ps3_progress(char *s, unsigned short hex)
+{
+       printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static int __init ps3_probe(void)
+{
+       unsigned long htab_size;
+       unsigned long dt_root;
+
+       DBG(" -> %s:%d\n", __func__, __LINE__);
+
+       dt_root = of_get_flat_dt_root();
+       if (!of_flat_dt_is_compatible(dt_root, "PS3"))
+               return 0;
+
+       powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
+
+       ps3_os_area_init();
+       ps3_mm_init();
+       ps3_mm_vas_create(&htab_size);
+       ps3_hpte_init(htab_size);
+
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+       return 1;
+}
+
+#if defined(CONFIG_KEXEC)
+static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+       DBG(" -> %s:%d\n", __func__, __LINE__);
+
+       if (secondary) {
+               int cpu;
+               for_each_online_cpu(cpu)
+                       if (cpu)
+                               ps3_smp_cleanup_cpu(cpu);
+       } else
+               ps3_smp_cleanup_cpu(0);
+
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+
+static void ps3_machine_kexec(struct kimage *image)
+{
+       unsigned long ppe_id;
+
+       DBG(" -> %s:%d\n", __func__, __LINE__);
+
+       lv1_get_logical_ppe_id(&ppe_id);
+       lv1_configure_irq_state_bitmap(ppe_id, 0, 0);
+       ps3_mm_shutdown();
+       ps3_mm_vas_destroy();
+
+       default_machine_kexec(image);
+
+       DBG(" <- %s:%d\n", __func__, __LINE__);
+}
+#endif
+
+define_machine(ps3) {
+       .name                           = "PS3",
+       .probe                          = ps3_probe,
+       .setup_arch                     = ps3_setup_arch,
+       .show_cpuinfo                   = ps3_show_cpuinfo,
+       .init_IRQ                       = ps3_init_IRQ,
+       .panic                          = ps3_panic,
+       .get_boot_time                  = ps3_get_boot_time,
+       .set_rtc_time                   = ps3_set_rtc_time,
+       .get_rtc_time                   = ps3_get_rtc_time,
+       .calibrate_decr                 = ps3_calibrate_decr,
+       .progress                       = ps3_progress,
+#if defined(CONFIG_KEXEC)
+       .kexec_cpu_down                 = ps3_kexec_cpu_down,
+       .machine_kexec                  = ps3_machine_kexec,
+       .machine_kexec_prepare          = default_machine_kexec_prepare,
+       .machine_crash_shutdown         = default_machine_crash_shutdown,
+#endif
+};
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
new file mode 100644 (file)
index 0000000..11d2080
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ *  PS3 SMP routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+#if defined(DEBUG)
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...) do{if(0)printk(fmt);}while(0)
+#endif
+
+static irqreturn_t ipi_function_handler(int irq, void *msg)
+{
+       smp_message_recv((int)(long)msg);
+       return IRQ_HANDLED;
+}
+
+/**
+ * virqs - a per cpu array of virqs for ipi use
+ */
+
+#define MSG_COUNT 4
+static DEFINE_PER_CPU(unsigned int, virqs[MSG_COUNT]);
+
+static const char *names[MSG_COUNT] = {
+       "ipi call",
+       "ipi reschedule",
+       "ipi migrate",
+       "ipi debug brk"
+};
+
+static void do_message_pass(int target, int msg)
+{
+       int result;
+       unsigned int virq;
+
+       if (msg >= MSG_COUNT) {
+               DBG("%s:%d: bad msg: %d\n", __func__, __LINE__, msg);
+               return;
+       }
+
+       virq = per_cpu(virqs, target)[msg];
+       result = ps3_send_event_locally(virq);
+
+       if (result)
+               DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
+                       " (%d)\n", __func__, __LINE__, target, msg, result);
+}
+
+static void ps3_smp_message_pass(int target, int msg)
+{
+       int cpu;
+
+       if (target < NR_CPUS)
+               do_message_pass(target, msg);
+       else if (target == MSG_ALL_BUT_SELF) {
+               for_each_online_cpu(cpu)
+                       if (cpu != smp_processor_id())
+                               do_message_pass(cpu, msg);
+       } else {
+               for_each_online_cpu(cpu)
+                       do_message_pass(cpu, msg);
+       }
+}
+
+static int ps3_smp_probe(void)
+{
+       return 2;
+}
+
+static void __init ps3_smp_setup_cpu(int cpu)
+{
+       int result;
+       unsigned int *virqs = per_cpu(virqs, cpu);
+       int i;
+
+       DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+
+       /*
+        * Check assumptions on virqs[] indexing. If this
+        * check fails, then a different mapping of PPC_MSG_
+        * to index needs to be setup.
+        */
+
+       BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION  != 0);
+       BUILD_BUG_ON(PPC_MSG_RESCHEDULE     != 1);
+       BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+
+       for (i = 0; i < MSG_COUNT; i++) {
+               result = ps3_alloc_event_irq(&virqs[i]);
+
+               if (result)
+                       continue;
+
+               DBG("%s:%d: (%d, %d) => virq %u\n",
+                       __func__, __LINE__, cpu, i, virqs[i]);
+
+
+               request_irq(virqs[i], ipi_function_handler, IRQF_DISABLED,
+                       names[i], (void*)(long)i);
+       }
+
+       ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
+
+       DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+}
+
+void ps3_smp_cleanup_cpu(int cpu)
+{
+       unsigned int *virqs = per_cpu(virqs, cpu);
+       int i;
+
+       DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
+       for (i = 0; i < MSG_COUNT; i++) {
+               ps3_free_event_irq(virqs[i]);
+               free_irq(virqs[i], (void*)(long)i);
+               virqs[i] = NO_IRQ;
+       }
+       DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
+}
+
+static struct smp_ops_t ps3_smp_ops = {
+       .probe          = ps3_smp_probe,
+       .message_pass   = ps3_smp_message_pass,
+       .kick_cpu       = smp_generic_kick_cpu,
+       .setup_cpu      = ps3_smp_setup_cpu,
+};
+
+void smp_init_ps3(void)
+{
+       DBG(" -> %s\n", __func__);
+       smp_ops = &ps3_smp_ops;
+       DBG(" <- %s\n", __func__);
+}
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
new file mode 100644 (file)
index 0000000..644532c
--- /dev/null
@@ -0,0 +1,613 @@
+/*
+ *  PS3 Platform spu routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mmzone.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+
+/* spu_management_ops */
+
+/**
+ * enum spe_type - Type of spe to create.
+ * @spe_type_logical: Standard logical spe.
+ *
+ * For use with lv1_construct_logical_spe().  The current HV does not support
+ * any types other than those listed.
+ */
+
+enum spe_type {
+       SPE_TYPE_LOGICAL = 0,
+};
+
+/**
+ * struct spe_shadow - logical spe shadow register area.
+ *
+ * Read-only shadow of spe registers.
+ */
+
+struct spe_shadow {
+       u8 padding_0000[0x0140];
+       u64 int_status_class0_RW;       /* 0x0140 */
+       u64 int_status_class1_RW;       /* 0x0148 */
+       u64 int_status_class2_RW;       /* 0x0150 */
+       u8 padding_0158[0x0610-0x0158];
+       u64 mfc_dsisr_RW;               /* 0x0610 */
+       u8 padding_0618[0x0620-0x0618];
+       u64 mfc_dar_RW;                 /* 0x0620 */
+       u8 padding_0628[0x0800-0x0628];
+       u64 mfc_dsipr_R;                /* 0x0800 */
+       u8 padding_0808[0x0810-0x0808];
+       u64 mfc_lscrr_R;                /* 0x0810 */
+       u8 padding_0818[0x0c00-0x0818];
+       u64 mfc_cer_R;                  /* 0x0c00 */
+       u8 padding_0c08[0x0f00-0x0c08];
+       u64 spe_execution_status;       /* 0x0f00 */
+       u8 padding_0f08[0x1000-0x0f08];
+} __attribute__ ((packed));
+
+
+/**
+ * enum spe_ex_state - Logical spe execution state.
+ * @spe_ex_state_unexecutable: Uninitialized.
+ * @spe_ex_state_executable: Enabled, not ready.
+ * @spe_ex_state_executed: Ready for use.
+ *
+ * The execution state (status) of the logical spe as reported in
+ * struct spe_shadow:spe_execution_status.
+ */
+
+enum spe_ex_state {
+       SPE_EX_STATE_UNEXECUTABLE = 0,
+       SPE_EX_STATE_EXECUTABLE = 2,
+       SPE_EX_STATE_EXECUTED = 3,
+};
+
+/**
+ * struct priv1_cache - Cached values of priv1 registers.
+ * @masks: Array of cached spe interrupt masks, indexed by class.
+ * @sr1: Cached mfc_sr1 register.
+ * @tclass_id: Cached mfc_tclass_id register.
+ */
+
+struct priv1_cache {
+       u64 masks[3];
+       u64 sr1;
+       u64 tclass_id;
+};
+
+/**
+ * struct spu_pdata - Platform state variables.
+ * @spe_id: HV spe id returned by lv1_construct_logical_spe().
+ * @resource_id: HV spe resource id returned by
+ *     ps3_repository_read_spu_resource_id().
+ * @priv2_addr: lpar address of spe priv2 area returned by
+ *     lv1_construct_logical_spe().
+ * @shadow_addr: lpar address of spe register shadow area returned by
+ *     lv1_construct_logical_spe().
+ * @shadow: Virtual (ioremap) address of spe register shadow area.
+ * @cache: Cached values of priv1 registers.
+ */
+
+struct spu_pdata {
+       u64 spe_id;
+       u64 resource_id;
+       u64 priv2_addr;
+       u64 shadow_addr;
+       struct spe_shadow __iomem *shadow;
+       struct priv1_cache cache;
+};
+
+static struct spu_pdata *spu_pdata(struct spu *spu)
+{
+       return spu->pdata;
+}
+
+#define dump_areas(_a, _b, _c, _d, _e) \
+       _dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
+static void _dump_areas(unsigned int spe_id, unsigned long priv2,
+       unsigned long problem, unsigned long ls, unsigned long shadow,
+       const char* func, int line)
+{
+       pr_debug("%s:%d: spe_id:  %xh (%u)\n", func, line, spe_id, spe_id);
+       pr_debug("%s:%d: priv2:   %lxh\n", func, line, priv2);
+       pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
+       pr_debug("%s:%d: ls:      %lxh\n", func, line, ls);
+       pr_debug("%s:%d: shadow:  %lxh\n", func, line, shadow);
+}
+
+static unsigned long get_vas_id(void)
+{
+       unsigned long id;
+
+       lv1_get_logical_ppe_id(&id);
+       lv1_get_virtual_address_space_id_of_ppe(id, &id);
+
+       return id;
+}
+
+static int __init construct_spu(struct spu *spu)
+{
+       int result;
+       unsigned long unused;
+
+       result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
+               PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
+               &spu_pdata(spu)->priv2_addr, &spu->problem_phys,
+               &spu->local_store_phys, &unused,
+               &spu_pdata(spu)->shadow_addr,
+               &spu_pdata(spu)->spe_id);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               return result;
+       }
+
+       return result;
+}
+
+static int __init add_spu_pages(unsigned long start_addr, unsigned long size)
+{
+       int result;
+       unsigned long start_pfn;
+       unsigned long nr_pages;
+       struct pglist_data *pgdata;
+       struct zone *zone;
+
+       BUG_ON(!mem_init_done);
+
+       start_pfn = start_addr >> PAGE_SHIFT;
+       nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       pgdata = NODE_DATA(0);
+       zone = pgdata->node_zones;
+
+       result = __add_pages(zone, start_pfn, nr_pages);
+
+       if (result)
+               pr_debug("%s:%d: __add_pages failed: (%d)\n",
+                       __func__, __LINE__, result);
+
+       return result;
+}
+
+static void spu_unmap(struct spu *spu)
+{
+       iounmap(spu->priv2);
+       iounmap(spu->problem);
+       iounmap((__force u8 __iomem *)spu->local_store);
+       iounmap(spu_pdata(spu)->shadow);
+}
+
+static int __init setup_areas(struct spu *spu)
+{
+       struct table {char* name; unsigned long addr; unsigned long size;};
+       int result;
+
+       /* setup pages */
+
+       result = add_spu_pages(spu->local_store_phys, LS_SIZE);
+       if (result)
+               goto fail_add;
+
+       result = add_spu_pages(spu->problem_phys, sizeof(struct spu_problem));
+       if (result)
+               goto fail_add;
+
+       /* ioremap */
+
+       spu_pdata(spu)->shadow = __ioremap(
+               spu_pdata(spu)->shadow_addr, sizeof(struct spe_shadow),
+               PAGE_READONLY | _PAGE_NO_CACHE | _PAGE_GUARDED);
+       if (!spu_pdata(spu)->shadow) {
+               pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
+               goto fail_ioremap;
+       }
+
+       spu->local_store = ioremap(spu->local_store_phys, LS_SIZE);
+       if (!spu->local_store) {
+               pr_debug("%s:%d: ioremap local_store failed\n",
+                       __func__, __LINE__);
+               goto fail_ioremap;
+       }
+
+       spu->problem = ioremap(spu->problem_phys,
+               sizeof(struct spu_problem));
+       if (!spu->problem) {
+               pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
+               goto fail_ioremap;
+       }
+
+       spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
+               sizeof(struct spu_priv2));
+       if (!spu->priv2) {
+               pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
+               goto fail_ioremap;
+       }
+
+       dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
+               spu->problem_phys, spu->local_store_phys,
+               spu_pdata(spu)->shadow_addr);
+       dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
+               (unsigned long)spu->problem, (unsigned long)spu->local_store,
+               (unsigned long)spu_pdata(spu)->shadow);
+
+       return 0;
+
+fail_ioremap:
+       spu_unmap(spu);
+fail_add:
+       return result;
+}
+
+static int __init setup_interrupts(struct spu *spu)
+{
+       int result;
+
+       result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 0,
+               &spu->irqs[0]);
+
+       if (result)
+               goto fail_alloc_0;
+
+       result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 1,
+               &spu->irqs[1]);
+
+       if (result)
+               goto fail_alloc_1;
+
+       result = ps3_alloc_spe_irq(spu_pdata(spu)->spe_id, 2,
+               &spu->irqs[2]);
+
+       if (result)
+               goto fail_alloc_2;
+
+       return result;
+
+fail_alloc_2:
+       ps3_free_spe_irq(spu->irqs[1]);
+fail_alloc_1:
+       ps3_free_spe_irq(spu->irqs[0]);
+fail_alloc_0:
+       spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+       return result;
+}
+
+static int __init enable_spu(struct spu *spu)
+{
+       int result;
+
+       result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
+               spu_pdata(spu)->resource_id);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               goto fail_enable;
+       }
+
+       result = setup_areas(spu);
+
+       if (result)
+               goto fail_areas;
+
+       result = setup_interrupts(spu);
+
+       if (result)
+               goto fail_interrupts;
+
+       return 0;
+
+fail_interrupts:
+       spu_unmap(spu);
+fail_areas:
+       lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
+fail_enable:
+       return result;
+}
+
+static int ps3_destroy_spu(struct spu *spu)
+{
+       int result;
+
+       pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
+
+       result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
+       BUG_ON(result);
+
+       ps3_free_spe_irq(spu->irqs[2]);
+       ps3_free_spe_irq(spu->irqs[1]);
+       ps3_free_spe_irq(spu->irqs[0]);
+
+       spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
+
+       spu_unmap(spu);
+
+       result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
+       BUG_ON(result);
+
+       kfree(spu->pdata);
+       spu->pdata = NULL;
+
+       return 0;
+}
+
+static int __init ps3_create_spu(struct spu *spu, void *data)
+{
+       int result;
+
+       pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
+
+       spu->pdata = kzalloc(sizeof(struct spu_pdata),
+               GFP_KERNEL);
+
+       if (!spu->pdata) {
+               result = -ENOMEM;
+               goto fail_malloc;
+       }
+
+       spu_pdata(spu)->resource_id = (unsigned long)data;
+
+       /* Init cached reg values to HV defaults. */
+
+       spu_pdata(spu)->cache.sr1 = 0x33;
+
+       result = construct_spu(spu);
+
+       if (result)
+               goto fail_construct;
+
+       /* For now, just go ahead and enable it. */
+
+       result = enable_spu(spu);
+
+       if (result)
+               goto fail_enable;
+
+       /* Make sure the spu is in SPE_EX_STATE_EXECUTED. */
+
+       /* need something better here!!! */
+       while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
+               != SPE_EX_STATE_EXECUTED)
+               (void)0;
+
+       return result;
+
+fail_enable:
+fail_construct:
+       ps3_destroy_spu(spu);
+fail_malloc:
+       return result;
+}
+
+static int __init ps3_enumerate_spus(int (*fn)(void *data))
+{
+       int result;
+       unsigned int num_resource_id;
+       unsigned int i;
+
+       result = ps3_repository_read_num_spu_resource_id(&num_resource_id);
+
+       pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
+               num_resource_id);
+
+       /*
+        * For now, just create logical spus equal to the number
+        * of physical spus reserved for the partition.
+        */
+
+       for (i = 0; i < num_resource_id; i++) {
+               enum ps3_spu_resource_type resource_type;
+               unsigned int resource_id;
+
+               result = ps3_repository_read_spu_resource_id(i,
+                       &resource_type, &resource_id);
+
+               if (result)
+                       break;
+
+               if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
+                       result = fn((void*)(unsigned long)resource_id);
+
+                       if (result)
+                               break;
+               }
+       }
+
+       if (result)
+               printk(KERN_WARNING "%s:%d: Error initializing spus\n",
+                       __func__, __LINE__);
+
+       return result;
+}
+
+const struct spu_management_ops spu_management_ps3_ops = {
+       .enumerate_spus = ps3_enumerate_spus,
+       .create_spu = ps3_create_spu,
+       .destroy_spu = ps3_destroy_spu,
+};
+
+/* spu_priv1_ops */
+
+static void int_mask_and(struct spu *spu, int class, u64 mask)
+{
+       u64 old_mask;
+
+       /* are these serialized by caller??? */
+       old_mask = spu_int_mask_get(spu, class);
+       spu_int_mask_set(spu, class, old_mask & mask);
+}
+
+static void int_mask_or(struct spu *spu, int class, u64 mask)
+{
+       u64 old_mask;
+
+       old_mask = spu_int_mask_get(spu, class);
+       spu_int_mask_set(spu, class, old_mask | mask);
+}
+
+static void int_mask_set(struct spu *spu, int class, u64 mask)
+{
+       spu_pdata(spu)->cache.masks[class] = mask;
+       lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
+               spu_pdata(spu)->cache.masks[class]);
+}
+
+static u64 int_mask_get(struct spu *spu, int class)
+{
+       return spu_pdata(spu)->cache.masks[class];
+}
+
+static void int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+       /* Note that MFC_DSISR will be cleared when class1[MF] is set. */
+
+       lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
+               stat, 0);
+}
+
+static u64 int_stat_get(struct spu *spu, int class)
+{
+       u64 stat;
+
+       lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
+       return stat;
+}
+
+static void cpu_affinity_set(struct spu *spu, int cpu)
+{
+       /* No support. */
+}
+
+static u64 mfc_dar_get(struct spu *spu)
+{
+       return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
+}
+
+static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+       /* Nothing to do, cleared in int_stat_clear(). */
+}
+
+static u64 mfc_dsisr_get(struct spu *spu)
+{
+       return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
+}
+
+static void mfc_sdr_setup(struct spu *spu)
+{
+       /* Nothing to do. */
+}
+
+static void mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+       /* Check bits allowed by HV. */
+
+       static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
+               | MFC_STATE1_PROBLEM_STATE_MASK);
+
+       BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
+
+       spu_pdata(spu)->cache.sr1 = sr1;
+       lv1_set_spe_privilege_state_area_1_register(
+               spu_pdata(spu)->spe_id,
+               offsetof(struct spu_priv1, mfc_sr1_RW),
+               spu_pdata(spu)->cache.sr1);
+}
+
+static u64 mfc_sr1_get(struct spu *spu)
+{
+       return spu_pdata(spu)->cache.sr1;
+}
+
+static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+       spu_pdata(spu)->cache.tclass_id = tclass_id;
+       lv1_set_spe_privilege_state_area_1_register(
+               spu_pdata(spu)->spe_id,
+               offsetof(struct spu_priv1, mfc_tclass_id_RW),
+               spu_pdata(spu)->cache.tclass_id);
+}
+
+static u64 mfc_tclass_id_get(struct spu *spu)
+{
+       return spu_pdata(spu)->cache.tclass_id;
+}
+
+static void tlb_invalidate(struct spu *spu)
+{
+       /* Nothing to do. */
+}
+
+static void resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+       /* No support. */
+}
+
+static u64 resource_allocation_groupID_get(struct spu *spu)
+{
+       return 0; /* No support. */
+}
+
+static void resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+       /* No support. */
+}
+
+static u64 resource_allocation_enable_get(struct spu *spu)
+{
+       return 0; /* No support. */
+}
+
+const struct spu_priv1_ops spu_priv1_ps3_ops = {
+       .int_mask_and = int_mask_and,
+       .int_mask_or = int_mask_or,
+       .int_mask_set = int_mask_set,
+       .int_mask_get = int_mask_get,
+       .int_stat_clear = int_stat_clear,
+       .int_stat_get = int_stat_get,
+       .cpu_affinity_set = cpu_affinity_set,
+       .mfc_dar_get = mfc_dar_get,
+       .mfc_dsisr_set = mfc_dsisr_set,
+       .mfc_dsisr_get = mfc_dsisr_get,
+       .mfc_sdr_setup = mfc_sdr_setup,
+       .mfc_sr1_set = mfc_sr1_set,
+       .mfc_sr1_get = mfc_sr1_get,
+       .mfc_tclass_id_set = mfc_tclass_id_set,
+       .mfc_tclass_id_get = mfc_tclass_id_get,
+       .tlb_invalidate = tlb_invalidate,
+       .resource_allocation_groupID_set = resource_allocation_groupID_set,
+       .resource_allocation_groupID_get = resource_allocation_groupID_get,
+       .resource_allocation_enable_set = resource_allocation_enable_set,
+       .resource_allocation_enable_get = resource_allocation_enable_get,
+};
+
+void ps3_spu_set_platform(void)
+{
+       spu_priv1_ops = &spu_priv1_ps3_ops;
+       spu_management_ops = &spu_management_ps3_ops;
+}
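
A short sketch of the priv1 mask-cache contract described above (illustrative only, not part of this commit). int_mask_get() returns only the cached value, so reads stay cheap and always reflect the last value written through int_mask_set(). The function name example_mask_ops is hypothetical and assumes spu_priv1_ps3_ops has already been installed by ps3_spu_set_platform(), so that the generic spu_int_mask_get()/spu_int_mask_set() wrappers used by int_mask_and() dispatch back to the routines in this file.

static void example_mask_ops(struct spu *spu)
{
        u64 before, after;

        before = int_mask_get(spu, 0);          /* cached value, no lv1 call */
        int_mask_and(spu, 0, ~0x1ull);          /* clear bit 0 of the class 0 mask */
        after = int_mask_get(spu, 0);           /* reflects the lv1 write above */

        BUG_ON(after != (before & ~0x1ull));
}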
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
new file mode 100644 (file)
index 0000000..1bae8b1
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ *  PS3 time and rtc routines.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/rtc.h>
+#include <asm/lv1call.h>
+#include <asm/ps3.h>
+
+#include "platform.h"
+
+#define dump_tm(_a) _dump_tm(_a, __func__, __LINE__)
+static void _dump_tm(const struct rtc_time *tm, const char* func, int line)
+{
+       pr_debug("%s:%d tm_sec  %d\n", func, line, tm->tm_sec);
+       pr_debug("%s:%d tm_min  %d\n", func, line, tm->tm_min);
+       pr_debug("%s:%d tm_hour %d\n", func, line, tm->tm_hour);
+       pr_debug("%s:%d tm_mday %d\n", func, line, tm->tm_mday);
+       pr_debug("%s:%d tm_mon  %d\n", func, line, tm->tm_mon);
+       pr_debug("%s:%d tm_year %d\n", func, line, tm->tm_year);
+       pr_debug("%s:%d tm_wday %d\n", func, line, tm->tm_wday);
+}
+
+#define dump_time(_a) _dump_time(_a, __func__, __LINE__)
+static void __attribute__ ((unused)) _dump_time(int time, const char* func,
+       int line)
+{
+       struct rtc_time tm;
+
+       to_tm(time, &tm);
+
+       pr_debug("%s:%d time    %d\n", func, line, time);
+       _dump_tm(&tm, func, line);
+}
+
+/**
+ * rtc_shift - Difference in seconds between 1970 and the ps3 rtc value.
+ */
+
+static s64 rtc_shift;
+
+void __init ps3_calibrate_decr(void)
+{
+       int result;
+       u64 tmp;
+
+       result = ps3_repository_read_be_tb_freq(0, &tmp);
+       BUG_ON(result);
+
+       ppc_tb_freq = tmp;
+       ppc_proc_freq = ppc_tb_freq * 40;
+
+       rtc_shift = ps3_os_area_rtc_diff();
+}
+
+static u64 read_rtc(void)
+{
+       int result;
+       u64 rtc_val;
+       u64 tb_val;
+
+       result = lv1_get_rtc(&rtc_val, &tb_val);
+       BUG_ON(result);
+
+       return rtc_val;
+}
+
+int ps3_set_rtc_time(struct rtc_time *tm)
+{
+       u64 now = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+               tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+       rtc_shift = now - read_rtc();
+       return 0;
+}
+
+void ps3_get_rtc_time(struct rtc_time *tm)
+{
+       to_tm(read_rtc() + rtc_shift, tm);
+       tm->tm_year -= 1900;
+       tm->tm_mon -= 1;
+}
+
+unsigned long __init ps3_get_boot_time(void)
+{
+       return read_rtc() + rtc_shift;
+}
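
A small sketch of the rtc_shift arithmetic used by the routines above (illustrative only, not part of this commit). lv1_get_rtc() counts seconds from the hypervisor's own epoch, so wall-clock time is always reconstructed as read_rtc() + rtc_shift. The function name and the example date are hypothetical.

static void example_rtc_shift(void)
{
        struct rtc_time tm;
        u64 wall;

        /* What ps3_set_rtc_time() does: remember the offset to the 1970 epoch. */
        wall = mktime(2006, 12, 7, 23, 4, 20); /* arbitrary example date */
        rtc_shift = wall - read_rtc();

        /* What ps3_get_rtc_time() and ps3_get_boot_time() do: apply the offset. */
        to_tm(read_rtc() + rtc_shift, &tm);
        /* tm now holds the example date, plus seconds elapsed between the reads. */
}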
index 137077451316b7f83f8e88f1a0aebc026a4b3092..49037edf7d3999545c9190d2d7fd0e0594330f49 100644 (file)
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
  * eeh_thread_launcher
  * @dummy - unused
  */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
        if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
                printk(KERN_ERR "Failed to start EEH daemon\n");
index 556c279a789d4046d7c72647b271e2bffad7327a..3c95392f4f41d8edf944002824bb013069311f45 100644 (file)
@@ -309,7 +309,7 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
 }
 
-static void iommu_bus_setup_pSeries(struct pci_bus *bus)
+static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
 {
        struct device_node *dn;
        struct iommu_table *tbl;
@@ -318,10 +318,9 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
        struct pci_dn *pci;
        int children;
 
-       DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
-
        dn = pci_bus_to_OF_node(bus);
-       pci = PCI_DN(dn);
+
+       DBG("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
 
        if (bus->self) {
                /* This is not a root bus, any setup will be done for the
@@ -329,6 +328,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
                 */
                return;
        }
+       pci = PCI_DN(dn);
 
        /* Check if the ISA bus on the system is under
         * this PHB.
@@ -390,17 +390,17 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
 }
 
 
-static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
+static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
 {
        struct iommu_table *tbl;
        struct device_node *dn, *pdn;
        struct pci_dn *ppci;
        const void *dma_window = NULL;
 
-       DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
-
        dn = pci_bus_to_OF_node(bus);
 
+       DBG("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name);
+
        /* Find nearest ibm,dma-window, walking up the device tree */
        for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
                dma_window = get_property(pdn, "ibm,dma-window", NULL);
@@ -409,11 +409,15 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
        }
 
        if (dma_window == NULL) {
-               DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name);
+               DBG("  no ibm,dma-window property !\n");
                return;
        }
 
        ppci = PCI_DN(pdn);
+
+       DBG("  parent is %s, iommu_table: 0x%p\n",
+           pdn->full_name, ppci->iommu_table);
+
        if (!ppci->iommu_table) {
                /* Bussubno hasn't been copied yet.
                 * Do it now because iommu_table_setparms_lpar needs it.
@@ -427,6 +431,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
                iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
 
                ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
+               DBG("  created table: %p\n", ppci->iommu_table);
        }
 
        if (pdn != dn)
@@ -434,27 +439,27 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
 }
 
 
-static void iommu_dev_setup_pSeries(struct pci_dev *dev)
+static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
 {
-       struct device_node *dn, *mydn;
+       struct device_node *dn;
        struct iommu_table *tbl;
 
-       DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));
+       DBG("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
 
-       mydn = dn = pci_device_to_OF_node(dev);
+       dn = dev->dev.archdata.of_node;
 
        /* If we're the direct child of a root bus, then we need to allocate
         * an iommu table ourselves. The bus setup code should have setup
         * the window sizes already.
         */
        if (!dev->bus->self) {
+               struct pci_controller *phb = PCI_DN(dn)->phb;
+
                DBG(" --> first child, no bridge. Allocating iommu table.\n");
                tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
-                                  PCI_DN(dn)->phb->node);
-               iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
-               PCI_DN(dn)->iommu_table = iommu_init_table(tbl,
-                                               PCI_DN(dn)->phb->node);
-
+                                  phb->node);
+               iommu_table_setparms(phb, dn, tbl);
+               dev->dev.archdata.dma_data = iommu_init_table(tbl, phb->node);
                return;
        }
 
@@ -465,11 +470,11 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
        while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
                dn = dn->parent;
 
-       if (dn && PCI_DN(dn)) {
-               PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
-       } else {
-               DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
-       }
+       if (dn && PCI_DN(dn))
+               dev->dev.archdata.dma_data = PCI_DN(dn)->iommu_table;
+       else
+               printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
+                      pci_name(dev));
 }
 
 static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
@@ -495,13 +500,15 @@ static struct notifier_block iommu_reconfig_nb = {
        .notifier_call = iommu_reconfig_notifier,
 };
 
-static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
+static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
 {
        struct device_node *pdn, *dn;
        struct iommu_table *tbl;
        const void *dma_window = NULL;
        struct pci_dn *pci;
 
+       DBG("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
+
        /* dev setup for LPAR is a little tricky, since the device tree might
         * contain the dma-window properties per-device and not neccesarily
         * for the bus. So we need to search upwards in the tree until we
@@ -509,9 +516,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
         * already allocated.
         */
        dn = pci_device_to_OF_node(dev);
-
-       DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
-            dev, pci_name(dev), dn->full_name);
+       DBG("  node is %s\n", dn->full_name);
 
        for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
             pdn = pdn->parent) {
@@ -520,16 +525,17 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
                        break;
        }
 
+       DBG("  parent is %s\n", pdn->full_name);
+
        /* Check for parent == NULL so we don't try to setup the empty EADS
         * slots on POWER4 machines.
         */
        if (dma_window == NULL || pdn->parent == NULL) {
-               DBG("No dma window for device, linking to parent\n");
-               PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
+               DBG("  no dma window for device, linking to parent\n");
+               dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
                return;
-       } else {
-               DBG("Found DMA window, allocating table\n");
        }
 
        pci = PCI_DN(pdn);
+       DBG("  found DMA window, table: %p\n", pci->iommu_table);
        if (!pci->iommu_table) {
@@ -542,24 +548,20 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
                iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
 
                pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
+               DBG("  created table: %p\n", pci->iommu_table);
        }
 
-       if (pdn != dn)
-               PCI_DN(dn)->iommu_table = pci->iommu_table;
+       dev->dev.archdata.dma_data = pci->iommu_table;
 }
 
-static void iommu_bus_setup_null(struct pci_bus *b) { }
-static void iommu_dev_setup_null(struct pci_dev *d) { }
-
 /* These are called very early. */
 void iommu_init_early_pSeries(void)
 {
        if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) {
                /* Direct I/O, IOMMU off */
-               ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-               ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-               pci_direct_iommu_init();
-
+               ppc_md.pci_dma_dev_setup = NULL;
+               ppc_md.pci_dma_bus_setup = NULL;
+               pci_dma_ops = &dma_direct_ops;
                return;
        }
 
@@ -572,19 +574,19 @@ void iommu_init_early_pSeries(void)
                        ppc_md.tce_free  = tce_free_pSeriesLP;
                }
                ppc_md.tce_get   = tce_get_pSeriesLP;
-               ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
-               ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
+               ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
+               ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
        } else {
                ppc_md.tce_build = tce_build_pSeries;
                ppc_md.tce_free  = tce_free_pSeries;
                ppc_md.tce_get   = tce_get_pseries;
-               ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
-               ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
+               ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
+               ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
        }
 
 
        pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
 
-       pci_iommu_init();
+       pci_dma_ops = &dma_iommu_ops;
 }
 
index 1820a0b0a8c6e55cd3f7351e34ea68a2f0110ab5..721436db3ef0bf87967b2982ebe6deecfea4db83 100644 (file)
@@ -282,7 +282,7 @@ void vpa_init(int cpu)
        }
 }
 
-long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
                              unsigned long va, unsigned long pa,
                              unsigned long rflags, unsigned long vflags,
                              int psize)
@@ -506,7 +506,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
  * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
  * lock.
  */
-void pSeries_lpar_flush_hash_range(unsigned long number, int local)
+static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 {
        int i;
        unsigned long flags = 0;
index 410a6bcc4ca008b9444e92ed567d9f29ac487577..715db5c89908f73435c178ba820a2a15bc3cb8d1 100644 (file)
@@ -29,8 +29,6 @@
 #include <asm/prom.h>
 #include <asm/ppc-pci.h>
 
-static int __devinitdata s7a_workaround = -1;
-
 #if 0
 void pcibios_name_device(struct pci_dev *dev)
 {
@@ -57,39 +55,6 @@ void pcibios_name_device(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
 #endif
 
-static void __devinit check_s7a(void)
-{
-       struct device_node *root;
-       const char *model;
-
-       s7a_workaround = 0;
-       root = of_find_node_by_path("/");
-       if (root) {
-               model = get_property(root, "model", NULL);
-               if (model && !strcmp(model, "IBM,7013-S7A"))
-                       s7a_workaround = 1;
-               of_node_put(root);
-       }
-}
-
-void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
-{
-       struct pci_dev *dev;
-
-       if (s7a_workaround < 0)
-               check_s7a();
-       list_for_each_entry(dev, &bus->devices, bus_list) {
-               pci_read_irq_line(dev);
-               if (s7a_workaround) {
-                       if (dev->irq > 16) {
-                               dev->irq -= 3;
-                               pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
-                                       dev->irq);
-                       }
-               }
-       }
-}
-
 static void __init pSeries_request_regions(void)
 {
        if (!isa_io_base)
index 6bfacc217085fbb871a8fa64eea6baa578e61281..ac56b868913a045ab44ce669b42b2076636c65f7 100644 (file)
@@ -93,8 +93,8 @@ pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
                if (list_empty(&dev->global_list)) {
                        int i;
 
-                       /* Need to setup IOMMU tables */
-                       ppc_md.iommu_dev_setup(dev);
+                       /* Fill device archdata and setup iommu table */
+                       pcibios_setup_new_device(dev);
 
                        if(fix_bus)
                                pcibios_fixup_device_resources(dev, bus);
@@ -195,7 +195,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
        phb = pcibios_alloc_controller(dn);
        if (!phb)
                return NULL;
-       setup_phb(dn, phb);
+       rtas_setup_phb(phb);
        pci_process_bridge_OF_ranges(phb, dn, 0);
 
        pci_setup_phb_io_dynamic(phb, primary);
index 1773103354be458c22c32e0f79f8a10bdac08868..4ad33e41b0082794582e9a8710d063aa6e7ab599 100644 (file)
@@ -268,11 +268,10 @@ static char * parse_next_property(char *buf, char *end, char **name, int *length
 static struct property *new_property(const char *name, const int length,
                                     const unsigned char *value, struct property *last)
 {
-       struct property *new = kmalloc(sizeof(*new), GFP_KERNEL);
+       struct property *new = kzalloc(sizeof(*new), GFP_KERNEL);
 
        if (!new)
                return NULL;
-       memset(new, 0, sizeof(*new));
 
        if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL)))
                goto cleanup;
index 89a8119f988d697f0c7fa0b8edfdf3f3feb3240a..0dc2548ca9bcdd45bebf827db6aa2fe813b20bb0 100644 (file)
@@ -347,6 +347,7 @@ static int __init pSeries_init_panel(void)
 }
 arch_initcall(pSeries_init_panel);
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void pSeries_mach_cpu_die(void)
 {
        local_irq_disable();
@@ -357,6 +358,9 @@ static void pSeries_mach_cpu_die(void)
        BUG();
        for(;;);
 }
+#else
+#define pSeries_mach_cpu_die NULL
+#endif
 
 static int pseries_set_dabr(unsigned long dabr)
 {
@@ -553,7 +557,6 @@ define_machine(pseries) {
        .log_error              = pSeries_log_error,
        .pcibios_fixup          = pSeries_final_fixup,
        .pci_probe_mode         = pSeries_pci_probe_mode,
-       .irq_bus_setup          = pSeries_irq_bus_setup,
        .restart                = rtas_restart,
        .power_off              = rtas_power_off,
        .halt                   = rtas_halt,
index d071abe78ab196943a56b8ee9ce097650a34cb00..b5b2b1103de8d58dd2f49b6b766fe4d429913c46 100644 (file)
@@ -656,13 +656,38 @@ static void __init xics_setup_8259_cascade(void)
        set_irq_chained_handler(cascade, pseries_8259_cascade);
 }
 
+static struct device_node *cpuid_to_of_node(int cpu)
+{
+       struct device_node *np;
+       u32 hcpuid = get_hard_smp_processor_id(cpu);
+
+       for_each_node_by_type(np, "cpu") {
+               int i, len;
+               const u32 *intserv;
+
+               intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
+
+               if (!intserv)
+                       intserv = get_property(np, "reg", &len);
+
+               i = len / sizeof(u32);
+
+               while (i--)
+                       if (intserv[i] == hcpuid)
+                               return np;
+       }
+
+       return NULL;
+}
+
 void __init xics_init_IRQ(void)
 {
-       int i;
+       int i, j;
        struct device_node *np;
        u32 ilen, indx = 0;
-       const u32 *ireg;
+       const u32 *ireg, *isize;
        int found = 0;
+       u32 hcpuid;
 
        ppc64_boot_msg(0x20, "XICS Init");
 
@@ -683,26 +708,31 @@ void __init xics_init_IRQ(void)
        xics_init_host();
 
        /* Find the server numbers for the boot cpu. */
-       for (np = of_find_node_by_type(NULL, "cpu");
-            np;
-            np = of_find_node_by_type(np, "cpu")) {
-               ireg = get_property(np, "reg", &ilen);
-               if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) {
-                       ireg = get_property(np,
-                                       "ibm,ppc-interrupt-gserver#s", &ilen);
-                       i = ilen / sizeof(int);
-                       if (ireg && i > 0) {
-                               default_server = ireg[0];
-                               /* take last element */
-                               default_distrib_server = ireg[i-1];
-                       }
-                       ireg = get_property(np,
+       np = cpuid_to_of_node(boot_cpuid);
+       BUG_ON(!np);
+       ireg = get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+       if (!ireg)
+               goto skip_gserver_check;
+       i = ilen / sizeof(int);
+       hcpuid = get_hard_smp_processor_id(boot_cpuid);
+
+       /* Global interrupt distribution server is specified in the last
+        * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
+        * entry from this property for the current boot cpu id and use it as
+        * default distribution server
+        */
+       for (j = 0; j < i; j += 2) {
+               if (ireg[j] == hcpuid) {
+                       default_server = hcpuid;
+                       default_distrib_server = ireg[j+1];
+
+                       isize = get_property(np,
                                        "ibm,interrupt-server#-size", NULL);
-                       if (ireg)
-                               interrupt_server_size = *ireg;
-                       break;
+                       if (isize)
+                               interrupt_server_size = *isize;
                }
        }
+skip_gserver_check:
        of_node_put(np);
 
        if (firmware_has_feature(FW_FEATURE_LPAR))
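The rewritten loop above treats "ibm,ppc-interrupt-gserver#s" as a flat list of (server, global-server) pairs and takes the global entry paired with the boot cpu's hardware id as the default distribution server. A userspace sketch of that pairwise scan; the property contents here are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Pretend property data: (server, global-server) pairs. */
static const uint32_t gserver_prop[] = { 0, 4, 2, 4, 4, 12, 6, 12 };

int main(void)
{
        uint32_t boot_hwid = 4;
        uint32_t default_distrib_server = 0;
        size_t i, n = sizeof(gserver_prop) / sizeof(gserver_prop[0]);

        for (i = 0; i + 1 < n; i += 2) {
                if (gserver_prop[i] == boot_hwid) {
                        default_distrib_server = gserver_prop[i + 1];
                        break;
                }
        }

        printf("distribution server for cpu %u: %u\n",
               (unsigned)boot_hwid, (unsigned)default_distrib_server);
        return 0;
}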
index 91f052d8cce03100f10bc8bcf22eb4177d404bb2..6cc34597a620202e901927359bf9207664ffb0a8 100644 (file)
@@ -5,14 +5,13 @@ endif
 obj-$(CONFIG_MPIC)             += mpic.o
 obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
 obj-$(CONFIG_PPC_MPC106)       += grackle.o
-obj-$(CONFIG_BOOKE)            += dcr.o
-obj-$(CONFIG_40x)              += dcr.o
+obj-$(CONFIG_PPC_DCR)          += dcr.o dcr-low.o
 obj-$(CONFIG_U3_DART)          += dart_iommu.o
 obj-$(CONFIG_MMIO_NVRAM)       += mmio_nvram.o
 obj-$(CONFIG_FSL_SOC)          += fsl_soc.o
-obj-$(CONFIG_PPC_TODC)         += todc.o
 obj-$(CONFIG_TSI108_BRIDGE)    += tsi108_pci.o tsi108_dev.o
 obj-$(CONFIG_QUICC_ENGINE)     += qe_lib/
+obj-$(CONFIG_MTD)              += rom.o
 
 ifeq ($(CONFIG_PPC_MERGE),y)
 obj-$(CONFIG_PPC_I8259)                += i8259.o
index 572b7846cc77226870784995534d98ea794bd979..1488535b0e136ddea4452a16de9736395af73460 100644 (file)
@@ -48,9 +48,6 @@
 
 #include "dart.h"
 
-extern int iommu_is_off;
-extern int iommu_force_on;
-
 /* Physical base address and size of the DART table */
 unsigned long dart_tablebase; /* exported to htab_initialize */
 static unsigned long dart_tablesize;
@@ -289,24 +286,15 @@ static void iommu_table_dart_setup(void)
        set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
 }
 
-static void iommu_dev_setup_dart(struct pci_dev *dev)
+static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
-       struct device_node *dn;
-
        /* We only have one iommu table on the mac for now, which makes
         * things simple. Setup all PCI devices to point to this table
-        *
-        * We must use pci_device_to_OF_node() to make sure that
-        * we get the real "final" pointer to the device in the
-        * pci_dev sysdata and not the temporary PHB one
         */
-       dn = pci_device_to_OF_node(dev);
-
-       if (dn)
-               PCI_DN(dn)->iommu_table = &iommu_table_dart;
+       dev->dev.archdata.dma_data = &iommu_table_dart;
 }
 
-static void iommu_bus_setup_dart(struct pci_bus *bus)
+static void pci_dma_bus_setup_dart(struct pci_bus *bus)
 {
        struct device_node *dn;
 
@@ -321,9 +309,6 @@ static void iommu_bus_setup_dart(struct pci_bus *bus)
                PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
-static void iommu_dev_setup_null(struct pci_dev *dev) { }
-static void iommu_bus_setup_null(struct pci_bus *bus) { }
-
 void iommu_init_early_dart(void)
 {
        struct device_node *dn;
@@ -344,22 +329,21 @@ void iommu_init_early_dart(void)
 
        /* Initialize the DART HW */
        if (dart_init(dn) == 0) {
-               ppc_md.iommu_dev_setup = iommu_dev_setup_dart;
-               ppc_md.iommu_bus_setup = iommu_bus_setup_dart;
+               ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
+               ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
 
                /* Setup pci_dma ops */
-               pci_iommu_init();
-
+               pci_dma_ops = &dma_iommu_ops;
                return;
        }
 
  bail:
        /* If init failed, use direct iommu and null setup functions */
-       ppc_md.iommu_dev_setup = iommu_dev_setup_null;
-       ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+       ppc_md.pci_dma_dev_setup = NULL;
+       ppc_md.pci_dma_bus_setup = NULL;
 
        /* Setup pci_dma ops */
-       pci_direct_iommu_init();
+       pci_dma_ops = &dma_direct_ops;
 }
 
 
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
new file mode 100644 (file)
index 0000000..2078f39
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * "Indirect" DCR access
+ *
+ * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of  the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+#define DCR_ACCESS_PROLOG(table) \
+       rlwinm  r3,r3,4,18,27;   \
+       lis     r5,table@h;      \
+       ori     r5,r5,table@l;   \
+       add     r3,r3,r5;        \
+       mtctr   r3;              \
+       bctr
+
+_GLOBAL(__mfdcr)
+       DCR_ACCESS_PROLOG(__mfdcr_table)
+
+_GLOBAL(__mtdcr)
+       DCR_ACCESS_PROLOG(__mtdcr_table)
+
+__mfdcr_table:
+       mfdcr  r3,0; blr
+__mtdcr_table:
+       mtdcr  0,r4; blr
+
+dcr     = 1
+        .rept   1023
+       mfdcr   r3,dcr; blr
+       mtdcr   dcr,r4; blr
+       dcr     = dcr + 1
+       .endr
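Because mfdcr/mtdcr encode the DCR number as an immediate, the assembly above cannot take it in a register; instead it lays out a table of tiny read/write stubs (one pair per DCR number) and computes a branch into it, with rlwinm scaling the DCR number to the stub spacing. A rough C analogue of that dispatch, using an ordinary function-pointer table and an array in place of real DCRs:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_dcrs[4];

static uint32_t read_dcr0(void) { return fake_dcrs[0]; }
static uint32_t read_dcr1(void) { return fake_dcrs[1]; }
static uint32_t read_dcr2(void) { return fake_dcrs[2]; }
static uint32_t read_dcr3(void) { return fake_dcrs[3]; }

/* One accessor per register number, like the stub table in dcr-low.S. */
static uint32_t (*const dcr_read_table[])(void) = {
        read_dcr0, read_dcr1, read_dcr2, read_dcr3,
};

static uint32_t mfdcr_example(unsigned int dcrn)
{
        /* The asm computes table + dcrn * stride and branches there;
         * here the same indexing selects a function pointer. */
        return dcr_read_table[dcrn]();
}

int main(void)
{
        fake_dcrs[2] = 0xabcd;
        printf("dcr 2 = 0x%x\n", (unsigned)mfdcr_example(2));
        return 0;
}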
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
new file mode 100644 (file)
index 0000000..dffeeae
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <asm/prom.h>
+#include <asm/dcr.h>
+
+unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
+{
+       unsigned int ds;
+       const u32 *dr = get_property(np, "dcr-reg", &ds);
+
+       if (dr == NULL || ds & 1 || index >= (ds / 8))
+               return 0;
+
+       return dr[index * 2];
+}
+
+unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
+{
+       unsigned int ds;
+       const u32 *dr = get_property(np, "dcr-reg", &ds);
+
+       if (dr == NULL || ds & 1 || index >= (ds / 8))
+               return 0;
+
+       return dr[index * 2 + 1];
+}
+
+#ifndef CONFIG_PPC_DCR_NATIVE
+
+static struct device_node * find_dcr_parent(struct device_node * node)
+{
+       struct device_node *par, *tmp;
+       const u32 *p;
+
+       for (par = of_node_get(node); par;) {
+               if (get_property(par, "dcr-controller", NULL))
+                       break;
+               p = get_property(par, "dcr-parent", NULL);
+               tmp = par;
+               if (p == NULL)
+                       par = of_get_parent(par);
+               else
+                       par = of_find_node_by_phandle(*p);
+               of_node_put(tmp);
+       }
+       return par;
+}
+
+u64 of_translate_dcr_address(struct device_node *dev,
+                            unsigned int dcr_n,
+                            unsigned int *out_stride)
+{
+       struct device_node *dp;
+       const u32 *p;
+       unsigned int stride;
+       u64 ret;
+
+       dp = find_dcr_parent(dev);
+       if (dp == NULL)
+               return OF_BAD_ADDR;
+
+       /* Stride is not properly defined yet, default to 0x10 for Axon */
+       p = get_property(dp, "dcr-mmio-stride", NULL);
+       stride = (p == NULL) ? 0x10 : *p;
+
+       /* XXX FIXME: Which of the two following property names should be used? */
+       p = get_property(dp, "dcr-mmio-range", NULL);
+       if (p == NULL)
+               p = get_property(dp, "dcr-mmio-space", NULL);
+       if (p == NULL)
+               return OF_BAD_ADDR;
+
+       /* Maybe could do some better range checking here */
+       ret = of_translate_address(dp, p);
+       if (ret != OF_BAD_ADDR)
+               ret += (u64)(stride) * (u64)dcr_n;
+       if (out_stride)
+               *out_stride = stride;
+       return ret;
+}
+
+dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
+                  unsigned int dcr_c)
+{
+       dcr_host_t ret = { .token = NULL, .stride = 0 };
+       u64 addr;
+
+       pr_debug("dcr_map(%s, 0x%x, 0x%x)\n",
+                dev->full_name, dcr_n, dcr_c);
+
+       addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
+       pr_debug("translates to addr: 0x%lx, stride: 0x%x\n",
+                addr, ret.stride);
+       if (addr == OF_BAD_ADDR)
+               return ret;
+       pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride);
+       ret.token = ioremap(addr, dcr_c * ret.stride);
+       if (ret.token == NULL)
+               return ret;
+       pr_debug("mapped at 0x%p -> base is 0x%p\n",
+                ret.token, ret.token - dcr_n * ret.stride);
+       ret.token -= dcr_n * ret.stride;
+       return ret;
+}
+
+void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c)
+{
+       dcr_host_t h = host;
+
+       if (h.token == NULL)
+               return;
+       h.token -= dcr_n * h.stride;
+       iounmap(h.token);
+       h.token = NULL;
+}
+
+#endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
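A detail worth noting in dcr_map() above: the returned token is biased down by dcr_n * stride, so later accessors can index it with the absolute DCR number rather than an offset from the mapping base. A userspace sketch of that biasing trick, with a plain array standing in for the ioremapped window (names and data are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t backing[16];
        uint32_t *mapped, *token;
        unsigned int i, dcr_n = 4, stride = 1;

        for (i = 0; i < 16; i++)
                backing[i] = 100 + i;

        /* Pretend the mapping starts at DCR number 4. */
        mapped = &backing[4];                   /* stand-in for ioremap()'s return */
        token = mapped - dcr_n * stride;        /* bias back, as dcr_map() does */

        /* An access at absolute DCR number 6 now indexes the token directly. */
        printf("DCR 6 reads back %u\n", (unsigned)token[6 * stride]);
        return 0;
}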
index dbe92ae2033319d89ffff84ad9e559d5d72189a8..ad31e56e892ba32b6e97314c17f2fb66bfa7f316 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/phy.h>
 #include <linux/fsl_devices.h>
 #include <linux/fs_enet_pd.h>
 #include <linux/fs_uart_pd.h>
@@ -146,7 +147,7 @@ static int __init gfar_mdio_of_init(void)
                }
 
                for (k = 0; k < 32; k++)
-                       mdio_data.irq[k] = -1;
+                       mdio_data.irq[k] = PHY_POLL;
 
                while ((child = of_get_next_child(np, child)) != NULL) {
                        int irq = irq_of_parse_and_map(child, 0);
@@ -177,6 +178,7 @@ static const char *gfar_tx_intr = "tx";
 static const char *gfar_rx_intr = "rx";
 static const char *gfar_err_intr = "error";
 
+
 static int __init gfar_of_init(void)
 {
        struct device_node *np;
@@ -204,8 +206,7 @@ static int __init gfar_of_init(void)
                if (ret)
                        goto err;
 
-               r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-               r[1].flags = IORESOURCE_IRQ;
+               of_irq_to_resource(np, 0, &r[1]);
 
                model = get_property(np, "model", NULL);
 
@@ -214,12 +215,10 @@ static int __init gfar_of_init(void)
                        r[1].name = gfar_tx_intr;
 
                        r[2].name = gfar_rx_intr;
-                       r[2].start = r[2].end = irq_of_parse_and_map(np, 1);
-                       r[2].flags = IORESOURCE_IRQ;
+                       of_irq_to_resource(np, 1, &r[2]);
 
                        r[3].name = gfar_err_intr;
-                       r[3].start = r[3].end = irq_of_parse_and_map(np, 2);
-                       r[3].flags = IORESOURCE_IRQ;
+                       of_irq_to_resource(np, 2, &r[3]);
 
                        n_res += 2;
                }
@@ -323,8 +322,7 @@ static int __init fsl_i2c_of_init(void)
                if (ret)
                        goto err;
 
-               r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-               r[1].flags = IORESOURCE_IRQ;
+               of_irq_to_resource(np, 0, &r[1]);
 
                i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
                if (IS_ERR(i2c_dev)) {
@@ -459,8 +457,7 @@ static int __init fsl_usb_of_init(void)
                if (ret)
                        goto err;
 
-               r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-               r[1].flags = IORESOURCE_IRQ;
+               of_irq_to_resource(np, 0, &r[1]);
 
                usb_dev_mph =
                    platform_device_register_simple("fsl-ehci", i, r, 2);
@@ -507,8 +504,7 @@ static int __init fsl_usb_of_init(void)
                if (ret)
                        goto unreg_mph;
 
-               r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
-               r[1].flags = IORESOURCE_IRQ;
+               of_irq_to_resource(np, 0, &r[1]);
 
                usb_dev_dr =
                    platform_device_register_simple("fsl-ehci", i, r, 2);
@@ -591,8 +587,7 @@ static int __init fs_enet_of_init(void)
                r[2].name = fcc_regs_c;
                fs_enet_data.fcc_regs_c = r[2].start;
 
-               r[3].start = r[3].end = irq_of_parse_and_map(np, 0);
-               r[3].flags = IORESOURCE_IRQ;
+               of_irq_to_resource(np, 0, &r[3]);
 
                fs_enet_dev =
                    platform_device_register_simple("fsl-cpm-fcc", i, &r[0], 4);
@@ -754,8 +749,7 @@ static int __init cpm_uart_of_init(void)
                        goto err;
                r[1].name = scc_pram;
 
-               r[2].start = r[2].end = irq_of_parse_and_map(np, 0);
-               r[2].flags = IORESOURCE_IRQ;
+               of_irq_to_resource(np, 0, &r[2]);
 
                cpm_uart_dev =
                    platform_device_register_simple("fsl-cpm-scc:uart", i, &r[0], 3);
index ba4833f57d47e343b2433c14350f612d0beace03..411480d5c626b9cac903887516305d3615db3f9a 100644 (file)
@@ -147,33 +147,51 @@ static u32 mpic_infos[][MPIC_IDX_END] = {
  */
 
 
-static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
-                           unsigned int reg)
+static inline u32 _mpic_read(enum mpic_reg_type type,
+                            struct mpic_reg_bank *rb,
+                            unsigned int reg)
 {
-       if (be)
-               return in_be32(base + (reg >> 2));
-       else
-               return in_le32(base + (reg >> 2));
+       switch(type) {
+#ifdef CONFIG_PPC_DCR
+       case mpic_access_dcr:
+               return dcr_read(rb->dhost,
+                               rb->dbase + reg + rb->doff);
+#endif
+       case mpic_access_mmio_be:
+               return in_be32(rb->base + (reg >> 2));
+       case mpic_access_mmio_le:
+       default:
+               return in_le32(rb->base + (reg >> 2));
+       }
 }
 
-static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
-                             unsigned int reg, u32 value)
+static inline void _mpic_write(enum mpic_reg_type type,
+                              struct mpic_reg_bank *rb,
+                              unsigned int reg, u32 value)
 {
-       if (be)
-               out_be32(base + (reg >> 2), value);
-       else
-               out_le32(base + (reg >> 2), value);
+       switch(type) {
+#ifdef CONFIG_PPC_DCR
+       case mpic_access_dcr:
+               return dcr_write(rb->dhost,
+                                rb->dbase + reg + rb->doff, value);
+#endif
+       case mpic_access_mmio_be:
+               return out_be32(rb->base + (reg >> 2), value);
+       case mpic_access_mmio_le:
+       default:
+               return out_le32(rb->base + (reg >> 2), value);
+       }
 }
 
 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
 {
-       unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
+       enum mpic_reg_type type = mpic->reg_type;
        unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
                              (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 
-       if (mpic->flags & MPIC_BROKEN_IPI)
-               be = !be;
-       return _mpic_read(be, mpic->gregs, offset);
+       if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
+               type = mpic_access_mmio_be;
+       return _mpic_read(type, &mpic->gregs, offset);
 }
 
 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
@@ -181,7 +199,7 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu
        unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
                              (ipi * MPIC_INFO(GREG_IPI_STRIDE));
 
-       _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
+       _mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
 }
 
 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
@@ -190,8 +208,7 @@ static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
 
        if (mpic->flags & MPIC_PRIMARY)
                cpu = hard_smp_processor_id();
-       return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,
-                         mpic->cpuregs[cpu], reg);
+       return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
 }
 
 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
@@ -201,7 +218,7 @@ static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 valu
        if (mpic->flags & MPIC_PRIMARY)
                cpu = hard_smp_processor_id();
 
-       _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
+       _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
 }
 
 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
@@ -209,7 +226,7 @@ static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigne
        unsigned int    isu = src_no >> mpic->isu_shift;
        unsigned int    idx = src_no & mpic->isu_mask;
 
-       return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+       return _mpic_read(mpic->reg_type, &mpic->isus[isu],
                          reg + (idx * MPIC_INFO(IRQ_STRIDE)));
 }
 
@@ -219,12 +236,12 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
        unsigned int    isu = src_no >> mpic->isu_shift;
        unsigned int    idx = src_no & mpic->isu_mask;
 
-       _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
+       _mpic_write(mpic->reg_type, &mpic->isus[isu],
                    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
 }
 
-#define mpic_read(b,r)         _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
-#define mpic_write(b,r,v)      _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
+#define mpic_read(b,r)         _mpic_read(mpic->reg_type,&(b),(r))
+#define mpic_write(b,r,v)      _mpic_write(mpic->reg_type,&(b),(r),(v))
 #define mpic_ipi_read(i)       _mpic_ipi_read(mpic,(i))
 #define mpic_ipi_write(i,v)    _mpic_ipi_write(mpic,(i),(v))
 #define mpic_cpu_read(i)       _mpic_cpu_read(mpic,(i))
@@ -238,6 +255,38 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
  */
 
 
+static void _mpic_map_mmio(struct mpic *mpic, unsigned long phys_addr,
+                          struct mpic_reg_bank *rb, unsigned int offset,
+                          unsigned int size)
+{
+       rb->base = ioremap(phys_addr + offset, size);
+       BUG_ON(rb->base == NULL);
+}
+
+#ifdef CONFIG_PPC_DCR
+static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+                         unsigned int offset, unsigned int size)
+{
+       rb->dbase = mpic->dcr_base;
+       rb->doff = offset;
+       rb->dhost = dcr_map(mpic->of_node, rb->dbase + rb->doff, size);
+       BUG_ON(!DCR_MAP_OK(rb->dhost));
+}
+
+static inline void mpic_map(struct mpic *mpic, unsigned long phys_addr,
+                           struct mpic_reg_bank *rb, unsigned int offset,
+                           unsigned int size)
+{
+       if (mpic->flags & MPIC_USES_DCR)
+               _mpic_map_dcr(mpic, rb, offset, size);
+       else
+               _mpic_map_mmio(mpic, phys_addr, rb, offset, size);
+}
+#else /* CONFIG_PPC_DCR */
+#define mpic_map(m,p,b,o,s)    _mpic_map_mmio(m,p,b,o,s)
+#endif /* !CONFIG_PPC_DCR */
+
+
 
 /* Check if we have one of those nice broken MPICs with a flipped endian on
  * reads from IPI registers
@@ -845,7 +894,7 @@ static struct irq_host_ops mpic_host_ops = {
  */
 
 struct mpic * __init mpic_alloc(struct device_node *node,
-                               unsigned long phys_addr,
+                               phys_addr_t phys_addr,
                                unsigned int flags,
                                unsigned int isu_size,
                                unsigned int irq_count,
@@ -855,6 +904,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
        u32             reg;
        const char      *vers;
        int             i;
+       u64             paddr = phys_addr;
 
        mpic = alloc_bootmem(sizeof(struct mpic));
        if (mpic == NULL)
@@ -883,6 +933,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
        if (flags & MPIC_PRIMARY)
                mpic->hc_ht_irq.set_affinity = mpic_set_affinity;
 #endif /* CONFIG_MPIC_BROKEN_U3 */
+
 #ifdef CONFIG_SMP
        mpic->hc_ipi = mpic_ipi_chip;
        mpic->hc_ipi.typename = name;
@@ -893,15 +944,52 @@ struct mpic * __init mpic_alloc(struct device_node *node,
        mpic->irq_count = irq_count;
        mpic->num_sources = 0; /* so far */
 
+       /* Check for "big-endian" in device-tree */
+       if (node && get_property(node, "big-endian", NULL) != NULL)
+               mpic->flags |= MPIC_BIG_ENDIAN;
+
+
 #ifdef CONFIG_MPIC_WEIRD
        mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
 #endif
 
+       /* default register type */
+       mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
+               mpic_access_mmio_be : mpic_access_mmio_le;
+
+       /* If no physical address is passed in, a device-node is mandatory */
+       BUG_ON(paddr == 0 && node == NULL);
+
+       /* If no physical address passed in, check if it's dcr based */
+       if (paddr == 0 && get_property(node, "dcr-reg", NULL) != NULL)
+               mpic->flags |= MPIC_USES_DCR;
+
+#ifdef CONFIG_PPC_DCR
+       if (mpic->flags & MPIC_USES_DCR) {
+               const u32 *dbasep;
+               dbasep = get_property(node, "dcr-reg", NULL);
+               BUG_ON(dbasep == NULL);
+               mpic->dcr_base = *dbasep;
+               mpic->reg_type = mpic_access_dcr;
+       }
+#else
+       BUG_ON (mpic->flags & MPIC_USES_DCR);
+#endif /* CONFIG_PPC_DCR */
+
+       /* If the MPIC is not DCR based, and no physical address was passed
+        * in, try to obtain one
+        */
+       if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
+               const u32 *reg;
+               reg = get_property(node, "reg", NULL);
+               BUG_ON(reg == NULL);
+               paddr = of_translate_address(node, reg);
+               BUG_ON(paddr == OF_BAD_ADDR);
+       }
+
        /* Map the global registers */
-       mpic->gregs = ioremap(phys_addr + MPIC_INFO(GREG_BASE), 0x1000);
-       mpic->tmregs = mpic->gregs +
-                      ((MPIC_INFO(TIMER_BASE) - MPIC_INFO(GREG_BASE)) >> 2);
-       BUG_ON(mpic->gregs == NULL);
+       mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+       mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
 
        /* Reset */
        if (flags & MPIC_WANTS_RESET) {
@@ -926,17 +1014,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 
        /* Map the per-CPU registers */
        for (i = 0; i < mpic->num_cpus; i++) {
-               mpic->cpuregs[i] = ioremap(phys_addr + MPIC_INFO(CPU_BASE) +
-                                          i * MPIC_INFO(CPU_STRIDE), 0x1000);
-               BUG_ON(mpic->cpuregs[i] == NULL);
+               mpic_map(mpic, paddr, &mpic->cpuregs[i],
+                        MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
+                        0x1000);
        }
 
        /* Initialize main ISU if none provided */
        if (mpic->isu_size == 0) {
                mpic->isu_size = mpic->num_sources;
-               mpic->isus[0] = ioremap(phys_addr + MPIC_INFO(IRQ_BASE),
-                                       MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
-               BUG_ON(mpic->isus[0] == NULL);
+               mpic_map(mpic, paddr, &mpic->isus[0],
+                        MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
        }
        mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
        mpic->isu_mask = (1 << mpic->isu_shift) - 1;
@@ -956,10 +1043,11 @@ struct mpic * __init mpic_alloc(struct device_node *node,
                vers = "<unknown>";
                break;
        }
-       printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
-              name, vers, phys_addr, mpic->num_cpus);
-       printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
-              mpic->isu_shift, mpic->isu_mask);
+       printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
+              " max %d CPUs\n",
+              name, vers, (unsigned long long)paddr, mpic->num_cpus);
+       printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
+              mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
 
        mpic->next = mpics;
        mpics = mpic;
@@ -973,14 +1061,14 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 }
 
 void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
-                           unsigned long phys_addr)
+                           phys_addr_t paddr)
 {
        unsigned int isu_first = isu_num * mpic->isu_size;
 
        BUG_ON(isu_num >= MPIC_MAX_ISU);
 
-       mpic->isus[isu_num] = ioremap(phys_addr,
-                                     MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+       mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+                MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
        if ((isu_first + mpic->isu_size) > mpic->num_sources)
                mpic->num_sources = isu_first + mpic->isu_size;
 }
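The MPIC rework above replaces the big-endian flag plus raw __iomem pointer with a (register type, register bank) pair, so DCR-mapped and MMIO-mapped controllers can share the same read/write paths. A compact userspace sketch of that dispatch; the enum, struct and backing arrays are invented for illustration and the endian handling is omitted:

#include <stdio.h>
#include <stdint.h>

enum reg_type_example { ACCESS_MMIO_BE, ACCESS_MMIO_LE, ACCESS_DCR };

struct reg_bank_example {
        uint32_t *base;         /* MMIO-style backing store */
        unsigned int dbase;     /* DCR-style base offset */
};

static uint32_t fake_dcr_space[16];

static uint32_t bank_read_example(enum reg_type_example type,
                                  struct reg_bank_example *rb,
                                  unsigned int reg)
{
        switch (type) {
        case ACCESS_DCR:
                return fake_dcr_space[rb->dbase + reg];
        case ACCESS_MMIO_BE:
        case ACCESS_MMIO_LE:
        default:
                /* a real implementation would byte-swap for one of these */
                return rb->base[reg >> 2];
        }
}

int main(void)
{
        uint32_t mmio[4] = { 1, 2, 3, 4 };
        struct reg_bank_example rb = { mmio, 8 };

        fake_dcr_space[9] = 0x55;
        printf("mmio read: %u, dcr read: 0x%x\n",
               (unsigned)bank_read_example(ACCESS_MMIO_LE, &rb, 8),
               (unsigned)bank_read_example(ACCESS_DCR, &rb, 1));
        return 0;
}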
index e4223226a7a87aff07d112f38c4517d9726f262b..e3d71e083f355ff2a1975229642e18714acb65f2 100644 (file)
@@ -174,8 +174,7 @@ void qe_setbrg(u32 brg, u32 rate)
        u32 divisor, tempval;
        int div16 = 0;
 
-       bp = &qe_immr->brg.brgc1;
-       bp += brg;
+       bp = &qe_immr->brg.brgc[brg];
 
        divisor = (get_brg_clk() / rate);
        if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
index 75fa3104a43aaeb4ac1bdde7cdd1a19045c2b22e..e657559bea93fdec3f80fad8e6dbdefec4ce1c5a 100644 (file)
@@ -216,14 +216,12 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
                return -EINVAL;
        }
 
-       uccf = (struct ucc_fast_private *)
-                kmalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
+       uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
        if (!uccf) {
                uccf_err
                    ("ucc_fast_init: No memory for UCC slow data structure!");
                return -ENOMEM;
        }
-       memset(uccf, 0, sizeof(struct ucc_fast_private));
 
        /* Fill fast UCC structure */
        uccf->uf_info = uf_info;
index a49da6b73ecf6c2d8fa0d23b71f872ca362c07b0..47b56203f47ee0017656ba1d654f831b8dd2e74d 100644 (file)
@@ -168,14 +168,12 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                return -EINVAL;
        }
 
-       uccs = (struct ucc_slow_private *)
-               kmalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
+       uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
        if (!uccs) {
                uccs_err
                    ("ucc_slow_init: No memory for UCC slow data structure!");
                return -ENOMEM;
        }
-       memset(uccs, 0, sizeof(struct ucc_slow_private));
 
        /* Fill slow UCC structure */
        uccs->us_info = us_info;
diff --git a/arch/powerpc/sysdev/rom.c b/arch/powerpc/sysdev/rom.c
new file mode 100644 (file)
index 0000000..bf5b3f1
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * ROM device registration
+ *
+ * (C) 2006 MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/kernel.h>
+#include <asm/of_device.h>
+
+static int __init powerpc_flash_init(void)
+{
+       struct device_node *node = NULL;
+
+       /*
+        * Register all the devices which type is "rom"
+        */
+       while ((node = of_find_node_by_type(node, "rom")) != NULL) {
+               if (node->name == NULL) {
+                       printk(KERN_WARNING "powerpc_flash_init: found 'rom' "
+                               "device, but with no name, skipping...\n");
+                       continue;
+               }
+               of_platform_device_create(node, node->name, NULL);
+       }
+       return 0;
+}
+
+arch_initcall(powerpc_flash_init);
diff --git a/arch/powerpc/sysdev/todc.c b/arch/powerpc/sysdev/todc.c
deleted file mode 100644 (file)
index 0a65980..0000000
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Time of Day Clock support for the M48T35, M48T37, M48T59, and MC146818
- * Real Time Clocks/Timekeepers.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001-2004 (c) MontaVista, Software, Inc.  This file is licensed under
- * the terms of the GNU General Public License version 2.  This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/bcd.h>
-#include <linux/mc146818rtc.h>
-
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/time.h>
-#include <asm/todc.h>
-
-/*
- * Depending on the hardware on your board and your board design, the
- * RTC/NVRAM may be accessed either directly (like normal memory) or via
- * address/data registers.  If your board uses the direct method, set
- * 'nvram_data' to the base address of your nvram and leave 'nvram_as0' and
- * 'nvram_as1' NULL.  If your board uses address/data regs to access nvram,
- * set 'nvram_as0' to the address of the lower byte, set 'nvram_as1' to the
- * address of the upper byte (leave NULL if using mc146818), and set
- * 'nvram_data' to the address of the 8-bit data register.
- *
- * Note: Even though the documentation for the various RTC chips say that it
- *      take up to a second before it starts updating once the 'R' bit is
- *      cleared, they always seem to update even though we bang on it many
- *      times a second.  This is true, except for the Dallas Semi 1746/1747
- *      (possibly others).  Those chips seem to have a real problem whenever
- *      we set the 'R' bit before reading them, they basically stop counting.
- *                                             --MAG
- */
-
-/*
- * 'todc_info' should be initialized in your *_setup.c file to
- * point to a fully initialized 'todc_info_t' structure.
- * This structure holds all the register offsets for your particular
- * TODC/RTC chip.
- * TODC_ALLOC()/TODC_INIT() will allocate and initialize this table for you.
- */
-
-#ifdef RTC_FREQ_SELECT
-#undef RTC_FREQ_SELECT
-#define        RTC_FREQ_SELECT         control_b       /* Register A */
-#endif
-
-#ifdef RTC_CONTROL
-#undef RTC_CONTROL
-#define        RTC_CONTROL             control_a       /* Register B */
-#endif
-
-#ifdef RTC_INTR_FLAGS
-#undef RTC_INTR_FLAGS
-#define        RTC_INTR_FLAGS          watchdog        /* Register C */
-#endif
-
-#ifdef RTC_VALID
-#undef RTC_VALID
-#define        RTC_VALID               interrupts      /* Register D */
-#endif
-
-/* Access routines when RTC accessed directly (like normal memory) */
-u_char
-todc_direct_read_val(int addr)
-{
-       return readb((void __iomem *)(todc_info->nvram_data + addr));
-}
-
-void
-todc_direct_write_val(int addr, unsigned char val)
-{
-       writeb(val, (void __iomem *)(todc_info->nvram_data + addr));
-       return;
-}
-
-/* Access routines for accessing m48txx type chips via addr/data regs */
-u_char
-todc_m48txx_read_val(int addr)
-{
-       outb(addr, todc_info->nvram_as0);
-       outb(addr>>todc_info->as0_bits, todc_info->nvram_as1);
-       return inb(todc_info->nvram_data);
-}
-
-void
-todc_m48txx_write_val(int addr, unsigned char val)
-{
-       outb(addr, todc_info->nvram_as0);
-       outb(addr>>todc_info->as0_bits, todc_info->nvram_as1);
-       outb(val, todc_info->nvram_data);
-       return;
-}
-
-/* Access routines for accessing mc146818 type chips via addr/data regs */
-u_char
-todc_mc146818_read_val(int addr)
-{
-       outb_p(addr, todc_info->nvram_as0);
-       return inb_p(todc_info->nvram_data);
-}
-
-void
-todc_mc146818_write_val(int addr, unsigned char val)
-{
-       outb_p(addr, todc_info->nvram_as0);
-       outb_p(val, todc_info->nvram_data);
-}
-
-
-/*
- * Routines to make RTC chips with NVRAM buried behind an addr/data pair
- * have the NVRAM and clock regs appear at the same level.
- * The NVRAM will appear to start at addr 0 and the clock regs will appear
- * to start immediately after the NVRAM (actually, start at offset
- * todc_info->nvram_size).
- */
-static inline u_char
-todc_read_val(int addr)
-{
-       u_char  val;
-
-       if (todc_info->sw_flags & TODC_FLAG_2_LEVEL_NVRAM) {
-               if (addr < todc_info->nvram_size) { /* NVRAM */
-                       ppc_md.rtc_write_val(todc_info->nvram_addr_reg, addr);
-                       val = ppc_md.rtc_read_val(todc_info->nvram_data_reg);
-               } else { /* Clock Reg */
-                       addr -= todc_info->nvram_size;
-                       val = ppc_md.rtc_read_val(addr);
-               }
-       } else
-               val = ppc_md.rtc_read_val(addr);
-
-       return val;
-}
-
-static inline void
-todc_write_val(int addr, u_char val)
-{
-       if (todc_info->sw_flags & TODC_FLAG_2_LEVEL_NVRAM) {
-               if (addr < todc_info->nvram_size) { /* NVRAM */
-                       ppc_md.rtc_write_val(todc_info->nvram_addr_reg, addr);
-                       ppc_md.rtc_write_val(todc_info->nvram_data_reg, val);
-               } else { /* Clock Reg */
-                       addr -= todc_info->nvram_size;
-                       ppc_md.rtc_write_val(addr, val);
-               }
-       } else
-               ppc_md.rtc_write_val(addr, val);
-}
-
-/*
- * TODC routines
- *
- * There is some ugly stuff in that there are assumptions for the mc146818.
- *
- * Assumptions:
- *     - todc_info->control_a has the offset as mc146818 Register B reg
- *     - todc_info->control_b has the offset as mc146818 Register A reg
- *     - m48txx control reg's write enable or 'W' bit is same as
- *       mc146818 Register B 'SET' bit (i.e., 0x80)
- *
- * These assumptions were made to make the code simpler.
- */
-long __init
-todc_time_init(void)
-{
-       u_char  cntl_b;
-
-       if (!ppc_md.rtc_read_val)
-               ppc_md.rtc_read_val = ppc_md.nvram_read_val;
-       if (!ppc_md.rtc_write_val)
-               ppc_md.rtc_write_val = ppc_md.nvram_write_val;
-
-       cntl_b = todc_read_val(todc_info->control_b);
-
-       if (todc_info->rtc_type == TODC_TYPE_MC146818) {
-               if ((cntl_b & 0x70) != 0x20) {
-                       printk(KERN_INFO "TODC real-time-clock was stopped."
-                               "  Now starting...");
-                       cntl_b &= ~0x70;
-                       cntl_b |= 0x20;
-               }
-
-               todc_write_val(todc_info->control_b, cntl_b);
-       } else if (todc_info->rtc_type == TODC_TYPE_DS17285) {
-               u_char mode;
-
-               mode = todc_read_val(TODC_TYPE_DS17285_CNTL_A);
-               /* Make sure countdown clear is not set */
-               mode &= ~0x40;
-               /* Enable oscillator, extended register set */
-               mode |= 0x30;
-               todc_write_val(TODC_TYPE_DS17285_CNTL_A, mode);
-
-       } else if (todc_info->rtc_type == TODC_TYPE_DS1501) {
-               u_char  month;
-
-               todc_info->enable_read = TODC_DS1501_CNTL_B_TE;
-               todc_info->enable_write = TODC_DS1501_CNTL_B_TE;
-
-               month = todc_read_val(todc_info->month);
-
-               if ((month & 0x80) == 0x80) {
-                       printk(KERN_INFO "TODC %s %s\n",
-                               "real-time-clock was stopped.",
-                               "Now starting...");
-                       month &= ~0x80;
-                       todc_write_val(todc_info->month, month);
-               }
-
-               cntl_b &= ~TODC_DS1501_CNTL_B_TE;
-               todc_write_val(todc_info->control_b, cntl_b);
-       } else { /* must be a m48txx type */
-               u_char  cntl_a;
-
-               todc_info->enable_read = TODC_MK48TXX_CNTL_A_R;
-               todc_info->enable_write = TODC_MK48TXX_CNTL_A_W;
-
-               cntl_a = todc_read_val(todc_info->control_a);
-
-               /* Check & clear STOP bit in control B register */
-               if (cntl_b & TODC_MK48TXX_DAY_CB) {
-                       printk(KERN_INFO "TODC %s %s\n",
-                               "real-time-clock was stopped.",
-                               "Now starting...");
-
-                       cntl_a |= todc_info->enable_write;
-                       cntl_b &= ~TODC_MK48TXX_DAY_CB;/* Start Oscil */
-
-                       todc_write_val(todc_info->control_a, cntl_a);
-                       todc_write_val(todc_info->control_b, cntl_b);
-               }
-
-               /* Make sure READ & WRITE bits are cleared. */
-               cntl_a &= ~(todc_info->enable_write | todc_info->enable_read);
-               todc_write_val(todc_info->control_a, cntl_a);
-       }
-
-       return 0;
-}
-
-/*
- * There is some ugly stuff in that there are assumptions that for a mc146818,
- * the todc_info->control_a has the offset of the mc146818 Register B reg and
- * that the register'ss 'SET' bit is the same as the m48txx's write enable
- * bit in the control register of the m48txx (i.e., 0x80).
- *
- * It was done to make the code look simpler.
- */
-void
-todc_get_rtc_time(struct rtc_time *tm)
-{
-       uint    year = 0, mon = 0, mday = 0, hour = 0, min = 0, sec = 0;
-       uint    limit, i;
-       u_char  save_control, uip = 0;
-       extern void GregorianDay(struct rtc_time *);
-
-       spin_lock(&rtc_lock);
-       save_control = todc_read_val(todc_info->control_a);
-
-       if (todc_info->rtc_type != TODC_TYPE_MC146818) {
-               limit = 1;
-
-               switch (todc_info->rtc_type) {
-               case TODC_TYPE_DS1553:
-               case TODC_TYPE_DS1557:
-               case TODC_TYPE_DS1743:
-               case TODC_TYPE_DS1746:  /* XXXX BAD HACK -> FIX */
-               case TODC_TYPE_DS1747:
-               case TODC_TYPE_DS17285:
-                       break;
-               default:
-                       todc_write_val(todc_info->control_a,
-                               (save_control | todc_info->enable_read));
-               }
-       } else
-               limit = 100000000;
-
-       for (i=0; i<limit; i++) {
-               if (todc_info->rtc_type == TODC_TYPE_MC146818)
-                       uip = todc_read_val(todc_info->RTC_FREQ_SELECT);
-
-               sec = todc_read_val(todc_info->seconds) & 0x7f;
-               min = todc_read_val(todc_info->minutes) & 0x7f;
-               hour = todc_read_val(todc_info->hours) & 0x3f;
-               mday = todc_read_val(todc_info->day_of_month) & 0x3f;
-               mon = todc_read_val(todc_info->month) & 0x1f;
-               year = todc_read_val(todc_info->year) & 0xff;
-
-               if (todc_info->rtc_type == TODC_TYPE_MC146818) {
-                       uip |= todc_read_val(todc_info->RTC_FREQ_SELECT);
-                       if ((uip & RTC_UIP) == 0)
-                               break;
-               }
-       }
-
-       if (todc_info->rtc_type != TODC_TYPE_MC146818) {
-               switch (todc_info->rtc_type) {
-               case TODC_TYPE_DS1553:
-               case TODC_TYPE_DS1557:
-               case TODC_TYPE_DS1743:
-               case TODC_TYPE_DS1746:  /* XXXX BAD HACK -> FIX */
-               case TODC_TYPE_DS1747:
-               case TODC_TYPE_DS17285:
-                       break;
-               default:
-                       save_control &= ~(todc_info->enable_read);
-                       todc_write_val(todc_info->control_a, save_control);
-               }
-       }
-       spin_unlock(&rtc_lock);
-
-       if ((todc_info->rtc_type != TODC_TYPE_MC146818)
-                       || ((save_control & RTC_DM_BINARY) == 0)
-                       || RTC_ALWAYS_BCD) {
-               BCD_TO_BIN(sec);
-               BCD_TO_BIN(min);
-               BCD_TO_BIN(hour);
-               BCD_TO_BIN(mday);
-               BCD_TO_BIN(mon);
-               BCD_TO_BIN(year);
-       }
-
-       if ((year + 1900) < 1970) {
-               year += 100;
-       }
-
-       tm->tm_sec = sec;
-       tm->tm_min = min;
-       tm->tm_hour = hour;
-       tm->tm_mday = mday;
-       tm->tm_mon = mon;
-       tm->tm_year = year;
-
-       GregorianDay(tm);
-}
-
-int
-todc_set_rtc_time(struct rtc_time *tm)
-{
-       u_char save_control, save_freq_select = 0;
-
-       spin_lock(&rtc_lock);
-       save_control = todc_read_val(todc_info->control_a);
-
-       /* Assuming MK48T59_RTC_CA_WRITE & RTC_SET are equal */
-       todc_write_val(todc_info->control_a,
-               (save_control | todc_info->enable_write));
-       save_control &= ~(todc_info->enable_write); /* in case it was set */
-
-       if (todc_info->rtc_type == TODC_TYPE_MC146818) {
-               save_freq_select = todc_read_val(todc_info->RTC_FREQ_SELECT);
-               todc_write_val(todc_info->RTC_FREQ_SELECT,
-                       save_freq_select | RTC_DIV_RESET2);
-       }
-
-       if ((todc_info->rtc_type != TODC_TYPE_MC146818)
-                       || ((save_control & RTC_DM_BINARY) == 0)
-                       || RTC_ALWAYS_BCD) {
-               BIN_TO_BCD(tm->tm_sec);
-               BIN_TO_BCD(tm->tm_min);
-               BIN_TO_BCD(tm->tm_hour);
-               BIN_TO_BCD(tm->tm_mon);
-               BIN_TO_BCD(tm->tm_mday);
-               BIN_TO_BCD(tm->tm_year);
-       }
-
-       todc_write_val(todc_info->seconds, tm->tm_sec);
-       todc_write_val(todc_info->minutes, tm->tm_min);
-       todc_write_val(todc_info->hours, tm->tm_hour);
-       todc_write_val(todc_info->month, tm->tm_mon);
-       todc_write_val(todc_info->day_of_month, tm->tm_mday);
-       todc_write_val(todc_info->year, tm->tm_year);
-
-       todc_write_val(todc_info->control_a, save_control);
-
-       if (todc_info->rtc_type == TODC_TYPE_MC146818)
-               todc_write_val(todc_info->RTC_FREQ_SELECT, save_freq_select);
-
-       spin_unlock(&rtc_lock);
-       return 0;
-}
index 322f86e93de5b43f677e7bbc7e26a2a80fcb417c..ae249c6bbbcf3473920933b9aa7113718942321d 100644 (file)
@@ -3,6 +3,8 @@
  *
  * 2004-2005 (c) Tundra Semiconductor Corp.
  * Author: Alex Bounine (alexandreb@tundra.com)
+ * Author: Roy Zang (tie-fei.zang@freescale.com)
+ *        Add pci interrupt router host
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -48,6 +50,8 @@
 
 u32 tsi108_pci_cfg_base;
 u32 tsi108_csr_vir_base;
+static struct device_node *pci_irq_node;
+static struct irq_host *pci_irq_host;
 
 extern u32 get_vir_csrbase(void);
 extern u32 tsi108_read_reg(u32 reg_offset);
@@ -378,6 +382,38 @@ static struct irq_chip tsi108_pci_irq = {
        .unmask = tsi108_pci_irq_enable,
 };
 
+static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
+                           u32 *intspec, unsigned int intsize,
+                           irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+       *out_hwirq = intspec[0];
+       *out_flags = IRQ_TYPE_LEVEL_HIGH;
+       return 0;
+}
+
+static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
+                         irq_hw_number_t hw)
+{      unsigned int irq;
+       DBG("%s(%d, 0x%lx)\n", __FUNCTION__, virq, hw);
+       if ((virq >= 1) && (virq <= 4)){
+               irq = virq + IRQ_PCI_INTAD_BASE - 1;
+               get_irq_desc(irq)->status |= IRQ_LEVEL;
+               set_irq_chip(irq, &tsi108_pci_irq);
+       }
+       return 0;
+}
+
+static int pci_irq_host_match(struct irq_host *h, struct device_node *node)
+{
+       return pci_irq_node == node;
+}
+
+static struct irq_host_ops pci_irq_host_ops = {
+       .match = pci_irq_host_match,
+       .map = pci_irq_host_map,
+       .xlate = pci_irq_host_xlate,
+};
+
 /*
  * Exported functions
  */
@@ -391,15 +427,15 @@ static struct irq_chip tsi108_pci_irq = {
  * to the MPIC.
  */
 
-void __init tsi108_pci_int_init(void)
+void __init tsi108_pci_int_init(struct device_node *node)
 {
-       u_int i;
-
        DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
 
-       for (i = 0; i < NUM_PCI_IRQS; i++) {
-               irq_desc[i + IRQ_PCI_INTAD_BASE].chip = &tsi108_pci_irq;
-               irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL;
+       pci_irq_node = of_node_get(node);
+       pci_irq_host = irq_alloc_host(IRQ_HOST_MAP_LEGACY, 0, &pci_irq_host_ops, 0);
+       if (pci_irq_host == NULL) {
+               printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n");
+               return;
        }
 
        init_pci_source();
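Instead of poking irq_desc[] directly for the four PCI interrupts, the Tsi108 code now registers an irq_host whose match/map/xlate callbacks perform the per-interrupt setup on demand. A small userspace sketch of such a callback table; the types and behaviour here are illustrative only:

#include <stdio.h>

struct irq_host_ops_example {
        int (*match)(int node_id);
        int (*map)(unsigned int virq);
};

static int match_example(int node_id)
{
        return node_id == 7;    /* "is this my interrupt controller node?" */
}

static int map_example(unsigned int virq)
{
        printf("configuring virq %u as a level-triggered PCI interrupt\n", virq);
        return 0;
}

static struct irq_host_ops_example pci_irq_ops_example = {
        .match = match_example,
        .map   = map_example,
};

int main(void)
{
        if (pci_irq_ops_example.match(7))
                pci_irq_ops_example.map(1);
        return 0;
}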
index 109d874ecfbeaa7980381aba99081a8d9d56481a..51d97588e762e6fc9a55cae2d07c6598751343ba 100644 (file)
@@ -3,5 +3,10 @@
 ifdef CONFIG_PPC64
 EXTRA_CFLAGS += -mno-minimal-toc
 endif
-obj-y                  += xmon.o ppc-dis.o ppc-opc.o setjmp.o start.o \
-                          nonstdio.o
+
+obj-y                  += xmon.o setjmp.o start.o nonstdio.o
+
+ifdef CONFIG_XMON_DISASSEMBLY
+obj-y                  += ppc-dis.o ppc-opc.o
+obj-$(CONFIG_SPU_BASE) += spu-dis.o spu-opc.o
+endif
diff --git a/arch/powerpc/xmon/dis-asm.h b/arch/powerpc/xmon/dis-asm.h
new file mode 100644 (file)
index 0000000..be3533b
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef _POWERPC_XMON_DIS_ASM_H
+#define _POWERPC_XMON_DIS_ASM_H
+/*
+ * Copyright (C) 2006 Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern void print_address (unsigned long memaddr);
+
+#ifdef CONFIG_XMON_DISASSEMBLY
+extern int print_insn_powerpc(unsigned long insn, unsigned long memaddr);
+extern int print_insn_spu(unsigned long insn, unsigned long memaddr);
+#else
+static inline int print_insn_powerpc(unsigned long insn, unsigned long memaddr)
+{
+       printf("%.8x", insn);
+       return 0;
+}
+
+static inline int print_insn_spu(unsigned long insn, unsigned long memaddr)
+{
+       printf("%.8x", insn);
+       return 0;
+}
+#endif
+
+#endif /* _POWERPC_XMON_DIS_ASM_H */
index ac0a9d2427e060e62a4004614408161816b82a13..89098f320ad570577328ff42ba4e426a5d22d37f 100644 (file)
@@ -1,5 +1,6 @@
 /* ppc-dis.c -- Disassemble PowerPC instructions
-   Copyright 1994 Free Software Foundation, Inc.
+   Copyright 1994, 1995, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+   Free Software Foundation, Inc.
    Written by Ian Lance Taylor, Cygnus Support
 
 This file is part of GDB, GAS, and the GNU binutils.
@@ -16,27 +17,36 @@ the GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this file; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 
+#include <asm/cputable.h>
 #include "nonstdio.h"
 #include "ansidecl.h"
 #include "ppc.h"
-
-extern void print_address (unsigned long memaddr);
+#include "dis-asm.h"
 
 /* Print a PowerPC or POWER instruction.  */
 
 int
-print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
+print_insn_powerpc (unsigned long insn, unsigned long memaddr)
 {
   const struct powerpc_opcode *opcode;
   const struct powerpc_opcode *opcode_end;
   unsigned long op;
+  int dialect;
 
-  if (dialect == 0)
-    dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
+  dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
              | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
 
+  if (cpu_has_feature(CPU_FTRS_POWER5))
+    dialect |= PPC_OPCODE_POWER5;
+
+  if (cpu_has_feature(CPU_FTRS_CELL))
+    dialect |= PPC_OPCODE_CELL | PPC_OPCODE_ALTIVEC;
+
+  if (cpu_has_feature(CPU_FTRS_POWER6))
+    dialect |= PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_ALTIVEC;
+
   /* Get the major opcode of the instruction.  */
   op = PPC_OP (insn);
 
@@ -121,7 +131,8 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
            }
 
          /* Print the operand as directed by the flags.  */
-         if ((operand->flags & PPC_OPERAND_GPR) != 0)
+         if ((operand->flags & PPC_OPERAND_GPR) != 0
+             || ((operand->flags & PPC_OPERAND_GPR_0) != 0 && value != 0))
            printf("r%ld", value);
          else if ((operand->flags & PPC_OPERAND_FPR) != 0)
            printf("f%ld", value);
@@ -137,7 +148,7 @@ print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
          else
            {
              if (operand->bits == 3)
-               printf("cr%d", value);
+               printf("cr%ld", value);
              else
                {
                  static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
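With the dialect argument gone, the disassembler above derives the opcode dialect from runtime CPU features rather than a caller-supplied value, OR-ing extra flags in for POWER5, POWER6 and Cell. A trivial sketch of building such a flag mask; the feature tests and flag values are invented:

#include <stdio.h>

#define DIALECT_BASE    0x01
#define DIALECT_POWER5  0x02
#define DIALECT_POWER6  0x04
#define DIALECT_CELL    0x08
#define DIALECT_ALTIVEC 0x10

/* Pretend-detected CPU features. */
static const int cpu_is_power5 = 0;
static const int cpu_is_power6 = 1;
static const int cpu_is_cell   = 0;

int main(void)
{
        int dialect = DIALECT_BASE;

        if (cpu_is_power5)
                dialect |= DIALECT_POWER5;
        if (cpu_is_cell)
                dialect |= DIALECT_CELL | DIALECT_ALTIVEC;
        if (cpu_is_power6)
                dialect |= DIALECT_POWER5 | DIALECT_POWER6 | DIALECT_ALTIVEC;

        printf("dialect mask: 0x%x\n", dialect);
        return 0;
}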
index 5ee8fc32f824143550cf9556bfebf42daea16a1c..5d841f4b353072deccd5b024b10d285b17cadf0f 100644 (file)
@@ -1,6 +1,6 @@
 /* ppc-opc.c -- PowerPC opcode list
-   Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003
-   Free Software Foundation, Inc.
+   Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
+   2005 Free Software Foundation, Inc.
    Written by Ian Lance Taylor, Cygnus Support
 
    This file is part of GDB, GAS, and the GNU binutils.
@@ -17,8 +17,8 @@
 
    You should have received a copy of the GNU General Public License
    along with this file; see the file COPYING.  If not, write to the Free
-   Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-   02111-1307, USA.  */
+   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
+   02110-1301, USA.  */
 
 #include <linux/stddef.h>
 #include "nonstdio.h"
@@ -86,6 +86,8 @@ static unsigned long insert_sh6 (unsigned long, long, int, const char **);
 static long extract_sh6 (unsigned long, int, int *);
 static unsigned long insert_spr (unsigned long, long, int, const char **);
 static long extract_spr (unsigned long, int, int *);
+static unsigned long insert_sprg (unsigned long, long, int, const char **);
+static long extract_sprg (unsigned long, int, int *);
 static unsigned long insert_tbr (unsigned long, long, int, const char **);
 static long extract_tbr (unsigned long, int, int *);
 static unsigned long insert_ev2 (unsigned long, long, int, const char **);
@@ -196,8 +198,11 @@ const struct powerpc_operand powerpc_operands[] =
 #define BOE BO + 1
   { 5, 21, insert_boe, extract_boe, 0 },
 
+#define BH BOE + 1
+  { 2, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
   /* The BT field in an X or XL form instruction.  */
-#define BT BOE + 1
+#define BT BH + 1
   { 5, 21, NULL, NULL, PPC_OPERAND_CR },
 
   /* The condition register number portion of the BI field in a B form
@@ -301,10 +306,14 @@ const struct powerpc_operand powerpc_operands[] =
 #define L FXM4 + 1
   { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
 
-  /* The LEV field in a POWER SC form instruction.  */
-#define LEV L + 1
+  /* The LEV field in a POWER SVC form instruction.  */
+#define SVC_LEV L + 1
   { 7, 5, NULL, NULL, 0 },
 
+  /* The LEV field in an SC form instruction.  */
+#define LEV SVC_LEV + 1
+  { 7, 5, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
   /* The LI field in an I form instruction.  The lower two bits are
      forced to zero.  */
 #define LI LEV + 1
@@ -346,7 +355,7 @@ const struct powerpc_operand powerpc_operands[] =
 
   /* The MO field in an mbar instruction.  */
 #define MO MB6 + 1
-  { 5, 21, NULL, NULL, 0 },
+  { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
 
   /* The NB field in an X form instruction.  The value 32 is stored as
      0.  */
@@ -364,30 +373,38 @@ const struct powerpc_operand powerpc_operands[] =
 #define RA_MASK (0x1f << 16)
   { 5, 16, NULL, NULL, PPC_OPERAND_GPR },
 
+  /* As above, but 0 in the RA field means zero, not r0.  */
+#define RA0 RA + 1
+  { 5, 16, NULL, NULL, PPC_OPERAND_GPR_0 },
+
   /* The RA field in the DQ form lq instruction, which has special
      value restrictions.  */
-#define RAQ RA + 1
-  { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR },
+#define RAQ RA0 + 1
+  { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RA field in a D or X form instruction which is an updating
      load, which means that the RA field may not be zero and may not
      equal the RT field.  */
 #define RAL RAQ + 1
-  { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
+  { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RA field in an lmw instruction, which has special value
      restrictions.  */
 #define RAM RAL + 1
-  { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
+  { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RA field in a D or X form instruction which is an updating
      store or an updating floating point load, which means that the RA
      field may not be zero.  */
 #define RAS RAM + 1
-  { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
+  { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR_0 },
+
+  /* The RA field of the tlbwe instruction, which is optional.  */
+#define RAOPT RAS + 1
+  { 5, 16, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
 
   /* The RB field in an X, XO, M, or MDS form instruction.  */
-#define RB RAS + 1
+#define RB RAOPT + 1
 #define RB_MASK (0x1f << 11)
   { 5, 11, NULL, NULL, PPC_OPERAND_GPR },
 
@@ -408,15 +425,20 @@ const struct powerpc_operand powerpc_operands[] =
   /* The RS field of the DS form stq instruction, which has special
      value restrictions.  */
 #define RSQ RS + 1
-  { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR },
+  { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR_0 },
 
   /* The RT field of the DQ form lq instruction, which has special
      value restrictions.  */
 #define RTQ RSQ + 1
-  { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR },
+  { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR_0 },
+
+  /* The RS field of the tlbwe instruction, which is optional.  */
+#define RSO RTQ + 1
+#define RTO RSO
+  { 5, 21, NULL, NULL, PPC_OPERAND_GPR | PPC_OPERAND_OPTIONAL },
 
   /* The SH field in an X or M form instruction.  */
-#define SH RTQ + 1
+#define SH RSO + 1
 #define SH_MASK (0x1f << 11)
   { 5, 11, NULL, NULL, 0 },
 
@@ -425,8 +447,12 @@ const struct powerpc_operand powerpc_operands[] =
 #define SH6_MASK ((0x1f << 11) | (1 << 1))
   { 6, 1, insert_sh6, extract_sh6, 0 },
 
+  /* The SH field of the tlbwe instruction, which is optional.  */
+#define SHO SH6 + 1
+  { 5, 11, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
   /* The SI field in a D form instruction.  */
-#define SI SH6 + 1
+#define SI SHO + 1
   { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
 
   /* The SI field in a D form instruction when we accept a wide range
@@ -448,8 +474,7 @@ const struct powerpc_operand powerpc_operands[] =
 
   /* The SPRG register number in an XFX form m[ft]sprg instruction.  */
 #define SPRG SPRBAT + 1
-#define SPRG_MASK (0x3 << 16)
-  { 2, 16, NULL, NULL, 0 },
+  { 5, 16, insert_sprg, extract_sprg, 0 },
 
   /* The SR field in an X form instruction.  */
 #define SR SPRG + 1
@@ -536,10 +561,45 @@ const struct powerpc_operand powerpc_operands[] =
 #define WS_MASK (0x7 << 11)
   { 3, 11, NULL, NULL, 0 },
 
-  /* The L field in an mtmsrd instruction */
+  /* The L field in an mtmsrd or A form instruction.  */
 #define MTMSRD_L WS + 1
+#define A_L MTMSRD_L
   { 1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL },
 
+  /* The DCM field in a Z form instruction.  */
+#define DCM MTMSRD_L + 1
+  { 6, 16, NULL, NULL, 0 },
+
+  /* Likewise, the DGM field in a Z form instruction.  */
+#define DGM DCM + 1
+  { 6, 16, NULL, NULL, 0 },
+
+#define TE DGM + 1
+  { 5, 11, NULL, NULL, 0 },
+
+#define RMC TE + 1
+  { 2, 21, NULL, NULL, 0 },
+
+#define R RMC + 1
+  { 1, 15, NULL, NULL, 0 },
+
+#define SP R + 1
+  { 2, 11, NULL, NULL, 0 },
+
+#define S SP + 1
+  { 1, 11, NULL, NULL, 0 },
+
+  /* SH field starting at bit position 16.  */
+#define SH16 S + 1
+  { 6, 10, NULL, NULL, 0 },
+
+  /* The L field in an X form instruction with the RT field fixed.  */
+#define XRT_L SH16 + 1
+  { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
+
+  /* The EH field in a larx instruction.  */
+#define EH XRT_L + 1
+  { 1, 0, NULL, NULL, PPC_OPERAND_OPTIONAL },
 };
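/* Annotation (not part of the patch): a minimal sketch of how a consumer
   of this table, such as the disassembler, might honor the new
   PPC_OPERAND_GPR_0 flag, under which a register field value of 0 means
   the literal zero rather than r0.  The helper name is hypothetical and
   assumes <stdio.h> and opcode/ppc.h are available, as in this file.  */

static void
sketch_print_gpr0 (FILE *stream, unsigned long value, unsigned long flags)
{
  if ((flags & PPC_OPERAND_GPR_0) != 0 && value == 0)
    fprintf (stream, "0");            /* e.g. the 0 in "addi r3,0,1"  */
  else
    fprintf (stream, "r%lu", value);  /* ordinary GPR operand  */
}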
 
 /* The functions used to insert and extract complicated operands.  */
@@ -550,7 +610,6 @@ const struct powerpc_operand powerpc_operands[] =
    and the extraction function just checks that the fields are the
    same.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bat (unsigned long insn,
            long value ATTRIBUTE_UNUSED,
@@ -576,7 +635,6 @@ extract_bat (unsigned long insn,
    and the extraction function just checks that the fields are the
    same.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bba (unsigned long insn,
            long value ATTRIBUTE_UNUSED,
@@ -599,7 +657,6 @@ extract_bba (unsigned long insn,
 /* The BD field in a B form instruction.  The lower two bits are
    forced to zero.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bd (unsigned long insn,
           long value,
@@ -609,7 +666,6 @@ insert_bd (unsigned long insn,
   return insn | (value & 0xfffc);
 }
 
-/*ARGSUSED*/
 static long
 extract_bd (unsigned long insn,
            int dialect ATTRIBUTE_UNUSED,
@@ -631,7 +687,6 @@ extract_bd (unsigned long insn,
    in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000
    for branch on CTR.  We only handle the taken/not-taken hint here.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bdm (unsigned long insn,
            long value,
@@ -677,7 +732,6 @@ extract_bdm (unsigned long insn,
    This is like BDM, above, except that the branch is expected to be
    taken.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_bdp (unsigned long insn,
            long value,
@@ -831,7 +885,6 @@ extract_boe (unsigned long insn,
 /* The DQ field in a DQ form instruction.  This is like D, but the
    lower four bits are forced to zero. */
 
-/*ARGSUSED*/
 static unsigned long
 insert_dq (unsigned long insn,
           long value,
@@ -843,7 +896,6 @@ insert_dq (unsigned long insn,
   return insn | (value & 0xfff0);
 }
 
-/*ARGSUSED*/
 static long
 extract_dq (unsigned long insn,
            int dialect ATTRIBUTE_UNUSED,
@@ -918,7 +970,6 @@ extract_ev8 (unsigned long insn,
 /* The DS field in a DS form instruction.  This is like D, but the
    lower two bits are forced to zero.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_ds (unsigned long insn,
           long value,
@@ -930,7 +981,6 @@ insert_ds (unsigned long insn,
   return insn | (value & 0xfffc);
 }
 
-/*ARGSUSED*/
 static long
 extract_ds (unsigned long insn,
            int dialect ATTRIBUTE_UNUSED,
@@ -941,7 +991,6 @@ extract_ds (unsigned long insn,
 
 /* The DE field in a DE form instruction.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_de (unsigned long insn,
           long value,
@@ -953,7 +1002,6 @@ insert_de (unsigned long insn,
   return insn | ((value << 4) & 0xfff0);
 }
 
-/*ARGSUSED*/
 static long
 extract_de (unsigned long insn,
            int dialect ATTRIBUTE_UNUSED,
@@ -964,7 +1012,6 @@ extract_de (unsigned long insn,
 
 /* The DES field in a DES form instruction.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_des (unsigned long insn,
            long value,
@@ -978,7 +1025,6 @@ insert_des (unsigned long insn,
   return insn | ((value << 2) & 0xfff0);
 }
 
-/*ARGSUSED*/
 static long
 extract_des (unsigned long insn,
             int dialect ATTRIBUTE_UNUSED,
@@ -995,17 +1041,33 @@ insert_fxm (unsigned long insn,
            int dialect,
            const char **errmsg)
 {
+  /* If we're handling the mfocrf and mtocrf insns ensure that exactly
+     one bit of the mask field is set.  */
+  if ((insn & (1 << 20)) != 0)
+    {
+      if (value == 0 || (value & -value) != value)
+       {
+         *errmsg = _("invalid mask field");
+         value = 0;
+       }
+    }
+
   /* If the optional field on mfcr is missing that means we want to use
      the old form of the instruction that moves the whole cr.  In that
      case we'll have VALUE zero.  There doesn't seem to be a way to
      distinguish this from the case where someone writes mfcr %r3,0.  */
-  if (value == 0)
+  else if (value == 0)
     ;
 
   /* If only one bit of the FXM field is set, we can use the new form
      of the instruction, which is faster.  Unlike the Power4 branch hint
-     encoding, this is not backward compatible.  */
-  else if ((dialect & PPC_OPCODE_POWER4) != 0 && (value & -value) == value)
+     encoding, this is not backward compatible.  Do not generate the
+     new form unless -mpower4 has been given, or -many and the two
+     operand form of mfcr was used.  */
+  else if ((value & -value) == value
+          && ((dialect & PPC_OPCODE_POWER4) != 0
+              || ((dialect & PPC_OPCODE_ANY) != 0
+                  && (insn & (0x3ff << 1)) == 19 << 1)))
     insn |= 1 << 20;
 
   /* Any other value on mfcr is an error.  */
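/* Annotation (not part of the patch): the mfocrf/mtocrf validity test used
   above, pulled out as a hypothetical helper for clarity.  Exactly one bit
   of the FXM mask may be set; the power-of-two test relies on
   two's-complement negation, as the code above does.  */

static int
sketch_fxm_is_single_field (long value)
{
  /* 0x80 (one CR field selected) passes; 0 and 0x81 (two fields) fail.  */
  return value != 0 && (value & -value) == value;
}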
@@ -1020,7 +1082,7 @@ insert_fxm (unsigned long insn,
 
 static long
 extract_fxm (unsigned long insn,
-            int dialect,
+            int dialect ATTRIBUTE_UNUSED,
             int *invalid)
 {
   long mask = (insn >> 12) & 0xff;
@@ -1028,14 +1090,9 @@ extract_fxm (unsigned long insn,
   /* Is this a Power4 insn?  */
   if ((insn & (1 << 20)) != 0)
     {
-      if ((dialect & PPC_OPCODE_POWER4) == 0)
+      /* Exactly one bit of MASK should be set.  */
+      if (mask == 0 || (mask & -mask) != mask)
        *invalid = 1;
-      else
-       {
-         /* Exactly one bit of MASK should be set.  */
-         if (mask == 0 || (mask & -mask) != mask)
-           *invalid = 1;
-       }
     }
 
   /* Check that non-power4 form of mfcr has a zero MASK.  */
@@ -1051,7 +1108,6 @@ extract_fxm (unsigned long insn,
 /* The LI field in an I form instruction.  The lower two bits are
    forced to zero.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_li (unsigned long insn,
           long value,
@@ -1063,7 +1119,6 @@ insert_li (unsigned long insn,
   return insn | (value & 0x3fffffc);
 }
 
-/*ARGSUSED*/
 static long
 extract_li (unsigned long insn,
            int dialect ATTRIBUTE_UNUSED,
@@ -1163,7 +1218,6 @@ extract_mbe (unsigned long insn,
 /* The MB or ME field in an MD or MDS form instruction.  The high bit
    is wrapped to the low end.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_mb6 (unsigned long insn,
            long value,
@@ -1173,7 +1227,6 @@ insert_mb6 (unsigned long insn,
   return insn | ((value & 0x1f) << 6) | (value & 0x20);
 }
 
-/*ARGSUSED*/
 static long
 extract_mb6 (unsigned long insn,
             int dialect ATTRIBUTE_UNUSED,
@@ -1198,7 +1251,6 @@ insert_nb (unsigned long insn,
   return insn | ((value & 0x1f) << 11);
 }
 
-/*ARGSUSED*/
 static long
 extract_nb (unsigned long insn,
            int dialect ATTRIBUTE_UNUSED,
@@ -1217,7 +1269,6 @@ extract_nb (unsigned long insn,
    invalid, since we never want to recognize an instruction which uses
    a field of this type.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_nsi (unsigned long insn,
            long value,
@@ -1269,7 +1320,6 @@ insert_ram (unsigned long insn,
 /* The RA field in the DQ form lq instruction, which has special
    value restrictions.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_raq (unsigned long insn,
            long value,
@@ -1304,7 +1354,6 @@ insert_ras (unsigned long insn,
    function just copies the BT field into the BA field, and the
    extraction function just checks that the fields are the same.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_rbs (unsigned long insn,
            long value ATTRIBUTE_UNUSED,
@@ -1327,7 +1376,6 @@ extract_rbs (unsigned long insn,
 /* The RT field of the DQ form lq instruction, which has special
    value restrictions.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_rtq (unsigned long insn,
            long value,
@@ -1342,7 +1390,6 @@ insert_rtq (unsigned long insn,
 /* The RS field of the DS form stq instruction, which has special
    value restrictions.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_rsq (unsigned long insn,
            long value ATTRIBUTE_UNUSED,
@@ -1356,7 +1403,6 @@ insert_rsq (unsigned long insn,
 
 /* The SH field in an MD form instruction.  This is split.  */
 
-/*ARGSUSED*/
 static unsigned long
 insert_sh6 (unsigned long insn,
            long value,
@@ -1366,7 +1412,6 @@ insert_sh6 (unsigned long insn,
   return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4);
 }
 
-/*ARGSUSED*/
 static long
 extract_sh6 (unsigned long insn,
             int dialect ATTRIBUTE_UNUSED,
@@ -1395,6 +1440,47 @@ extract_spr (unsigned long insn,
   return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
 }
 
+/* Some dialects have 8 SPRG registers instead of the standard 4.  */
+
+static unsigned long
+insert_sprg (unsigned long insn,
+            long value,
+            int dialect,
+            const char **errmsg)
+{
+  /* This check uses PPC_OPCODE_403 because PPC405 is later defined
+     as a synonym.  If ever a 405 specific dialect is added this
+     check should use that instead.  */
+  if (value > 7
+      || (value > 3
+         && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
+    *errmsg = _("invalid sprg number");
+
+  /* If this is mfsprg4..7 then use spr 260..263 which can be read in
+     user mode.  Anything else must use spr 272..279.  */
+  if (value <= 3 || (insn & 0x100) != 0)
+    value |= 0x10;
+
+  return insn | ((value & 0x17) << 16);
+}
+
+static long
+extract_sprg (unsigned long insn,
+             int dialect,
+             int *invalid)
+{
+  unsigned long val = (insn >> 16) & 0x1f;
+
+  /* mfsprg can use 260..263 and 272..279.  mtsprg only uses spr 272..279.
+     If not BOOKE or 405, then both use only 272..275.  */
+  if (val <= 3
+      || (val < 0x10 && (insn & 0x100) != 0)
+      || (val - 0x10 > 3
+         && (dialect & (PPC_OPCODE_BOOKE | PPC_OPCODE_403)) == 0))
+    *invalid = 1;
+  return val & 7;
+}
+
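/* Annotation (not part of the patch): the SPRG-number-to-SPR-field mapping
   the two functions above implement, assuming a BookE or 4xx dialect:
     sprg0..3 (mfsprg and mtsprg)  ->  field 0x10..0x13  (SPR 272..275)
     mfsprg4..7                    ->  field 0x04..0x07  (SPR 260..263,
                                                          user readable)
     mtsprg4..7                    ->  field 0x14..0x17  (SPR 276..279)
   extract_sprg reverses this and marks any other encoding invalid, so the
   generic mfspr/mtspr table entries can pick such instructions up instead.  */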
 /* The TBR field in an XFX instruction.  This is just like SPR, but it
    is optional.  When TBR is omitted, it must be inserted as 268 (the
    magic number of the TB register).  These functions treat 0
@@ -1460,6 +1546,9 @@ extract_tbr (unsigned long insn,
 /* An A_MASK with the FRA and FRC fields fixed.  */
 #define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK)
 
+/* An AFRAFRC_MASK, but with L bit clear.  */
+#define AFRALFRC_MASK (AFRAFRC_MASK & ~((unsigned long) 1 << 16))
+
 /* A B form instruction.  */
 #define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1))
 #define B_MASK B (0x3f, 1, 1)
@@ -1494,11 +1583,11 @@ extract_tbr (unsigned long insn,
 
 /* A Context form instruction.  */
 #define CTX(op, xop)   (OP (op) | (((unsigned long)(xop)) & 0x7))
-#define CTX_MASK       CTX(0x3f, 0x7)
+#define CTX_MASK CTX(0x3f, 0x7)
 
 /* A User Context form instruction.  */
 #define UCTX(op, xop)  (OP (op) | (((unsigned long)(xop)) & 0x1f))
-#define UCTX_MASK      UCTX(0x3f, 0x1f)
+#define UCTX_MASK UCTX(0x3f, 0x1f)
 
 /* The main opcode mask with the RA field clear.  */
 #define DRA_MASK (OP_MASK | RA_MASK)
@@ -1570,12 +1659,21 @@ extract_tbr (unsigned long insn,
 /* An X form instruction.  */
 #define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
 
+/* A Z form instruction.  */
+#define Z(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1))
+
 /* An X form instruction with the RC bit specified.  */
 #define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
 
+/* A Z form instruction with the RC bit specified.  */
+#define ZRC(op, xop, rc) (Z ((op), (xop)) | ((rc) & 1))
+
 /* The mask for an X form instruction.  */
 #define X_MASK XRC (0x3f, 0x3ff, 1)
 
+/* The mask for a Z form instruction.  */
+#define Z_MASK ZRC (0x3f, 0x1ff, 1)
+
 /* An X_MASK with the RA field fixed.  */
 #define XRA_MASK (X_MASK | RA_MASK)
 
@@ -1585,6 +1683,9 @@ extract_tbr (unsigned long insn,
 /* An X_MASK with the RT field fixed.  */
 #define XRT_MASK (X_MASK | RT_MASK)
 
+/* An XRT_MASK mask with the L bits clear.  */
+#define XLRT_MASK (XRT_MASK & ~((unsigned long) 0x3 << 21))
+
 /* An X_MASK with the RA and RB fields fixed.  */
 #define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
 
@@ -1597,8 +1698,8 @@ extract_tbr (unsigned long insn,
 /* An XRTRA_MASK, but with L bit clear.  */
 #define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21))
 
-/* An X form comparison instruction.  */
-#define XCMPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
+/* An X form instruction with the L bit specified.  */
+#define XOPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
 
 /* The mask for an X form comparison instruction.  */
 #define XCMP_MASK (X_MASK | (((unsigned long)1) << 22))
@@ -1621,6 +1722,9 @@ extract_tbr (unsigned long insn,
 /* An X form sync instruction with everything filled in except the LS field.  */
 #define XSYNC_MASK (0xff9fffff)
 
+/* An X_MASK, but with the EH bit clear.  */
+#define XEH_MASK (X_MASK & ~((unsigned long) 1))
+
 /* An X form AltiVec dss instruction.  */
 #define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25))
 #define XDSS_MASK XDSS(0x3f, 0x3ff, 1)
@@ -1663,6 +1767,9 @@ extract_tbr (unsigned long insn,
 #define XLYBB_MASK (XLYLK_MASK | BB_MASK)
 #define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK)
 
+/* A mask for branch instructions using the BH field.  */
+#define XLBH_MASK (XL_MASK | (0x1c << 11))
+
 /* An XL_MASK with the BO and BB fields fixed.  */
 #define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK)
 
@@ -1682,11 +1789,12 @@ extract_tbr (unsigned long insn,
 #define XS_MASK XS (0x3f, 0x1ff, 1)
 
 /* A mask for the FXM version of an XFX form instruction.  */
-#define XFXFXM_MASK (X_MASK | (1 << 11))
+#define XFXFXM_MASK (X_MASK | (1 << 11) | (1 << 20))
 
 /* An XFX form instruction with the FXM field filled in.  */
-#define XFXM(op, xop, fxm) \
-  (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12))
+#define XFXM(op, xop, fxm, p4) \
+  (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12) \
+   | ((unsigned long)(p4) << 20))
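/* Annotation (not part of the patch): with the new P4 argument,
   XFXM (31, 19, 0, 1) is the mfocrf encoding and XFXM (31, 144, 0, 1) the
   mtocrf encoding used in the opcode table below, while XFXFXM_MASK now
   covers bit 20 as well, so an instruction with that bit set is matched by
   those entries rather than by plain mfcr/mtcrf.  */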
 
 /* An XFX form instruction with the SPR field filled in.  */
 #define XSPR(op, xop, spr) \
@@ -1699,7 +1807,7 @@ extract_tbr (unsigned long insn,
 
 /* An XFX form instruction with the SPR field filled in except for the
    SPRG field.  */
-#define XSPRG_MASK (XSPR_MASK &~ SPRG_MASK)
+#define XSPRG_MASK (XSPR_MASK & ~(0x17 << 16))
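/* Annotation (not part of the patch): 0x17 rather than 0x1f because the
   widened SPRG operand only ever sets the four bits covered by 0x17 << 16
   (see insert_sprg above); the 0x8 << 16 bit stays zero and therefore
   remains part of the fixed opcode mask.  */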
 
 /* An X form instruction with everything filled in except the E field.  */
 #define XE_MASK (0xffff7fff)
@@ -1769,6 +1877,9 @@ extract_tbr (unsigned long insn,
 #define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON
 #define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM
 #define POWER4 PPC_OPCODE_POWER4
+#define POWER5 PPC_OPCODE_POWER5
+#define POWER6 PPC_OPCODE_POWER6
+#define CELL   PPC_OPCODE_CELL
 #define PPC32   PPC_OPCODE_32 | PPC_OPCODE_PPC
 #define PPC64   PPC_OPCODE_64 | PPC_OPCODE_PPC
 #define PPC403 PPC_OPCODE_403
@@ -1776,7 +1887,7 @@ extract_tbr (unsigned long insn,
 #define PPC440 PPC_OPCODE_440
 #define PPC750 PPC
 #define PPC860 PPC
-#define PPCVEC PPC_OPCODE_ALTIVEC | PPC_OPCODE_PPC
+#define PPCVEC PPC_OPCODE_ALTIVEC
 #define        POWER   PPC_OPCODE_POWER
 #define        POWER2  PPC_OPCODE_POWER | PPC_OPCODE_POWER2
 #define PPCPWR2        PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2
@@ -1790,6 +1901,7 @@ extract_tbr (unsigned long insn,
 #define BOOKE  PPC_OPCODE_BOOKE
 #define BOOKE64        PPC_OPCODE_BOOKE64
 #define CLASSIC        PPC_OPCODE_CLASSIC
+#define PPCE300 PPC_OPCODE_E300
 #define PPCSPE PPC_OPCODE_SPE
 #define PPCISEL        PPC_OPCODE_ISEL
 #define PPCEFS PPC_OPCODE_EFS
@@ -1952,6 +2064,41 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "nmaclhwso.",        XO(4,494,1,1), XO_MASK, PPC405|PPC440,  { RT, RA, RB } },
 { "mfvscr",  VX(4, 1540), VX_MASK,     PPCVEC,         { VD } },
 { "mtvscr",  VX(4, 1604), VX_MASK,     PPCVEC,         { VB } },
+
+  /* Double-precision opcodes.  */
+  /* Some of these conflict with AltiVec, so move them before, since
+     PPCVEC includes the PPC_OPCODE_PPC set.  */
+{ "efscfd",   VX(4, 719), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdabs",   VX(4, 740), VX_MASK,     PPCEFS,         { RS, RA } },
+{ "efdnabs",  VX(4, 741), VX_MASK,     PPCEFS,         { RS, RA } },
+{ "efdneg",   VX(4, 742), VX_MASK,     PPCEFS,         { RS, RA } },
+{ "efdadd",   VX(4, 736), VX_MASK,     PPCEFS,         { RS, RA, RB } },
+{ "efdsub",   VX(4, 737), VX_MASK,     PPCEFS,         { RS, RA, RB } },
+{ "efdmul",   VX(4, 744), VX_MASK,     PPCEFS,         { RS, RA, RB } },
+{ "efddiv",   VX(4, 745), VX_MASK,     PPCEFS,         { RS, RA, RB } },
+{ "efdcmpgt", VX(4, 748), VX_MASK,     PPCEFS,         { CRFD, RA, RB } },
+{ "efdcmplt", VX(4, 749), VX_MASK,     PPCEFS,         { CRFD, RA, RB } },
+{ "efdcmpeq", VX(4, 750), VX_MASK,     PPCEFS,         { CRFD, RA, RB } },
+{ "efdtstgt", VX(4, 764), VX_MASK,     PPCEFS,         { CRFD, RA, RB } },
+{ "efdtstlt", VX(4, 765), VX_MASK,     PPCEFS,         { CRFD, RA, RB } },
+{ "efdtsteq", VX(4, 766), VX_MASK,     PPCEFS,         { CRFD, RA, RB } },
+{ "efdcfsi",  VX(4, 753), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdcfsid", VX(4, 739), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdcfui",  VX(4, 752), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdcfuid", VX(4, 738), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdcfsf",  VX(4, 755), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdcfuf",  VX(4, 754), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctsi",  VX(4, 757), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctsidz",VX(4, 747), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctsiz", VX(4, 762), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctui",  VX(4, 756), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctuidz",VX(4, 746), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctuiz", VX(4, 760), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctsf",  VX(4, 759), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdctuf",  VX(4, 758), VX_MASK,     PPCEFS,         { RS, RB } },
+{ "efdcfs",   VX(4, 751), VX_MASK,     PPCEFS,         { RS, RB } },
+  /* End of double-precision opcodes.  */
+
 { "vaddcuw", VX(4,  384), VX_MASK,     PPCVEC,         { VD, VA, VB } },
 { "vaddfp",  VX(4,   10), VX_MASK,     PPCVEC,         { VD, VA, VB } },
 { "vaddsbs", VX(4,  768), VX_MASK,     PPCVEC,         { VD, VA, VB } },
@@ -2389,16 +2536,16 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "li",             OP(14),    DRA_MASK,       PPCCOM,         { RT, SI } },
 { "lil",     OP(14),   DRA_MASK,       PWRCOM,         { RT, SI } },
-{ "addi",    OP(14),   OP_MASK,        PPCCOM,         { RT, RA, SI } },
-{ "cal",     OP(14),   OP_MASK,        PWRCOM,         { RT, D, RA } },
-{ "subi",    OP(14),   OP_MASK,        PPCCOM,         { RT, RA, NSI } },
-{ "la",             OP(14),    OP_MASK,        PPCCOM,         { RT, D, RA } },
+{ "addi",    OP(14),   OP_MASK,        PPCCOM,         { RT, RA0, SI } },
+{ "cal",     OP(14),   OP_MASK,        PWRCOM,         { RT, D, RA0 } },
+{ "subi",    OP(14),   OP_MASK,        PPCCOM,         { RT, RA0, NSI } },
+{ "la",             OP(14),    OP_MASK,        PPCCOM,         { RT, D, RA0 } },
 
 { "lis",     OP(15),   DRA_MASK,       PPCCOM,         { RT, SISIGNOPT } },
 { "liu",     OP(15),   DRA_MASK,       PWRCOM,         { RT, SISIGNOPT } },
-{ "addis",   OP(15),   OP_MASK,        PPCCOM,         { RT,RA,SISIGNOPT } },
-{ "cau",     OP(15),   OP_MASK,        PWRCOM,         { RT,RA,SISIGNOPT } },
-{ "subis",   OP(15),   OP_MASK,        PPCCOM,         { RT, RA, NSI } },
+{ "addis",   OP(15),   OP_MASK,        PPCCOM,         { RT,RA0,SISIGNOPT } },
+{ "cau",     OP(15),   OP_MASK,        PWRCOM,         { RT,RA0,SISIGNOPT } },
+{ "subis",   OP(15),   OP_MASK,        PPCCOM,         { RT, RA0, NSI } },
 
 { "bdnz-",   BBO(16,BODNZ,0,0),      BBOATBI_MASK, PPCCOM,     { BDM } },
 { "bdnz+",   BBO(16,BODNZ,0,0),      BBOATBI_MASK, PPCCOM,     { BDP } },
@@ -2665,9 +2812,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "bcla+",   B(16,1,1),        B_MASK,         PPCCOM,         { BOE, BI, BDPA } },
 { "bcla",    B(16,1,1),        B_MASK,         COM,            { BO, BI, BDA } },
 
-{ "sc",      SC(17,1,0), 0xffffffff,   PPC,            { 0 } },
-{ "svc",     SC(17,0,0), SC_MASK,      POWER,          { LEV, FL1, FL2 } },
-{ "svcl",    SC(17,0,1), SC_MASK,      POWER,          { LEV, FL1, FL2 } },
+{ "sc",      SC(17,1,0), SC_MASK,      PPC,            { LEV } },
+{ "svc",     SC(17,0,0), SC_MASK,      POWER,          { SVC_LEV, FL1, FL2 } },
+{ "svcl",    SC(17,0,1), SC_MASK,      POWER,          { SVC_LEV, FL1, FL2 } },
 { "svca",    SC(17,1,0), SC_MASK,      PWRCOM,         { SV } },
 { "svcla",   SC(17,1,1), SC_MASK,      POWER,          { SV } },
 
@@ -2890,12 +3037,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM,  { BI } },
 { "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4,        { BI } },
 { "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
-{ "bclr",    XLLK(19,16,0), XLYBB_MASK,        PPCCOM,         { BO, BI } },
-{ "bclrl",   XLLK(19,16,1), XLYBB_MASK,        PPCCOM,         { BO, BI } },
 { "bclr+",   XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM,     { BOE, BI } },
 { "bclrl+",  XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM,     { BOE, BI } },
 { "bclr-",   XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM,     { BOE, BI } },
 { "bclrl-",  XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM,     { BOE, BI } },
+{ "bclr",    XLLK(19,16,0), XLBH_MASK, PPCCOM,         { BO, BI, BH } },
+{ "bclrl",   XLLK(19,16,1), XLBH_MASK, PPCCOM,         { BO, BI, BH } },
 { "bcr",     XLLK(19,16,0), XLBB_MASK, PWRCOM,         { BO, BI } },
 { "bcrl",    XLLK(19,16,1), XLBB_MASK, PWRCOM,         { BO, BI } },
 { "bclre",   XLLK(19,17,0), XLBB_MASK, BOOKE64,        { BO, BI } },
@@ -2924,14 +3071,23 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "crand",   XL(19,257), XL_MASK,      COM,            { BT, BA, BB } },
 
+{ "hrfid",   XL(19,274), 0xffffffff,   POWER5 | CELL,  { 0 } },
+
 { "crset",   XL(19,289), XL_MASK,      PPCCOM,         { BT, BAT, BBA } },
 { "creqv",   XL(19,289), XL_MASK,      COM,            { BT, BA, BB } },
 
+{ "doze",    XL(19,402), 0xffffffff,   POWER6,         { 0 } },
+
 { "crorc",   XL(19,417), XL_MASK,      COM,            { BT, BA, BB } },
 
+{ "nap",     XL(19,434), 0xffffffff,   POWER6,         { 0 } },
+
 { "crmove",  XL(19,449), XL_MASK,      PPCCOM,         { BT, BA, BBA } },
 { "cror",    XL(19,449), XL_MASK,      COM,            { BT, BA, BB } },
 
+{ "sleep",   XL(19,466), 0xffffffff,   POWER6,         { 0 } },
+{ "rvwinkle", XL(19,498), 0xffffffff,  POWER6,         { 0 } },
+
 { "bctr",    XLO(19,BOU,528,0), XLBOBIBB_MASK, COM,    { 0 } },
 { "bctrl",   XLO(19,BOU,528,1), XLBOBIBB_MASK, COM,    { 0 } },
 { "bltctr",  XLOCB(19,BOT,CBLT,528,0),  XLBOCBBB_MASK, PPCCOM, { CR } },
@@ -3074,12 +3230,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } },
 { "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
 { "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } },
-{ "bcctr",   XLLK(19,528,0),     XLYBB_MASK,  PPCCOM,  { BO, BI } },
 { "bcctr-",  XLYLK(19,528,0,0),  XLYBB_MASK,  PPCCOM,  { BOE, BI } },
 { "bcctr+",  XLYLK(19,528,1,0),  XLYBB_MASK,  PPCCOM,  { BOE, BI } },
-{ "bcctrl",  XLLK(19,528,1),     XLYBB_MASK,  PPCCOM,  { BO, BI } },
 { "bcctrl-", XLYLK(19,528,0,1),  XLYBB_MASK,  PPCCOM,  { BOE, BI } },
 { "bcctrl+", XLYLK(19,528,1,1),  XLYBB_MASK,  PPCCOM,  { BOE, BI } },
+{ "bcctr",   XLLK(19,528,0),     XLBH_MASK,   PPCCOM,  { BO, BI, BH } },
+{ "bcctrl",  XLLK(19,528,1),     XLBH_MASK,   PPCCOM,  { BO, BI, BH } },
 { "bcc",     XLLK(19,528,0),     XLBB_MASK,   PWRCOM,  { BO, BI } },
 { "bccl",    XLLK(19,528,1),     XLBB_MASK,   PWRCOM,  { BO, BI } },
 { "bcctre",  XLLK(19,529,0),     XLYBB_MASK,  BOOKE64, { BO, BI } },
@@ -3158,8 +3314,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "rldcr",   MDS(30,9,0), MDS_MASK,    PPC64,          { RA, RS, RB, ME6 } },
 { "rldcr.",  MDS(30,9,1), MDS_MASK,    PPC64,          { RA, RS, RB, ME6 } },
 
-{ "cmpw",    XCMPL(31,0,0), XCMPL_MASK, PPCCOM,                { OBF, RA, RB } },
-{ "cmpd",    XCMPL(31,0,1), XCMPL_MASK, PPC64,         { OBF, RA, RB } },
+{ "cmpw",    XOPL(31,0,0), XCMPL_MASK, PPCCOM,         { OBF, RA, RB } },
+{ "cmpd",    XOPL(31,0,1), XCMPL_MASK, PPC64,          { OBF, RA, RB } },
 { "cmp",     X(31,0),  XCMP_MASK,      PPC,            { BF, L, RA, RB } },
 { "cmp",     X(31,0),  XCMPL_MASK,     PWRCOM,         { BF, RA, RB } },
 
@@ -3228,17 +3384,18 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "iseleq",  X(31,79),      X_MASK,    PPCISEL,        { RT, RA, RB } },
 { "isel",    XISEL(31,15),  XISEL_MASK,        PPCISEL,        { RT, RA, RB, CRB } },
 
-{ "mfcr",    X(31,19), XRARB_MASK,     NOPOWER4,       { RT } },
+{ "mfocrf",  XFXM(31,19,0,1), XFXFXM_MASK, COM,                { RT, FXM } },
+{ "mfcr",    X(31,19), XRARB_MASK,     NOPOWER4 | COM, { RT } },
 { "mfcr",    X(31,19), XFXFXM_MASK,    POWER4,         { RT, FXM4 } },
 
-{ "lwarx",   X(31,20), X_MASK,         PPC,            { RT, RA, RB } },
+{ "lwarx",   X(31,20), XEH_MASK,       PPC,            { RT, RA0, RB, EH } },
 
-{ "ldx",     X(31,21), X_MASK,         PPC64,          { RT, RA, RB } },
+{ "ldx",     X(31,21), X_MASK,         PPC64,          { RT, RA0, RB } },
 
-{ "icbt",    X(31,22), X_MASK,         BOOKE,          { CT, RA, RB } },
+{ "icbt",    X(31,22), X_MASK,         BOOKE|PPCE300,  { CT, RA, RB } },
 { "icbt",    X(31,262),        XRT_MASK,       PPC403,         { RA, RB } },
 
-{ "lwzx",    X(31,23), X_MASK,         PPCCOM,         { RT, RA, RB } },
+{ "lwzx",    X(31,23), X_MASK,         PPCCOM,         { RT, RA0, RB } },
 { "lx",      X(31,23), X_MASK,         PWRCOM,         { RT, RA, RB } },
 
 { "slw",     XRC(31,24,0), X_MASK,     PPCCOM,         { RA, RS, RB } },
@@ -3262,10 +3419,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "icbte",   X(31,30), X_MASK,         BOOKE64,        { CT, RA, RB } },
 
-{ "lwzxe",   X(31,31), X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lwzxe",   X(31,31), X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
-{ "cmplw",   XCMPL(31,32,0), XCMPL_MASK, PPCCOM,       { OBF, RA, RB } },
-{ "cmpld",   XCMPL(31,32,1), XCMPL_MASK, PPC64,                { OBF, RA, RB } },
+{ "cmplw",   XOPL(31,32,0), XCMPL_MASK, PPCCOM,        { OBF, RA, RB } },
+{ "cmpld",   XOPL(31,32,1), XCMPL_MASK, PPC64,         { OBF, RA, RB } },
 { "cmpl",    X(31,32), XCMP_MASK,       PPC,           { BF, L, RA, RB } },
 { "cmpl",    X(31,32), XCMPL_MASK,      PWRCOM,        { BF, RA, RB } },
 
@@ -3324,15 +3481,16 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "mfmsr",   X(31,83), XRARB_MASK,     COM,            { RT } },
 
-{ "ldarx",   X(31,84), X_MASK,         PPC64,          { RT, RA, RB } },
+{ "ldarx",   X(31,84), XEH_MASK,       PPC64,          { RT, RA0, RB, EH } },
 
-{ "dcbf",    X(31,86), XRT_MASK,       PPC,            { RA, RB } },
+{ "dcbfl",   XOPL(31,86,1), XRT_MASK,  POWER5,         { RA, RB } },
+{ "dcbf",    X(31,86), XLRT_MASK,      PPC,            { RA, RB, XRT_L } },
 
-{ "lbzx",    X(31,87), X_MASK,         COM,            { RT, RA, RB } },
+{ "lbzx",    X(31,87), X_MASK,         COM,            { RT, RA0, RB } },
 
 { "dcbfe",   X(31,94), XRT_MASK,       BOOKE64,        { RA, RB } },
 
-{ "lbzxe",   X(31,95), X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lbzxe",   X(31,95), X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
 { "neg",     XO(31,104,0,0), XORB_MASK,        COM,            { RT, RA } },
 { "neg.",    XO(31,104,0,1), XORB_MASK,        COM,            { RT, RA } },
@@ -3350,12 +3508,14 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "lbzux",   X(31,119),        X_MASK,         COM,            { RT, RAL, RB } },
 
+{ "popcntb", X(31,122), XRB_MASK,      POWER5,         { RA, RS } },
+
 { "not",     XRC(31,124,0), X_MASK,    COM,            { RA, RS, RBS } },
 { "nor",     XRC(31,124,0), X_MASK,    COM,            { RA, RS, RB } },
 { "not.",    XRC(31,124,1), X_MASK,    COM,            { RA, RS, RBS } },
 { "nor.",    XRC(31,124,1), X_MASK,    COM,            { RA, RS, RB } },
 
-{ "lwarxe",  X(31,126),        X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lwarxe",  X(31,126),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
 { "lbzuxe",  X(31,127),        X_MASK,         BOOKE64,        { RT, RAL, RB } },
 
@@ -3383,21 +3543,22 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "dcbtstlse",X(31,142),X_MASK,                PPCCHLK64,      { CT, RA, RB }},
 
-{ "mtcr",    XFXM(31,144,0xff), XRARB_MASK, COM,       { RS }},
+{ "mtocrf",  XFXM(31,144,0,1), XFXFXM_MASK, COM,       { FXM, RS } },
+{ "mtcr",    XFXM(31,144,0xff,0), XRARB_MASK, COM,     { RS }},
 { "mtcrf",   X(31,144),        XFXFXM_MASK,    COM,            { FXM, RS } },
 
 { "mtmsr",   X(31,146),        XRARB_MASK,     COM,            { RS } },
 
-{ "stdx",    X(31,149), X_MASK,                PPC64,          { RS, RA, RB } },
+{ "stdx",    X(31,149), X_MASK,                PPC64,          { RS, RA0, RB } },
 
-{ "stwcx.",  XRC(31,150,1), X_MASK,    PPC,            { RS, RA, RB } },
+{ "stwcx.",  XRC(31,150,1), X_MASK,    PPC,            { RS, RA0, RB } },
 
-{ "stwx",    X(31,151), X_MASK,                PPCCOM,         { RS, RA, RB } },
+{ "stwx",    X(31,151), X_MASK,                PPCCOM,         { RS, RA0, RB } },
 { "stx",     X(31,151), X_MASK,                PWRCOM,         { RS, RA, RB } },
 
-{ "stwcxe.", XRC(31,158,1), X_MASK,    BOOKE64,        { RS, RA, RB } },
+{ "stwcxe.", XRC(31,158,1), X_MASK,    BOOKE64,        { RS, RA0, RB } },
 
-{ "stwxe",   X(31,159), X_MASK,                BOOKE64,        { RS, RA, RB } },
+{ "stwxe",   X(31,159), X_MASK,                BOOKE64,        { RS, RA0, RB } },
 
 { "slq",     XRC(31,152,0), X_MASK,    M601,           { RA, RS, RB } },
 { "slq.",    XRC(31,152,1), X_MASK,    M601,           { RA, RS, RB } },
@@ -3405,6 +3566,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "sle",     XRC(31,153,0), X_MASK,    M601,           { RA, RS, RB } },
 { "sle.",    XRC(31,153,1), X_MASK,    M601,           { RA, RS, RB } },
 
+{ "prtyw",   X(31,154),        XRB_MASK,       POWER6,         { RA, RS } },
+
 { "wrteei",  X(31,163),        XE_MASK,        PPC403 | BOOKE, { E } },
 
 { "dcbtls",  X(31,166),        X_MASK,         PPCCHLK,        { CT, RA, RB }},
@@ -3415,11 +3578,13 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "stdux",   X(31,181),        X_MASK,         PPC64,          { RS, RAS, RB } },
 
 { "stwux",   X(31,183),        X_MASK,         PPCCOM,         { RS, RAS, RB } },
-{ "stux",    X(31,183),        X_MASK,         PWRCOM,         { RS, RA, RB } },
+{ "stux",    X(31,183),        X_MASK,         PWRCOM,         { RS, RA0, RB } },
 
 { "sliq",    XRC(31,184,0), X_MASK,    M601,           { RA, RS, SH } },
 { "sliq.",   XRC(31,184,1), X_MASK,    M601,           { RA, RS, SH } },
 
+{ "prtyd",   X(31,186),        XRB_MASK,       POWER6,         { RA, RS } },
+
 { "stwuxe",  X(31,191),        X_MASK,         BOOKE64,        { RS, RAS, RB } },
 
 { "subfze",  XO(31,200,0,0), XORB_MASK, PPCCOM,                { RT, RA } },
@@ -3442,9 +3607,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "mtsr",    X(31,210),        XRB_MASK|(1<<20), COM32,        { SR, RS } },
 
-{ "stdcx.",  XRC(31,214,1), X_MASK,    PPC64,          { RS, RA, RB } },
+{ "stdcx.",  XRC(31,214,1), X_MASK,    PPC64,          { RS, RA0, RB } },
 
-{ "stbx",    X(31,215),        X_MASK,         COM,            { RS, RA, RB } },
+{ "stbx",    X(31,215),        X_MASK,         COM,            { RS, RA0, RB } },
 
 { "sllq",    XRC(31,216,0), X_MASK,    M601,           { RA, RS, RB } },
 { "sllq.",   XRC(31,216,1), X_MASK,    M601,           { RA, RS, RB } },
@@ -3452,7 +3617,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "sleq",    XRC(31,217,0), X_MASK,    M601,           { RA, RS, RB } },
 { "sleq.",   XRC(31,217,1), X_MASK,    M601,           { RA, RS, RB } },
 
-{ "stbxe",   X(31,223),        X_MASK,         BOOKE64,        { RS, RA, RB } },
+{ "stbxe",   X(31,223),        X_MASK,         BOOKE64,        { RS, RA0, RB } },
 
 { "icblc",   X(31,230),        X_MASK,         PPCCHLK,        { CT, RA, RB }},
 
@@ -3492,7 +3657,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mtsrin",  X(31,242),        XRA_MASK,       PPC32,          { RS, RB } },
 { "mtsri",   X(31,242),        XRA_MASK,       POWER32,        { RS, RB } },
 
-{ "dcbtst",  X(31,246),        XRT_MASK,       PPC,            { CT, RA, RB } },
+{ "dcbtst",  X(31,246),        X_MASK, PPC,                    { CT, RA, RB } },
 
 { "stbux",   X(31,247),        X_MASK,         COM,            { RS, RAS, RB } },
 
@@ -3519,26 +3684,26 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "addo.",   XO(31,266,1,1), XO_MASK,  PPCCOM,         { RT, RA, RB } },
 { "caxo.",   XO(31,266,1,1), XO_MASK,  PWRCOM,         { RT, RA, RB } },
 
-{ "tlbiel",  X(31,274), XRTRA_MASK,    POWER4,         { RB } },
+{ "tlbiel",  X(31,274), XRTLRA_MASK,   POWER4,         { RB, L } },
 
 { "mfapidi", X(31,275), X_MASK,                BOOKE,          { RT, RA } },
 
 { "lscbx",   XRC(31,277,0), X_MASK,    M601,           { RT, RA, RB } },
 { "lscbx.",  XRC(31,277,1), X_MASK,    M601,           { RT, RA, RB } },
 
-{ "dcbt",    X(31,278),        XRT_MASK,       PPC,            { CT, RA, RB } },
+{ "dcbt",    X(31,278),        X_MASK,         PPC,            { CT, RA, RB } },
 
-{ "lhzx",    X(31,279),        X_MASK,         COM,            { RT, RA, RB } },
+{ "lhzx",    X(31,279),        X_MASK,         COM,            { RT, RA0, RB } },
 
 { "eqv",     XRC(31,284,0), X_MASK,    COM,            { RA, RS, RB } },
 { "eqv.",    XRC(31,284,1), X_MASK,    COM,            { RA, RS, RB } },
 
 { "dcbte",   X(31,286),        X_MASK,         BOOKE64,        { CT, RA, RB } },
 
-{ "lhzxe",   X(31,287),        X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lhzxe",   X(31,287),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
 { "tlbie",   X(31,306),        XRTLRA_MASK,    PPC,            { RB, L } },
-{ "tlbi",    X(31,306),        XRT_MASK,       POWER,          { RA, RB } },
+{ "tlbi",    X(31,306),        XRT_MASK,       POWER,          { RA0, RB } },
 
 { "eciwx",   X(31,310), X_MASK,                PPC,            { RT, RA, RB } },
 
@@ -3607,6 +3772,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mfsdr1",     XSPR(31,339,25),   XSPR_MASK, COM,     { RT } },
 { "mfsrr0",     XSPR(31,339,26),   XSPR_MASK, COM,     { RT } },
 { "mfsrr1",     XSPR(31,339,27),   XSPR_MASK, COM,     { RT } },
+{ "mfcfar",     XSPR(31,339,28),   XSPR_MASK, POWER6,  { RT } },
 { "mfpid",      XSPR(31,339,48),   XSPR_MASK, BOOKE,    { RT } },
 { "mfpid",      XSPR(31,339,945),  XSPR_MASK, PPC403,  { RT } },
 { "mfcsrr0",    XSPR(31,339,58),   XSPR_MASK, BOOKE,    { RT } },
@@ -3634,21 +3800,21 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mfbar",      XSPR(31,339,159),  XSPR_MASK, PPC860,  { RT } },
 { "mfvrsave",   XSPR(31,339,256),  XSPR_MASK, PPCVEC,  { RT } },
 { "mfusprg0",   XSPR(31,339,256),  XSPR_MASK, BOOKE,    { RT } },
-{ "mfsprg4",    XSPR(31,339,260),  XSPR_MASK, PPC405,  { RT } },
-{ "mfsprg5",    XSPR(31,339,261),  XSPR_MASK, PPC405,  { RT } },
-{ "mfsprg6",    XSPR(31,339,262),  XSPR_MASK, PPC405,  { RT } },
-{ "mfsprg7",    XSPR(31,339,263),  XSPR_MASK, PPC405,  { RT } },
 { "mftb",       X(31,371),        X_MASK,    CLASSIC,  { RT, TBR } },
 { "mftb",       XSPR(31,339,268),  XSPR_MASK, BOOKE,    { RT } },
 { "mftbl",      XSPR(31,371,268),  XSPR_MASK, CLASSIC, { RT } },
 { "mftbl",      XSPR(31,339,268),  XSPR_MASK, BOOKE,    { RT } },
 { "mftbu",      XSPR(31,371,269),  XSPR_MASK, CLASSIC, { RT } },
 { "mftbu",      XSPR(31,339,269),  XSPR_MASK, BOOKE,    { RT } },
-{ "mfsprg",     XSPR(31,339,272),  XSPRG_MASK, PPC,    { RT, SPRG } },
+{ "mfsprg",     XSPR(31,339,256),  XSPRG_MASK, PPC,    { RT, SPRG } },
 { "mfsprg0",    XSPR(31,339,272),  XSPR_MASK, PPC,     { RT } },
 { "mfsprg1",    XSPR(31,339,273),  XSPR_MASK, PPC,     { RT } },
 { "mfsprg2",    XSPR(31,339,274),  XSPR_MASK, PPC,     { RT } },
 { "mfsprg3",    XSPR(31,339,275),  XSPR_MASK, PPC,     { RT } },
+{ "mfsprg4",    XSPR(31,339,260),  XSPR_MASK, PPC405 | BOOKE,  { RT } },
+{ "mfsprg5",    XSPR(31,339,261),  XSPR_MASK, PPC405 | BOOKE,  { RT } },
+{ "mfsprg6",    XSPR(31,339,262),  XSPR_MASK, PPC405 | BOOKE,  { RT } },
+{ "mfsprg7",    XSPR(31,339,263),  XSPR_MASK, PPC405 | BOOKE,  { RT } },
 { "mfasr",      XSPR(31,339,280),  XSPR_MASK, PPC64,   { RT } },
 { "mfear",      XSPR(31,339,282),  XSPR_MASK, PPC,     { RT } },
 { "mfpir",      XSPR(31,339,286),  XSPR_MASK, BOOKE,    { RT } },
@@ -3699,6 +3865,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mfspefscr",  XSPR(31,339,512),  XSPR_MASK, PPCSPE,  { RT } },
 { "mfbbear",    XSPR(31,339,513),  XSPR_MASK, PPCBRLK,  { RT } },
 { "mfbbtar",    XSPR(31,339,514),  XSPR_MASK, PPCBRLK,  { RT } },
+{ "mfivor32",   XSPR(31,339,528),  XSPR_MASK, PPCSPE,  { RT } },
+{ "mfivor33",   XSPR(31,339,529),  XSPR_MASK, PPCSPE,  { RT } },
+{ "mfivor34",   XSPR(31,339,530),  XSPR_MASK, PPCSPE,  { RT } },
+{ "mfivor35",   XSPR(31,339,531),  XSPR_MASK, PPCPMR,  { RT } },
 { "mfibatu",    XSPR(31,339,528),  XSPRBAT_MASK, PPC,  { RT, SPRBAT } },
 { "mfibatl",    XSPR(31,339,529),  XSPRBAT_MASK, PPC,  { RT, SPRBAT } },
 { "mfdbatu",    XSPR(31,339,536),  XSPRBAT_MASK, PPC,  { RT, SPRBAT } },
@@ -3708,10 +3878,11 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mfic_dat",   XSPR(31,339,562),  XSPR_MASK, PPC860,  { RT } },
 { "mfdc_cst",   XSPR(31,339,568),  XSPR_MASK, PPC860,  { RT } },
 { "mfdc_adr",   XSPR(31,339,569),  XSPR_MASK, PPC860,  { RT } },
-{ "mfdc_dat",   XSPR(31,339,570),  XSPR_MASK, PPC860,  { RT } },
 { "mfmcsrr0",   XSPR(31,339,570),  XSPR_MASK, PPCRFMCI, { RT } },
+{ "mfdc_dat",   XSPR(31,339,570),  XSPR_MASK, PPC860,  { RT } },
 { "mfmcsrr1",   XSPR(31,339,571),  XSPR_MASK, PPCRFMCI, { RT } },
 { "mfmcsr",     XSPR(31,339,572),  XSPR_MASK, PPCRFMCI, { RT } },
+{ "mfmcar",     XSPR(31,339,573),  XSPR_MASK, PPCRFMCI, { RT } },
 { "mfdpdr",     XSPR(31,339,630),  XSPR_MASK, PPC860,  { RT } },
 { "mfdpir",     XSPR(31,339,631),  XSPR_MASK, PPC860,  { RT } },
 { "mfimmr",     XSPR(31,339,638),  XSPR_MASK, PPC860,  { RT } },
@@ -3775,14 +3946,14 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mfpbu2",     XSPR(31,339,1023), XSPR_MASK, PPC403,  { RT } },
 { "mfspr",      X(31,339),        X_MASK,    COM,      { RT, SPR } },
 
-{ "lwax",    X(31,341),        X_MASK,         PPC64,          { RT, RA, RB } },
+{ "lwax",    X(31,341),        X_MASK,         PPC64,          { RT, RA0, RB } },
 
 { "dst",     XDSS(31,342,0), XDSS_MASK,        PPCVEC,         { RA, RB, STRM } },
 { "dstt",    XDSS(31,342,1), XDSS_MASK,        PPCVEC,         { RA, RB, STRM } },
 
-{ "lhax",    X(31,343),        X_MASK,         COM,            { RT, RA, RB } },
+{ "lhax",    X(31,343),        X_MASK,         COM,            { RT, RA0, RB } },
 
-{ "lhaxe",   X(31,351),        X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lhaxe",   X(31,351),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
 { "dstst",   XDSS(31,374,0), XDSS_MASK,        PPCVEC,         { RA, RB, STRM } },
 { "dststt",  XDSS(31,374,1), XDSS_MASK,        PPCVEC,         { RA, RB, STRM } },
@@ -3821,14 +3992,20 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "slbmte",  X(31,402), XRA_MASK,      PPC64,          { RS, RB } },
 
-{ "sthx",    X(31,407),        X_MASK,         COM,            { RS, RA, RB } },
+{ "sthx",    X(31,407),        X_MASK,         COM,            { RS, RA0, RB } },
+
+{ "cmpb",    X(31,508),        X_MASK,         POWER6,         { RA, RS, RB } },
 
 { "lfqx",    X(31,791),        X_MASK,         POWER2,         { FRT, RA, RB } },
 
+{ "lfdpx",   X(31,791),        X_MASK,         POWER6,         { FRT, RA, RB } },
+
 { "lfqux",   X(31,823),        X_MASK,         POWER2,         { FRT, RA, RB } },
 
 { "stfqx",   X(31,919),        X_MASK,         POWER2,         { FRS, RA, RB } },
 
+{ "stfdpx",  X(31,919),        X_MASK,         POWER6,         { FRS, RA, RB } },
+
 { "stfqux",  X(31,951),        X_MASK,         POWER2,         { FRS, RA, RB } },
 
 { "orc",     XRC(31,412,0), X_MASK,    COM,            { RA, RS, RB } },
@@ -3837,7 +4014,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "sradi",   XS(31,413,0), XS_MASK,    PPC64,          { RA, RS, SH6 } },
 { "sradi.",  XS(31,413,1), XS_MASK,    PPC64,          { RA, RS, SH6 } },
 
-{ "sthxe",   X(31,415),        X_MASK,         BOOKE64,        { RS, RA, RB } },
+{ "sthxe",   X(31,415),        X_MASK,         BOOKE64,        { RS, RA0, RB } },
 
 { "slbie",   X(31,434),        XRTRA_MASK,     PPC64,          { RB } },
 
@@ -3918,6 +4095,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mtsdr1",    XSPR(31,467,25),   XSPR_MASK, COM,      { RS } },
 { "mtsrr0",    XSPR(31,467,26),   XSPR_MASK, COM,      { RS } },
 { "mtsrr1",    XSPR(31,467,27),   XSPR_MASK, COM,      { RS } },
+{ "mtcfar",    XSPR(31,467,28),   XSPR_MASK, POWER6,   { RS } },
 { "mtpid",     XSPR(31,467,48),   XSPR_MASK, BOOKE,     { RS } },
 { "mtpid",     XSPR(31,467,945),  XSPR_MASK, PPC403,   { RS } },
 { "mtdecar",   XSPR(31,467,54),   XSPR_MASK, BOOKE,     { RS } },
@@ -3946,7 +4124,7 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mtbar",     XSPR(31,467,159),  XSPR_MASK, PPC860,   { RS } },
 { "mtvrsave",  XSPR(31,467,256),  XSPR_MASK, PPCVEC,   { RS } },
 { "mtusprg0",  XSPR(31,467,256),  XSPR_MASK, BOOKE,     { RS } },
-{ "mtsprg",    XSPR(31,467,272),  XSPRG_MASK,PPC,      { SPRG, RS } },
+{ "mtsprg",    XSPR(31,467,256),  XSPRG_MASK,PPC,      { SPRG, RS } },
 { "mtsprg0",   XSPR(31,467,272),  XSPR_MASK, PPC,      { RS } },
 { "mtsprg1",   XSPR(31,467,273),  XSPR_MASK, PPC,      { RS } },
 { "mtsprg2",   XSPR(31,467,274),  XSPR_MASK, PPC,      { RS } },
@@ -4005,6 +4183,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "mtspefscr",  XSPR(31,467,512),  XSPR_MASK, PPCSPE,   { RS } },
 { "mtbbear",   XSPR(31,467,513),  XSPR_MASK, PPCBRLK,   { RS } },
 { "mtbbtar",   XSPR(31,467,514),  XSPR_MASK, PPCBRLK,  { RS } },
+{ "mtivor32",  XSPR(31,467,528),  XSPR_MASK, PPCSPE,   { RS } },
+{ "mtivor33",  XSPR(31,467,529),  XSPR_MASK, PPCSPE,   { RS } },
+{ "mtivor34",  XSPR(31,467,530),  XSPR_MASK, PPCSPE,   { RS } },
+{ "mtivor35",  XSPR(31,467,531),  XSPR_MASK, PPCPMR,   { RS } },
 { "mtibatu",   XSPR(31,467,528),  XSPRBAT_MASK, PPC,   { SPRBAT, RS } },
 { "mtibatl",   XSPR(31,467,529),  XSPRBAT_MASK, PPC,   { SPRBAT, RS } },
 { "mtdbatu",   XSPR(31,467,536),  XSPRBAT_MASK, PPC,   { SPRBAT, RS } },
@@ -4101,13 +4283,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "clcs",    X(31,531), XRB_MASK,      M601,           { RT, RA } },
 
-{ "lswx",    X(31,533),        X_MASK,         PPCCOM,         { RT, RA, RB } },
+{ "ldbrx",   X(31,532),        X_MASK,         CELL,           { RT, RA0, RB } },
+
+{ "lswx",    X(31,533),        X_MASK,         PPCCOM,         { RT, RA0, RB } },
 { "lsx",     X(31,533),        X_MASK,         PWRCOM,         { RT, RA, RB } },
 
-{ "lwbrx",   X(31,534),        X_MASK,         PPCCOM,         { RT, RA, RB } },
+{ "lwbrx",   X(31,534),        X_MASK,         PPCCOM,         { RT, RA0, RB } },
 { "lbrx",    X(31,534),        X_MASK,         PWRCOM,         { RT, RA, RB } },
 
-{ "lfsx",    X(31,535),        X_MASK,         COM,            { FRT, RA, RB } },
+{ "lfsx",    X(31,535),        X_MASK,         COM,            { FRT, RA0, RB } },
 
 { "srw",     XRC(31,536,0), X_MASK,    PPCCOM,         { RA, RS, RB } },
 { "sr",      XRC(31,536,0), X_MASK,    PWRCOM,         { RA, RS, RB } },
@@ -4123,11 +4307,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "maskir",  XRC(31,541,0), X_MASK,    M601,           { RA, RS, RB } },
 { "maskir.", XRC(31,541,1), X_MASK,    M601,           { RA, RS, RB } },
 
-{ "lwbrxe",  X(31,542),        X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lwbrxe",  X(31,542),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
-{ "lfsxe",   X(31,543),        X_MASK,         BOOKE64,        { FRT, RA, RB } },
+{ "lfsxe",   X(31,543),        X_MASK,         BOOKE64,        { FRT, RA0, RB } },
 
 { "bbelr",   X(31,550),        X_MASK,         PPCBRLK,        { 0 }},
+
 { "tlbsync", X(31,566),        0xffffffff,     PPC,            { 0 } },
 
 { "lfsux",   X(31,567),        X_MASK,         COM,            { FRT, RAS, RB } },
@@ -4136,8 +4321,8 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "mfsr",    X(31,595),        XRB_MASK|(1<<20), COM32,        { RT, SR } },
 
-{ "lswi",    X(31,597),        X_MASK,         PPCCOM,         { RT, RA, NB } },
-{ "lsi",     X(31,597),        X_MASK,         PWRCOM,         { RT, RA, NB } },
+{ "lswi",    X(31,597),        X_MASK,         PPCCOM,         { RT, RA0, NB } },
+{ "lsi",     X(31,597),        X_MASK,         PWRCOM,         { RT, RA0, NB } },
 
 { "lwsync",  XSYNC(31,598,1), 0xffffffff, PPC,         { 0 } },
 { "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64,       { 0 } },
@@ -4145,9 +4330,11 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "sync",    X(31,598), XSYNC_MASK,    PPCCOM,         { LS } },
 { "dcs",     X(31,598), 0xffffffff,    PWRCOM,         { 0 } },
 
-{ "lfdx",    X(31,599), X_MASK,                COM,            { FRT, RA, RB } },
+{ "lfdx",    X(31,599), X_MASK,                COM,            { FRT, RA0, RB } },
+
+{ "lfdxe",   X(31,607), X_MASK,                BOOKE64,        { FRT, RA0, RB } },
 
-{ "lfdxe",   X(31,607), X_MASK,                BOOKE64,        { FRT, RA, RB } },
+{ "mffgpr",  XRC(31,607,0), XRA_MASK,  POWER6,         { FRT, RB } },
 
 { "mfsri",   X(31,627), X_MASK,                PWRCOM,         { RT, RA, RB } },
 
@@ -4159,13 +4346,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "mfsrin",  X(31,659), XRA_MASK,      PPC32,          { RT, RB } },
 
-{ "stswx",   X(31,661), X_MASK,                PPCCOM,         { RS, RA, RB } },
-{ "stsx",    X(31,661), X_MASK,                PWRCOM,         { RS, RA, RB } },
+{ "stdbrx",  X(31,660), X_MASK,                CELL,           { RS, RA0, RB } },
+
+{ "stswx",   X(31,661), X_MASK,                PPCCOM,         { RS, RA0, RB } },
+{ "stsx",    X(31,661), X_MASK,                PWRCOM,         { RS, RA0, RB } },
 
-{ "stwbrx",  X(31,662), X_MASK,                PPCCOM,         { RS, RA, RB } },
-{ "stbrx",   X(31,662), X_MASK,                PWRCOM,         { RS, RA, RB } },
+{ "stwbrx",  X(31,662), X_MASK,                PPCCOM,         { RS, RA0, RB } },
+{ "stbrx",   X(31,662), X_MASK,                PWRCOM,         { RS, RA0, RB } },
 
-{ "stfsx",   X(31,663), X_MASK,                COM,            { FRS, RA, RB } },
+{ "stfsx",   X(31,663), X_MASK,                COM,            { FRS, RA0, RB } },
 
 { "srq",     XRC(31,664,0), X_MASK,    M601,           { RA, RS, RB } },
 { "srq.",    XRC(31,664,1), X_MASK,    M601,           { RA, RS, RB } },
@@ -4173,9 +4362,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "sre",     XRC(31,665,0), X_MASK,    M601,           { RA, RS, RB } },
 { "sre.",    XRC(31,665,1), X_MASK,    M601,           { RA, RS, RB } },
 
-{ "stwbrxe", X(31,670), X_MASK,                BOOKE64,        { RS, RA, RB } },
+{ "stwbrxe", X(31,670), X_MASK,                BOOKE64,        { RS, RA0, RB } },
 
-{ "stfsxe",  X(31,671), X_MASK,                BOOKE64,        { FRS, RA, RB } },
+{ "stfsxe",  X(31,671), X_MASK,                BOOKE64,        { FRS, RA0, RB } },
 
 { "stfsux",  X(31,695),        X_MASK,         COM,            { FRS, RAS, RB } },
 
@@ -4184,10 +4373,10 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "stfsuxe", X(31,703),        X_MASK,         BOOKE64,        { FRS, RAS, RB } },
 
-{ "stswi",   X(31,725),        X_MASK,         PPCCOM,         { RS, RA, NB } },
-{ "stsi",    X(31,725),        X_MASK,         PWRCOM,         { RS, RA, NB } },
+{ "stswi",   X(31,725),        X_MASK,         PPCCOM,         { RS, RA0, NB } },
+{ "stsi",    X(31,725),        X_MASK,         PWRCOM,         { RS, RA0, NB } },
 
-{ "stfdx",   X(31,727),        X_MASK,         COM,            { FRS, RA, RB } },
+{ "stfdx",   X(31,727),        X_MASK,         COM,            { FRS, RA0, RB } },
 
 { "srlq",    XRC(31,728,0), X_MASK,    M601,           { RA, RS, RB } },
 { "srlq.",   XRC(31,728,1), X_MASK,    M601,           { RA, RS, RB } },
@@ -4195,7 +4384,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "sreq",    XRC(31,729,0), X_MASK,    M601,           { RA, RS, RB } },
 { "sreq.",   XRC(31,729,1), X_MASK,    M601,           { RA, RS, RB } },
 
-{ "stfdxe",  X(31,735),        X_MASK,         BOOKE64,        { FRS, RA, RB } },
+{ "stfdxe",  X(31,735),        X_MASK,         BOOKE64,        { FRS, RA0, RB } },
+
+{ "mftgpr",  XRC(31,735,0), XRA_MASK,  POWER6,         { RT, FRB } },
 
 { "dcba",    X(31,758),        XRT_MASK,       PPC405 | BOOKE, { RA, RB } },
 
@@ -4211,7 +4402,9 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "tlbivax", X(31,786),        XRT_MASK,       BOOKE,          { RA, RB } },
 { "tlbivaxe",X(31,787),        XRT_MASK,       BOOKE64,        { RA, RB } },
 
-{ "lhbrx",   X(31,790),        X_MASK,         COM,            { RT, RA, RB } },
+{ "lwzcix",  X(31,789),        X_MASK,         POWER6,         { RT, RA0, RB } },
+
+{ "lhbrx",   X(31,790),        X_MASK,         COM,            { RT, RA0, RB } },
 
 { "sraw",    XRC(31,792,0), X_MASK,    PPCCOM,         { RA, RS, RB } },
 { "sra",     XRC(31,792,0), X_MASK,    PWRCOM,         { RA, RS, RB } },
@@ -4221,13 +4414,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "srad",    XRC(31,794,0), X_MASK,    PPC64,          { RA, RS, RB } },
 { "srad.",   XRC(31,794,1), X_MASK,    PPC64,          { RA, RS, RB } },
 
-{ "lhbrxe",  X(31,798),        X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "lhbrxe",  X(31,798),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
-{ "ldxe",    X(31,799),        X_MASK,         BOOKE64,        { RT, RA, RB } },
-{ "lduxe",   X(31,831),        X_MASK,         BOOKE64,        { RT, RA, RB } },
+{ "ldxe",    X(31,799),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
+{ "lduxe",   X(31,831),        X_MASK,         BOOKE64,        { RT, RA0, RB } },
 
 { "rac",     X(31,818),        X_MASK,         PWRCOM,         { RT, RA, RB } },
 
+{ "lhzcix",  X(31,821),        X_MASK,         POWER6,         { RT, RA0, RB } },
+
 { "dss",     XDSS(31,822,0), XDSS_MASK,        PPCVEC,         { STRM } },
 { "dssall",  XDSS(31,822,1), XDSS_MASK,        PPCVEC,         { 0 } },
 
@@ -4238,19 +4433,25 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "slbmfev", X(31,851), XRA_MASK,      PPC64,          { RT, RB } },
 
+{ "lbzcix",  X(31,853),        X_MASK,         POWER6,         { RT, RA0, RB } },
+
 { "mbar",    X(31,854),        X_MASK,         BOOKE,          { MO } },
 { "eieio",   X(31,854),        0xffffffff,     PPC,            { 0 } },
 
-{ "tlbsx",   XRC(31,914,0), X_MASK,    BOOKE,          { RA, RB } },
-{ "tlbsx",   XRC(31,914,0), X_MASK,    PPC403,         { RT, RA, RB } },
-{ "tlbsx.",  XRC(31,914,1), X_MASK,    BOOKE,          { RA, RB } },
-{ "tlbsx.",  XRC(31,914,1), X_MASK,    PPC403,         { RT, RA, RB } },
+{ "lfiwax",  X(31,855),        X_MASK,         POWER6,         { FRT, RA0, RB } },
+
+{ "ldcix",   X(31,885),        X_MASK,         POWER6,         { RT, RA0, RB } },
+
+{ "tlbsx",   XRC(31,914,0), X_MASK,    PPC403|BOOKE,   { RTO, RA, RB } },
+{ "tlbsx.",  XRC(31,914,1), X_MASK,    PPC403|BOOKE,   { RTO, RA, RB } },
 { "tlbsxe",  XRC(31,915,0), X_MASK,    BOOKE64,        { RA, RB } },
 { "tlbsxe.", XRC(31,915,1), X_MASK,    BOOKE64,        { RA, RB } },
 
 { "slbmfee", X(31,915), XRA_MASK,      PPC64,          { RT, RB } },
 
-{ "sthbrx",  X(31,918),        X_MASK,         COM,            { RS, RA, RB } },
+{ "stwcix",  X(31,917),        X_MASK,         POWER6,         { RS, RA0, RB } },
+
+{ "sthbrx",  X(31,918),        X_MASK,         COM,            { RS, RA0, RB } },
 
 { "sraq",    XRC(31,920,0), X_MASK,    M601,           { RA, RS, RB } },
 { "sraq.",   XRC(31,920,1), X_MASK,    M601,           { RA, RS, RB } },
@@ -4263,14 +4464,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "extsh.",  XRC(31,922,1), XRB_MASK,  PPCCOM,         { RA, RS } },
 { "exts.",   XRC(31,922,1), XRB_MASK,  PWRCOM,         { RA, RS } },
 
-{ "sthbrxe", X(31,926),        X_MASK,         BOOKE64,        { RS, RA, RB } },
+{ "sthbrxe", X(31,926),        X_MASK,         BOOKE64,        { RS, RA0, RB } },
 
-{ "stdxe",   X(31,927), X_MASK,                BOOKE64,        { RS, RA, RB } },
+{ "stdxe",   X(31,927), X_MASK,                BOOKE64,        { RS, RA0, RB } },
 
 { "tlbrehi", XTLB(31,946,0), XTLB_MASK,        PPC403,         { RT, RA } },
 { "tlbrelo", XTLB(31,946,1), XTLB_MASK,        PPC403,         { RT, RA } },
-{ "tlbre",   X(31,946),        X_MASK,         BOOKE,          { 0 } },
-{ "tlbre",   X(31,946),        X_MASK,         PPC403,         { RS, RA, SH } },
+{ "tlbre",   X(31,946),        X_MASK,         PPC403|BOOKE,   { RSO, RAOPT, SHO } },
+
+{ "sthcix",  X(31,949),        X_MASK,         POWER6,         { RS, RA0, RB } },
 
 { "sraiq",   XRC(31,952,0), X_MASK,    M601,           { RA, RS, SH } },
 { "sraiq.",  XRC(31,952,1), X_MASK,    M601,           { RA, RS, SH } },
@@ -4284,13 +4486,14 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "tlbwehi", XTLB(31,978,0), XTLB_MASK,        PPC403,         { RT, RA } },
 { "tlbwelo", XTLB(31,978,1), XTLB_MASK,        PPC403,         { RT, RA } },
-{ "tlbwe",   X(31,978),        X_MASK,         BOOKE,          { 0 } },
-{ "tlbwe",   X(31,978),        X_MASK,         PPC403,         { RS, RA, SH } },
+{ "tlbwe",   X(31,978),        X_MASK,         PPC403|BOOKE,   { RSO, RAOPT, SHO } },
 { "tlbld",   X(31,978),        XRTRA_MASK,     PPC,            { RB } },
 
+{ "stbcix",  X(31,981),        X_MASK,         POWER6,         { RS, RA0, RB } },
+
 { "icbi",    X(31,982),        XRT_MASK,       PPC,            { RA, RB } },
 
-{ "stfiwx",  X(31,983),        X_MASK,         PPC,            { FRS, RA, RB } },
+{ "stfiwx",  X(31,983),        X_MASK,         PPC,            { FRS, RA0, RB } },
 
 { "extsw",   XRC(31,986,0), XRB_MASK,  PPC64 | BOOKE64,{ RA, RS } },
 { "extsw.",  XRC(31,986,1), XRB_MASK,  PPC64,          { RA, RS } },
@@ -4298,10 +4501,13 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "icread",  X(31,998),        XRT_MASK,       PPC403|PPC440,  { RA, RB } },
 
 { "icbie",   X(31,990),        XRT_MASK,       BOOKE64,        { RA, RB } },
-{ "stfiwxe", X(31,991),        X_MASK,         BOOKE64,        { FRS, RA, RB } },
+{ "stfiwxe", X(31,991),        X_MASK,         BOOKE64,        { FRS, RA0, RB } },
 
 { "tlbli",   X(31,1010), XRTRA_MASK,   PPC,            { RB } },
 
+{ "stdcix",  X(31,1013), X_MASK,       POWER6,         { RS, RA0, RB } },
+
+{ "dcbzl",   XOPL(31,1014,1), XRT_MASK,POWER4,            { RA, RB } },
 { "dcbz",    X(31,1014), XRT_MASK,     PPC,            { RA, RB } },
 { "dclz",    X(31,1014), XRT_MASK,     PPC,            { RA, RB } },
 
@@ -4320,86 +4526,104 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "stvx",    X(31, 231), X_MASK,       PPCVEC,         { VS, RA, RB } },
 { "stvxl",   X(31, 487), X_MASK,       PPCVEC,         { VS, RA, RB } },
 
-{ "lwz",     OP(32),   OP_MASK,        PPCCOM,         { RT, D, RA } },
-{ "l",      OP(32),    OP_MASK,        PWRCOM,         { RT, D, RA } },
+/* New load/store left/right indexed vector instructions, available only on the Cell.  */
+{ "lvlx",    X(31, 519), X_MASK,       CELL,           { VD, RA0, RB } },
+{ "lvlxl",   X(31, 775), X_MASK,       CELL,           { VD, RA0, RB } },
+{ "lvrx",    X(31, 551), X_MASK,       CELL,           { VD, RA0, RB } },
+{ "lvrxl",   X(31, 807), X_MASK,       CELL,           { VD, RA0, RB } },
+{ "stvlx",   X(31, 647), X_MASK,       CELL,           { VS, RA0, RB } },
+{ "stvlxl",  X(31, 903), X_MASK,       CELL,           { VS, RA0, RB } },
+{ "stvrx",   X(31, 679), X_MASK,       CELL,           { VS, RA0, RB } },
+{ "stvrxl",  X(31, 935), X_MASK,       CELL,           { VS, RA0, RB } },
+
+{ "lwz",     OP(32),   OP_MASK,        PPCCOM,         { RT, D, RA0 } },
+{ "l",      OP(32),    OP_MASK,        PWRCOM,         { RT, D, RA0 } },
 
 { "lwzu",    OP(33),   OP_MASK,        PPCCOM,         { RT, D, RAL } },
-{ "lu",      OP(33),   OP_MASK,        PWRCOM,         { RT, D, RA } },
+{ "lu",      OP(33),   OP_MASK,        PWRCOM,         { RT, D, RA0 } },
 
-{ "lbz",     OP(34),   OP_MASK,        COM,            { RT, D, RA } },
+{ "lbz",     OP(34),   OP_MASK,        COM,            { RT, D, RA0 } },
 
 { "lbzu",    OP(35),   OP_MASK,        COM,            { RT, D, RAL } },
 
-{ "stw",     OP(36),   OP_MASK,        PPCCOM,         { RS, D, RA } },
-{ "st",      OP(36),   OP_MASK,        PWRCOM,         { RS, D, RA } },
+{ "stw",     OP(36),   OP_MASK,        PPCCOM,         { RS, D, RA0 } },
+{ "st",      OP(36),   OP_MASK,        PWRCOM,         { RS, D, RA0 } },
 
 { "stwu",    OP(37),   OP_MASK,        PPCCOM,         { RS, D, RAS } },
-{ "stu",     OP(37),   OP_MASK,        PWRCOM,         { RS, D, RA } },
+{ "stu",     OP(37),   OP_MASK,        PWRCOM,         { RS, D, RA0 } },
 
-{ "stb",     OP(38),   OP_MASK,        COM,            { RS, D, RA } },
+{ "stb",     OP(38),   OP_MASK,        COM,            { RS, D, RA0 } },
 
 { "stbu",    OP(39),   OP_MASK,        COM,            { RS, D, RAS } },
 
-{ "lhz",     OP(40),   OP_MASK,        COM,            { RT, D, RA } },
+{ "lhz",     OP(40),   OP_MASK,        COM,            { RT, D, RA0 } },
 
 { "lhzu",    OP(41),   OP_MASK,        COM,            { RT, D, RAL } },
 
-{ "lha",     OP(42),   OP_MASK,        COM,            { RT, D, RA } },
+{ "lha",     OP(42),   OP_MASK,        COM,            { RT, D, RA0 } },
 
 { "lhau",    OP(43),   OP_MASK,        COM,            { RT, D, RAL } },
 
-{ "sth",     OP(44),   OP_MASK,        COM,            { RS, D, RA } },
+{ "sth",     OP(44),   OP_MASK,        COM,            { RS, D, RA0 } },
 
 { "sthu",    OP(45),   OP_MASK,        COM,            { RS, D, RAS } },
 
 { "lmw",     OP(46),   OP_MASK,        PPCCOM,         { RT, D, RAM } },
-{ "lm",      OP(46),   OP_MASK,        PWRCOM,         { RT, D, RA } },
+{ "lm",      OP(46),   OP_MASK,        PWRCOM,         { RT, D, RA0 } },
 
-{ "stmw",    OP(47),   OP_MASK,        PPCCOM,         { RS, D, RA } },
-{ "stm",     OP(47),   OP_MASK,        PWRCOM,         { RS, D, RA } },
+{ "stmw",    OP(47),   OP_MASK,        PPCCOM,         { RS, D, RA0 } },
+{ "stm",     OP(47),   OP_MASK,        PWRCOM,         { RS, D, RA0 } },
 
-{ "lfs",     OP(48),   OP_MASK,        COM,            { FRT, D, RA } },
+{ "lfs",     OP(48),   OP_MASK,        COM,            { FRT, D, RA0 } },
 
 { "lfsu",    OP(49),   OP_MASK,        COM,            { FRT, D, RAS } },
 
-{ "lfd",     OP(50),   OP_MASK,        COM,            { FRT, D, RA } },
+{ "lfd",     OP(50),   OP_MASK,        COM,            { FRT, D, RA0 } },
 
 { "lfdu",    OP(51),   OP_MASK,        COM,            { FRT, D, RAS } },
 
-{ "stfs",    OP(52),   OP_MASK,        COM,            { FRS, D, RA } },
+{ "stfs",    OP(52),   OP_MASK,        COM,            { FRS, D, RA0 } },
 
 { "stfsu",   OP(53),   OP_MASK,        COM,            { FRS, D, RAS } },
 
-{ "stfd",    OP(54),   OP_MASK,        COM,            { FRS, D, RA } },
+{ "stfd",    OP(54),   OP_MASK,        COM,            { FRS, D, RA0 } },
 
 { "stfdu",   OP(55),   OP_MASK,        COM,            { FRS, D, RAS } },
 
 { "lq",      OP(56),   OP_MASK,        POWER4,         { RTQ, DQ, RAQ } },
 
-{ "lfq",     OP(56),   OP_MASK,        POWER2,         { FRT, D, RA } },
+{ "lfq",     OP(56),   OP_MASK,        POWER2,         { FRT, D, RA0 } },
+
+{ "lfqu",    OP(57),   OP_MASK,        POWER2,         { FRT, D, RA0 } },
 
-{ "lfqu",    OP(57),   OP_MASK,        POWER2,         { FRT, D, RA } },
+{ "lfdp",    OP(57),   OP_MASK,        POWER6,         { FRT, D, RA0 } },
 
-{ "lbze",    DEO(58,0), DE_MASK,       BOOKE64,        { RT, DE, RA } },
+{ "lbze",    DEO(58,0), DE_MASK,       BOOKE64,        { RT, DE, RA0 } },
 { "lbzue",   DEO(58,1), DE_MASK,       BOOKE64,        { RT, DE, RAL } },
-{ "lhze",    DEO(58,2), DE_MASK,       BOOKE64,        { RT, DE, RA } },
+{ "lhze",    DEO(58,2), DE_MASK,       BOOKE64,        { RT, DE, RA0 } },
 { "lhzue",   DEO(58,3), DE_MASK,       BOOKE64,        { RT, DE, RAL } },
-{ "lhae",    DEO(58,4), DE_MASK,       BOOKE64,        { RT, DE, RA } },
+{ "lhae",    DEO(58,4), DE_MASK,       BOOKE64,        { RT, DE, RA0 } },
 { "lhaue",   DEO(58,5), DE_MASK,       BOOKE64,        { RT, DE, RAL } },
-{ "lwze",    DEO(58,6), DE_MASK,       BOOKE64,        { RT, DE, RA } },
+{ "lwze",    DEO(58,6), DE_MASK,       BOOKE64,        { RT, DE, RA0 } },
 { "lwzue",   DEO(58,7), DE_MASK,       BOOKE64,        { RT, DE, RAL } },
-{ "stbe",    DEO(58,8), DE_MASK,       BOOKE64,        { RS, DE, RA } },
+{ "stbe",    DEO(58,8), DE_MASK,       BOOKE64,        { RS, DE, RA0 } },
 { "stbue",   DEO(58,9), DE_MASK,       BOOKE64,        { RS, DE, RAS } },
-{ "sthe",    DEO(58,10), DE_MASK,      BOOKE64,        { RS, DE, RA } },
+{ "sthe",    DEO(58,10), DE_MASK,      BOOKE64,        { RS, DE, RA0 } },
 { "sthue",   DEO(58,11), DE_MASK,      BOOKE64,        { RS, DE, RAS } },
-{ "stwe",    DEO(58,14), DE_MASK,      BOOKE64,        { RS, DE, RA } },
+{ "stwe",    DEO(58,14), DE_MASK,      BOOKE64,        { RS, DE, RA0 } },
 { "stwue",   DEO(58,15), DE_MASK,      BOOKE64,        { RS, DE, RAS } },
 
-{ "ld",      DSO(58,0),        DS_MASK,        PPC64,          { RT, DS, RA } },
+{ "ld",      DSO(58,0),        DS_MASK,        PPC64,          { RT, DS, RA0 } },
 
 { "ldu",     DSO(58,1), DS_MASK,       PPC64,          { RT, DS, RAL } },
 
-{ "lwa",     DSO(58,2), DS_MASK,       PPC64,          { RT, DS, RA } },
+{ "lwa",     DSO(58,2), DS_MASK,       PPC64,          { RT, DS, RA0 } },
+
+{ "dadd",    XRC(59,2,0), X_MASK,      POWER6,         { FRT, FRA, FRB } },
+{ "dadd.",   XRC(59,2,1), X_MASK,      POWER6,         { FRT, FRA, FRB } },
+
+{ "dqua",    ZRC(59,3,0), Z_MASK,      POWER6,         { FRT, FRA, FRB, RMC } },
+{ "dqua.",   ZRC(59,3,1), Z_MASK,      POWER6,         { FRT, FRA, FRB, RMC } },
 
 { "fdivs",   A(59,18,0), AFRC_MASK,    PPC,            { FRT, FRA, FRB } },
 { "fdivs.",  A(59,18,1), AFRC_MASK,    PPC,            { FRT, FRA, FRB } },
@@ -4413,12 +4637,15 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "fsqrts",  A(59,22,0), AFRAFRC_MASK, PPC,            { FRT, FRB } },
 { "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC,            { FRT, FRB } },
 
-{ "fres",    A(59,24,0), AFRAFRC_MASK, PPC,            { FRT, FRB } },
-{ "fres.",   A(59,24,1), AFRAFRC_MASK, PPC,            { FRT, FRB } },
+{ "fres",    A(59,24,0), AFRALFRC_MASK,        PPC,            { FRT, FRB, A_L } },
+{ "fres.",   A(59,24,1), AFRALFRC_MASK,        PPC,            { FRT, FRB, A_L } },
 
 { "fmuls",   A(59,25,0), AFRB_MASK,    PPC,            { FRT, FRA, FRC } },
 { "fmuls.",  A(59,25,1), AFRB_MASK,    PPC,            { FRT, FRA, FRC } },
 
+{ "frsqrtes", A(59,26,0), AFRALFRC_MASK,POWER5,                { FRT, FRB, A_L } },
+{ "frsqrtes.",A(59,26,1), AFRALFRC_MASK,POWER5,                { FRT, FRB, A_L } },
+
 { "fmsubs",  A(59,28,0), A_MASK,       PPC,            { FRT,FRA,FRC,FRB } },
 { "fmsubs.", A(59,28,1), A_MASK,       PPC,            { FRT,FRA,FRC,FRB } },
 
@@ -4431,31 +4658,103 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "fnmadds", A(59,31,0), A_MASK,       PPC,            { FRT,FRA,FRC,FRB } },
 { "fnmadds.",A(59,31,1), A_MASK,       PPC,            { FRT,FRA,FRC,FRB } },
 
+{ "dmul",    XRC(59,34,0), X_MASK,     POWER6,         { FRT, FRA, FRB } },
+{ "dmul.",   XRC(59,34,1), X_MASK,     POWER6,         { FRT, FRA, FRB } },
+
+{ "drrnd",   ZRC(59,35,0), Z_MASK,     POWER6,         { FRT, FRA, FRB, RMC } },
+{ "drrnd.",  ZRC(59,35,1), Z_MASK,     POWER6,         { FRT, FRA, FRB, RMC } },
+
+{ "dscli",   ZRC(59,66,0), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+{ "dscli.",  ZRC(59,66,1), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+
+{ "dquai",   ZRC(59,67,0), Z_MASK,     POWER6,         { TE,  FRT, FRB, RMC } },
+{ "dquai.",  ZRC(59,67,1), Z_MASK,     POWER6,         { TE,  FRT, FRB, RMC } },
+
+{ "dscri",   ZRC(59,98,0), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+{ "dscri.",  ZRC(59,98,1), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+
+{ "drintx",  ZRC(59,99,0), Z_MASK,     POWER6,         { R, FRT, FRB, RMC } },
+{ "drintx.", ZRC(59,99,1), Z_MASK,     POWER6,         { R, FRT, FRB, RMC } },
+
+{ "dcmpo",   X(59,130),           X_MASK,      POWER6,         { BF,  FRA, FRB } },
+
+{ "dtstex",  X(59,162),           X_MASK,      POWER6,         { BF,  FRA, FRB } },
+{ "dtstdc",  Z(59,194),           Z_MASK,      POWER6,         { BF,  FRA, DCM } },
+{ "dtstdg",  Z(59,226),           Z_MASK,      POWER6,         { BF,  FRA, DGM } },
+
+{ "drintn",  ZRC(59,227,0), Z_MASK,    POWER6,         { R, FRT, FRB, RMC } },
+{ "drintn.", ZRC(59,227,1), Z_MASK,    POWER6,         { R, FRT, FRB, RMC } },
+
+{ "dctdp",   XRC(59,258,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dctdp.",  XRC(59,258,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "dctfix",  XRC(59,290,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dctfix.", XRC(59,290,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "ddedpd",  XRC(59,322,0), X_MASK,    POWER6,         { SP, FRT, FRB } }, 
+{ "ddedpd.", XRC(59,322,1), X_MASK,    POWER6,         { SP, FRT, FRB } }, 
+
+{ "dxex",    XRC(59,354,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dxex.",   XRC(59,354,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "dsub",    XRC(59,514,0), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+{ "dsub.",   XRC(59,514,1), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+
+{ "ddiv",    XRC(59,546,0), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+{ "ddiv.",   XRC(59,546,1), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+
+{ "dcmpu",   X(59,642),            X_MASK,     POWER6,         { BF,  FRA, FRB } },
+
+{ "dtstsf",  X(59,674),           X_MASK,      POWER6,         { BF,  FRA, FRB } },
+
+{ "drsp",    XRC(59,770,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "drsp.",   XRC(59,770,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "dcffix",  XRC(59,802,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dcffix.", XRC(59,802,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "denbcd",  XRC(59,834,0), X_MASK,    POWER6,         { S, FRT, FRB } },
+{ "denbcd.", XRC(59,834,1), X_MASK,    POWER6,         { S, FRT, FRB } },
+
+{ "diex",    XRC(59,866,0), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+{ "diex.",   XRC(59,866,1), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+
 { "stfq",    OP(60),   OP_MASK,        POWER2,         { FRS, D, RA } },
 
 { "stfqu",   OP(61),   OP_MASK,        POWER2,         { FRS, D, RA } },
 
-{ "lde",     DEO(62,0), DE_MASK,       BOOKE64,        { RT, DES, RA } },
-{ "ldue",    DEO(62,1), DE_MASK,       BOOKE64,        { RT, DES, RA } },
-{ "lfse",    DEO(62,4), DE_MASK,       BOOKE64,        { FRT, DES, RA } },
+{ "stfdp",   OP(61),   OP_MASK,        POWER6,         { FRT, D, RA0 } },
+
+{ "lde",     DEO(62,0), DE_MASK,       BOOKE64,        { RT, DES, RA0 } },
+{ "ldue",    DEO(62,1), DE_MASK,       BOOKE64,        { RT, DES, RA0 } },
+{ "lfse",    DEO(62,4), DE_MASK,       BOOKE64,        { FRT, DES, RA0 } },
 { "lfsue",   DEO(62,5), DE_MASK,       BOOKE64,        { FRT, DES, RAS } },
-{ "lfde",    DEO(62,6), DE_MASK,       BOOKE64,        { FRT, DES, RA } },
+{ "lfde",    DEO(62,6), DE_MASK,       BOOKE64,        { FRT, DES, RA0 } },
 { "lfdue",   DEO(62,7), DE_MASK,       BOOKE64,        { FRT, DES, RAS } },
-{ "stde",    DEO(62,8), DE_MASK,       BOOKE64,        { RS, DES, RA } },
+{ "stde",    DEO(62,8), DE_MASK,       BOOKE64,        { RS, DES, RA0 } },
 { "stdue",   DEO(62,9), DE_MASK,       BOOKE64,        { RS, DES, RAS } },
-{ "stfse",   DEO(62,12), DE_MASK,      BOOKE64,        { FRS, DES, RA } },
+{ "stfse",   DEO(62,12), DE_MASK,      BOOKE64,        { FRS, DES, RA0 } },
 { "stfsue",  DEO(62,13), DE_MASK,      BOOKE64,        { FRS, DES, RAS } },
-{ "stfde",   DEO(62,14), DE_MASK,      BOOKE64,        { FRS, DES, RA } },
+{ "stfde",   DEO(62,14), DE_MASK,      BOOKE64,        { FRS, DES, RA0 } },
 { "stfdue",  DEO(62,15), DE_MASK,      BOOKE64,        { FRS, DES, RAS } },
 
-{ "std",     DSO(62,0),        DS_MASK,        PPC64,          { RS, DS, RA } },
+{ "std",     DSO(62,0),        DS_MASK,        PPC64,          { RS, DS, RA0 } },
 
 { "stdu",    DSO(62,1),        DS_MASK,        PPC64,          { RS, DS, RAS } },
 
-{ "stq",     DSO(62,2),        DS_MASK,        POWER4,         { RSQ, DS, RA } },
+{ "stq",     DSO(62,2),        DS_MASK,        POWER4,         { RSQ, DS, RA0 } },
 
 { "fcmpu",   X(63,0),  X_MASK|(3<<21), COM,            { BF, FRA, FRB } },
 
+{ "daddq",   XRC(63,2,0), X_MASK,      POWER6,         { FRT, FRA, FRB } },
+{ "daddq.",  XRC(63,2,1), X_MASK,      POWER6,         { FRT, FRA, FRB } },
+
+{ "dquaq",   ZRC(63,3,0), Z_MASK,      POWER6,         { FRT, FRA, FRB, RMC } },
+{ "dquaq.",  ZRC(63,3,1), Z_MASK,      POWER6,         { FRT, FRA, FRB, RMC } },
+
+{ "fcpsgn",  XRC(63,8,0), X_MASK,      POWER6,         { FRT, FRA, FRB } },
+{ "fcpsgn.", XRC(63,8,1), X_MASK,      POWER6,         { FRT, FRA, FRB } },
+
 { "frsp",    XRC(63,12,0), XRA_MASK,   COM,            { FRT, FRB } },
 { "frsp.",   XRC(63,12,1), XRA_MASK,   COM,            { FRT, FRB } },
 
@@ -4490,13 +4789,16 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 { "fsel",    A(63,23,0), A_MASK,       PPC,            { FRT,FRA,FRC,FRB } },
 { "fsel.",   A(63,23,1), A_MASK,       PPC,            { FRT,FRA,FRC,FRB } },
 
+{ "fre",     A(63,24,0), AFRALFRC_MASK,        POWER5,         { FRT, FRB, A_L } },
+{ "fre.",    A(63,24,1), AFRALFRC_MASK,        POWER5,         { FRT, FRB, A_L } },
+
 { "fmul",    A(63,25,0), AFRB_MASK,    PPCCOM,         { FRT, FRA, FRC } },
 { "fm",      A(63,25,0), AFRB_MASK,    PWRCOM,         { FRT, FRA, FRC } },
 { "fmul.",   A(63,25,1), AFRB_MASK,    PPCCOM,         { FRT, FRA, FRC } },
 { "fm.",     A(63,25,1), AFRB_MASK,    PWRCOM,         { FRT, FRA, FRC } },
 
-{ "frsqrte", A(63,26,0), AFRAFRC_MASK, PPC,            { FRT, FRB } },
-{ "frsqrte.",A(63,26,1), AFRAFRC_MASK, PPC,            { FRT, FRB } },
+{ "frsqrte", A(63,26,0), AFRALFRC_MASK,        PPC,            { FRT, FRB, A_L } },
+{ "frsqrte.",A(63,26,1), AFRALFRC_MASK,        PPC,            { FRT, FRB, A_L } },
 
 { "fmsub",   A(63,28,0), A_MASK,       PPCCOM,         { FRT,FRA,FRC,FRB } },
 { "fms",     A(63,28,0), A_MASK,       PWRCOM,         { FRT,FRA,FRC,FRB } },
@@ -4520,6 +4822,12 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "fcmpo",   X(63,32), X_MASK|(3<<21), COM,            { BF, FRA, FRB } },
 
+{ "dmulq",   XRC(63,34,0), X_MASK,     POWER6,         { FRT, FRA, FRB } },
+{ "dmulq.",  XRC(63,34,1), X_MASK,     POWER6,         { FRT, FRA, FRB } },
+
+{ "drrndq",  ZRC(63,35,0), Z_MASK,     POWER6,         { FRT, FRA, FRB, RMC } },
+{ "drrndq.", ZRC(63,35,1), Z_MASK,     POWER6,         { FRT, FRA, FRB, RMC } },
+
 { "mtfsb1",  XRC(63,38,0), XRARB_MASK, COM,            { BT } },
 { "mtfsb1.", XRC(63,38,1), XRARB_MASK, COM,            { BT } },
 
@@ -4528,36 +4836,100 @@ const struct powerpc_opcode powerpc_opcodes[] = {
 
 { "mcrfs",   X(63,64), XRB_MASK|(3<<21)|(3<<16), COM,  { BF, BFA } },
 
+{ "dscliq",  ZRC(63,66,0), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+{ "dscliq.", ZRC(63,66,1), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+
+{ "dquaiq",  ZRC(63,67,0), Z_MASK,     POWER6,         { TE,  FRT, FRB, RMC } },
+{ "dquaiq.", ZRC(63,67,1), Z_MASK,     POWER6,         { FRT, FRA, FRB, RMC } },
+
 { "mtfsb0",  XRC(63,70,0), XRARB_MASK, COM,            { BT } },
 { "mtfsb0.", XRC(63,70,1), XRARB_MASK, COM,            { BT } },
 
 { "fmr",     XRC(63,72,0), XRA_MASK,   COM,            { FRT, FRB } },
 { "fmr.",    XRC(63,72,1), XRA_MASK,   COM,            { FRT, FRB } },
 
+{ "dscriq",  ZRC(63,98,0), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+{ "dscriq.", ZRC(63,98,1), Z_MASK,     POWER6,         { FRT, FRA, SH16 } },
+
+{ "drintxq", ZRC(63,99,0), Z_MASK,     POWER6,         { R, FRT, FRB, RMC } },
+{ "drintxq.",ZRC(63,99,1), Z_MASK,     POWER6,         { R, FRT, FRB, RMC } },
+
+{ "dcmpoq",  X(63,130),           X_MASK,      POWER6,         { BF,  FRA, FRB } },
+
 { "mtfsfi",  XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
 { "mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
 
 { "fnabs",   XRC(63,136,0), XRA_MASK,  COM,            { FRT, FRB } },
 { "fnabs.",  XRC(63,136,1), XRA_MASK,  COM,            { FRT, FRB } },
 
+{ "dtstexq", X(63,162),            X_MASK,     POWER6,         { BF,  FRA, FRB } },
+{ "dtstdcq", Z(63,194),            Z_MASK,     POWER6,         { BF,  FRA, DCM } },
+{ "dtstdgq", Z(63,226),            Z_MASK,     POWER6,         { BF,  FRA, DGM } },
+
+{ "drintnq", ZRC(63,227,0), Z_MASK,    POWER6,         { R, FRT, FRB, RMC } },
+{ "drintnq.",ZRC(63,227,1), Z_MASK,    POWER6,         { R, FRT, FRB, RMC } },
+
+{ "dctqpq",  XRC(63,258,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dctqpq.", XRC(63,258,1), X_MASK,    POWER6,         { FRT, FRB } },
+
 { "fabs",    XRC(63,264,0), XRA_MASK,  COM,            { FRT, FRB } },
 { "fabs.",   XRC(63,264,1), XRA_MASK,  COM,            { FRT, FRB } },
 
+{ "dctfixq", XRC(63,290,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dctfixq.",XRC(63,290,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "ddedpdq", XRC(63,322,0), X_MASK,    POWER6,         { SP, FRT, FRB } },
+{ "ddedpdq.",XRC(63,322,1), X_MASK,    POWER6,         { SP, FRT, FRB } },
+
+{ "dxexq",   XRC(63,354,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dxexq.",  XRC(63,354,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "frin",    XRC(63,392,0), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "frin.",   XRC(63,392,1), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "friz",    XRC(63,424,0), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "friz.",   XRC(63,424,1), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "frip",    XRC(63,456,0), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "frip.",   XRC(63,456,1), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "frim",    XRC(63,488,0), XRA_MASK,  POWER5,         { FRT, FRB } },
+{ "frim.",   XRC(63,488,1), XRA_MASK,  POWER5,         { FRT, FRB } },
+
+{ "dsubq",   XRC(63,514,0), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+{ "dsubq.",  XRC(63,514,1), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+
+{ "ddivq",   XRC(63,546,0), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+{ "ddivq.",  XRC(63,546,1), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+
 { "mffs",    XRC(63,583,0), XRARB_MASK,        COM,            { FRT } },
 { "mffs.",   XRC(63,583,1), XRARB_MASK,        COM,            { FRT } },
 
+{ "dcmpuq",  X(63,642),            X_MASK,     POWER6,         { BF,  FRA, FRB } },
+
+{ "dtstsfq", X(63,674),            X_MASK,     POWER6,         { BF,  FRA, FRB } },
+
 { "mtfsf",   XFL(63,711,0), XFL_MASK,  COM,            { FLM, FRB } },
 { "mtfsf.",  XFL(63,711,1), XFL_MASK,  COM,            { FLM, FRB } },
 
+{ "drdpq",   XRC(63,770,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "drdpq.",  XRC(63,770,1), X_MASK,    POWER6,         { FRT, FRB } },
+
+{ "dcffixq", XRC(63,802,0), X_MASK,    POWER6,         { FRT, FRB } },
+{ "dcffixq.",XRC(63,802,1), X_MASK,    POWER6,         { FRT, FRB } },
+
 { "fctid",   XRC(63,814,0), XRA_MASK,  PPC64,          { FRT, FRB } },
 { "fctid.",  XRC(63,814,1), XRA_MASK,  PPC64,          { FRT, FRB } },
 
 { "fctidz",  XRC(63,815,0), XRA_MASK,  PPC64,          { FRT, FRB } },
 { "fctidz.", XRC(63,815,1), XRA_MASK,  PPC64,          { FRT, FRB } },
 
+{ "denbcdq", XRC(63,834,0), X_MASK,    POWER6,         { S, FRT, FRB } },
+{ "denbcdq.",XRC(63,834,1), X_MASK,    POWER6,         { S, FRT, FRB } },
+
 { "fcfid",   XRC(63,846,0), XRA_MASK,  PPC64,          { FRT, FRB } },
 { "fcfid.",  XRC(63,846,1), XRA_MASK,  PPC64,          { FRT, FRB } },
 
+{ "diexq",   XRC(63,866,0), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+{ "diexq.",  XRC(63,866,1), X_MASK,    POWER6,         { FRT, FRA, FRB } },
+
 };
 
 const int powerpc_num_opcodes =
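Each entry above pairs a fixed opcode value with a mask of the bits that must match; a disassembler accepts an entry when (insn & mask) == opcode, and the operand fields live in the remaining zero bits. A minimal, self-contained sketch of that lookup, built around the dcbz encoding from the table (the demo_* names are illustrative only, not kernel or binutils code):

#include <stdio.h>

struct demo_opcode {
	const char *name;
	unsigned long opcode;	/* required bit pattern */
	unsigned long mask;	/* bits that must match */
};

/* dcbz is X-form: major opcode 31 in bits 0-5 and extended opcode 1014 in
   bits 21-30 (ISA bit numbering), i.e. (31 << 26) | (1014 << 1).  The mask
   also forces the RT field (bits 6-10) and the Rc bit to zero, similar to
   the XRT_MASK used by the real dcbz entry. */
static const struct demo_opcode demo_table[] = {
	{ "dcbz", (31UL << 26) | (1014UL << 1), 0xffe007ffUL },
};

static const struct demo_opcode *demo_lookup(unsigned long insn)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if ((insn & demo_table[i].mask) == demo_table[i].opcode)
			return &demo_table[i];
	return NULL;
}

int main(void)
{
	/* dcbz r3,r4: RA = 3 goes in bits 11-15, RB = 4 in bits 16-20. */
	unsigned long insn = (31UL << 26) | (3UL << 16) | (4UL << 11) | (1014UL << 1);
	const struct demo_opcode *op = demo_lookup(insn);

	printf("%s\n", op ? op->name : ".long");	/* prints "dcbz" */
	return 0;
}

The real table rows additionally carry the processor-flags word (PPC, POWER6, CELL, ...) and the operand list; the lookup idea is the same.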
index 342237e8dd6956f44e66260bb1d5d9874d340742..110df96354b4419f83415948cc402d4dc2db5cdd 100644 (file)
@@ -1,5 +1,5 @@
 /* ppc.h -- Header file for PowerPC opcode table
-   Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003
+   Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
    Free Software Foundation, Inc.
    Written by Ian Lance Taylor, Cygnus Support
 
@@ -17,7 +17,7 @@ the GNU General Public License for more details.
 
 You should have received a copy of the GNU General Public License
 along with this file; see the file COPYING.  If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
+Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 
 #ifndef PPC_H
 #define PPC_H
@@ -134,6 +134,18 @@ extern const int powerpc_num_opcodes;
 /* Opcode is supported by machine check APU.  */
 #define PPC_OPCODE_RFMCI         0x800000
 
+/* Opcode is only supported by Power5 architecture.  */
+#define PPC_OPCODE_POWER5       0x1000000
+
+/* Opcode is supported by PowerPC e300 family.  */
+#define PPC_OPCODE_E300          0x2000000
+
+/* Opcode is only supported by Power6 architecture.  */
+#define PPC_OPCODE_POWER6       0x4000000
+
+/* Opcode is only supported by PowerPC Cell family.  */
+#define PPC_OPCODE_CELL                 0x8000000
+
 /* A macro to extract the major opcode from an instruction.  */
 #define PPC_OP(i) (((i) >> 26) & 0x3f)
 \f
@@ -233,25 +245,28 @@ extern const struct powerpc_operand powerpc_operands[];
    register names with a leading 'r'.  */
 #define PPC_OPERAND_GPR (040)
 
+/* Like PPC_OPERAND_GPR, but don't print a leading 'r' for r0.  */
+#define PPC_OPERAND_GPR_0 (0100)
+
 /* This operand names a floating point register.  The disassembler
    prints these with a leading 'f'.  */
-#define PPC_OPERAND_FPR (0100)
+#define PPC_OPERAND_FPR (0200)
 
 /* This operand is a relative branch displacement.  The disassembler
    prints these symbolically if possible.  */
-#define PPC_OPERAND_RELATIVE (0200)
+#define PPC_OPERAND_RELATIVE (0400)
 
 /* This operand is an absolute branch address.  The disassembler
    prints these symbolically if possible.  */
-#define PPC_OPERAND_ABSOLUTE (0400)
+#define PPC_OPERAND_ABSOLUTE (01000)
 
 /* This operand is optional, and is zero if omitted.  This is used for
-   the optional BF and L fields in the comparison instructions.  The
+   example, in the optional BF field in the comparison instructions.  The
    assembler must count the number of operands remaining on the line,
    and the number of operands remaining for the opcode, and decide
    whether this operand is present or not.  The disassembler should
    print this operand out only if it is not zero.  */
-#define PPC_OPERAND_OPTIONAL (01000)
+#define PPC_OPERAND_OPTIONAL (02000)
 
 /* This flag is only used with PPC_OPERAND_OPTIONAL.  If this operand
    is omitted, then for the next operand use this operand value plus
@@ -259,24 +274,24 @@ extern const struct powerpc_operand powerpc_operands[];
    hack is needed because the Power rotate instructions can take
    either 4 or 5 operands.  The disassembler should print this operand
    out regardless of the PPC_OPERAND_OPTIONAL field.  */
-#define PPC_OPERAND_NEXT (02000)
+#define PPC_OPERAND_NEXT (04000)
 
 /* This operand should be regarded as a negative number for the
    purposes of overflow checking (i.e., the normal most negative
    number is disallowed and one more than the normal most positive
    number is allowed).  This flag will only be set for a signed
    operand.  */
-#define PPC_OPERAND_NEGATIVE (04000)
+#define PPC_OPERAND_NEGATIVE (010000)
 
 /* This operand names a vector unit register.  The disassembler
    prints these with a leading 'v'.  */
-#define PPC_OPERAND_VR (010000)
+#define PPC_OPERAND_VR (020000)
 
 /* This operand is for the DS field in a DS form instruction.  */
-#define PPC_OPERAND_DS (020000)
+#define PPC_OPERAND_DS (040000)
 
 /* This operand is for the DQ field in a DQ form instruction.  */
-#define PPC_OPERAND_DQ (040000)
+#define PPC_OPERAND_DQ (0100000)
 \f
 /* The POWER and PowerPC assemblers use a few macros.  We keep them
    with the operands table for simplicity.  The macro table is an
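The new PPC_OPERAND_GPR_0 flag above marks a base-register operand that should be shown as a plain 0 rather than r0 when its value is zero, which is presumably what the new RA0 operands in the opcode table rely on. A small illustrative sketch of how a printer might honour the flag; the demo_* names are stand-ins, not part of this header:

#include <stdio.h>

#define DEMO_OPERAND_GPR   (040)
#define DEMO_OPERAND_GPR_0 (0100)

static void demo_print_gpr(unsigned long flags, long value)
{
	if ((flags & DEMO_OPERAND_GPR)
	    || ((flags & DEMO_OPERAND_GPR_0) && value != 0))
		printf("r%ld", value);	/* named register */
	else
		printf("%ld", value);	/* bare 0 for an omitted base register */
}

int main(void)
{
	demo_print_gpr(DEMO_OPERAND_GPR_0, 0);	/* prints "0"  */
	printf("\n");
	demo_print_gpr(DEMO_OPERAND_GPR_0, 3);	/* prints "r3" */
	printf("\n");
	return 0;
}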
diff --git a/arch/powerpc/xmon/spu-dis.c b/arch/powerpc/xmon/spu-dis.c
new file mode 100644 (file)
index 0000000..ee929c6
--- /dev/null
@@ -0,0 +1,248 @@
+/* Disassemble SPU instructions
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of GDB, GAS, and the GNU binutils.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License along
+   with this program; if not, write to the Free Software Foundation, Inc.,
+   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+#include <linux/string.h>
+#include "nonstdio.h"
+#include "ansidecl.h"
+#include "spu.h"
+#include "dis-asm.h"
+
+/* This file provides a disassembler function which uses
+   the disassembler interface defined in dis-asm.h.   */
+
+extern const struct spu_opcode spu_opcodes[];
+extern const int spu_num_opcodes;
+
+#define SPU_DISASM_TBL_SIZE (1 << 11)
+static const struct spu_opcode *spu_disassemble_table[SPU_DISASM_TBL_SIZE];
+
+static void
+init_spu_disassemble (void)
+{
+  int i;
+
+  /* If two instructions have the same opcode then we prefer the first
+   * one.  In most cases it is just an alternate mnemonic. */
+  for (i = 0; i < spu_num_opcodes; i++)
+    {
+      int o = spu_opcodes[i].opcode;
+      if (o >= SPU_DISASM_TBL_SIZE)
+       continue; /* abort (); */
+      if (spu_disassemble_table[o] == 0)
+       spu_disassemble_table[o] = &spu_opcodes[i];
+    }
+}
+
+/* Determine the instruction from the 11 most significant bits.  */
+static const struct spu_opcode *
+get_index_for_opcode (unsigned int insn)
+{
+  const struct spu_opcode *index;
+  unsigned int opcode = insn >> (32-11);
+
+  /* Init the table.  This assumes that element 0/opcode 0 (currently
+   * "stop") is always used */
+  if (spu_disassemble_table[0] == 0)
+    init_spu_disassemble ();
+
+  if ((index = spu_disassemble_table[opcode & 0x780]) != 0
+      && index->insn_type == RRR)
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7f0]) != 0
+      && (index->insn_type == RI18 || index->insn_type == LBT))
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7f8]) != 0
+      && index->insn_type == RI10)
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7fc]) != 0
+      && (index->insn_type == RI16))
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7fe]) != 0
+      && (index->insn_type == RI8))
+    return index;
+
+  if ((index = spu_disassemble_table[opcode & 0x7ff]) != 0)
+    return index;
+
+  return 0;
+}
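
get_index_for_opcode() above narrows the 11-bit opcode field one format at a time: each mask keeps only the bits that belong to that format's opcode width, and a hit is accepted only if the entry's insn_type matches. A self-contained sketch of the same idea, using the brsl (RI16, 0x198) and shufb (RRR, 0x580) opcodes from spu-insns.h later in this patch; the demo_* names are illustrative only:

#include <stdio.h>

enum demo_type { DEMO_RRR, DEMO_RI16 };

struct demo_op { unsigned int opcode; enum demo_type type; const char *name; };

static const struct demo_op demo_table[] = {
	{ 0x198, DEMO_RI16, "brsl" },
	{ 0x580, DEMO_RRR,  "shufb" },
};

static const struct demo_op *demo_find(unsigned int key, enum demo_type type)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if (demo_table[i].opcode == key && demo_table[i].type == type)
			return &demo_table[i];
	return NULL;
}

int main(void)
{
	unsigned int insn = 0x33000000;            /* brsl with I16 and RT fields zero */
	unsigned int opcode = insn >> (32 - 11);   /* top 11 bits: 0x198 */
	const struct demo_op *op;

	/* The 4-bit key (RRR formats, mask 0x780) finds no RRR entry ... */
	op = demo_find(opcode & 0x780, DEMO_RRR);
	/* ... but the 9-bit key (RI16 formats, mask 0x7fc) finds brsl. */
	if (!op)
		op = demo_find(opcode & 0x7fc, DEMO_RI16);

	printf("%s\n", op ? op->name : ".long");	/* prints "brsl" */
	return 0;
}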
+
+/* Print a Spu instruction.  */
+
+int
+print_insn_spu (unsigned long insn, unsigned long memaddr)
+{
+  int value;
+  int hex_value;
+  const struct spu_opcode *index;
+  enum spu_insns tag;
+
+  index = get_index_for_opcode (insn);
+
+  if (index == 0)
+    {
+      printf(".long 0x%lx", insn);
+    }
+  else
+    {
+      int i;
+      int paren = 0;
+      tag = (enum spu_insns)(index - spu_opcodes);
+      printf("%s", index->mnemonic);
+      if (tag == M_BI || tag == M_BISL || tag == M_IRET || tag == M_BISLED
+         || tag == M_BIHNZ || tag == M_BIHZ || tag == M_BINZ || tag == M_BIZ
+          || tag == M_SYNC || tag == M_HBR)
+       {
+         int fb = (insn >> (32-18)) & 0x7f;
+         if (fb & 0x40)
+           printf(tag == M_SYNC ? "c" : "p");
+         if (fb & 0x20)
+           printf("d");
+         if (fb & 0x10)
+           printf("e");
+       }
+      if (index->arg[0] != 0)
+       printf("\t");
+      hex_value = 0;
+      for (i = 1;  i <= index->arg[0]; i++)
+       {
+         int arg = index->arg[i];
+         if (arg != A_P && !paren && i > 1)
+           printf(",");
+
+         switch (arg)
+           {
+           case A_T:
+             printf("$%d",
+                                    DECODE_INSN_RT (insn));
+             break;
+           case A_A:
+             printf("$%d",
+                                    DECODE_INSN_RA (insn));
+             break;
+           case A_B:
+             printf("$%d",
+                                    DECODE_INSN_RB (insn));
+             break;
+           case A_C:
+             printf("$%d",
+                                    DECODE_INSN_RC (insn));
+             break;
+           case A_S:
+             printf("$sp%d",
+                                    DECODE_INSN_RA (insn));
+             break;
+           case A_H:
+             printf("$ch%d",
+                                    DECODE_INSN_RA (insn));
+             break;
+           case A_P:
+             paren++;
+             printf("(");
+             break;
+           case A_U7A:
+             printf("%d",
+                                    173 - DECODE_INSN_U8 (insn));
+             break;
+           case A_U7B:
+             printf("%d",
+                                    155 - DECODE_INSN_U8 (insn));
+             break;
+           case A_S3:
+           case A_S6:
+           case A_S7:
+           case A_S7N:
+           case A_U3:
+           case A_U5:
+           case A_U6:
+           case A_U7:
+             hex_value = DECODE_INSN_I7 (insn);
+             printf("%d", hex_value);
+             break;
+           case A_S11:
+             print_address(memaddr + DECODE_INSN_I9a (insn) * 4);
+             break;
+           case A_S11I:
+             print_address(memaddr + DECODE_INSN_I9b (insn) * 4);
+             break;
+           case A_S10:
+           case A_S10B:
+             hex_value = DECODE_INSN_I10 (insn);
+             printf("%d", hex_value);
+             break;
+           case A_S14:
+             hex_value = DECODE_INSN_I10 (insn) * 16;
+             printf("%d", hex_value);
+             break;
+           case A_S16:
+             hex_value = DECODE_INSN_I16 (insn);
+             printf("%d", hex_value);
+             break;
+           case A_X16:
+             hex_value = DECODE_INSN_U16 (insn);
+             printf("%u", hex_value);
+             break;
+           case A_R18:
+             value = DECODE_INSN_I16 (insn) * 4;
+             if (value == 0)
+               printf("%d", value);
+             else
+               {
+                 hex_value = memaddr + value;
+                 print_address(hex_value & 0x3ffff);
+               }
+             break;
+           case A_S18:
+             value = DECODE_INSN_U16 (insn) * 4;
+             if (value == 0)
+               printf("%d", value);
+             else
+               print_address(value);
+             break;
+           case A_U18:
+             value = DECODE_INSN_U18 (insn);
+             if (value == 0 || 1)
+               {
+                 hex_value = value;
+                 printf("%u", value);
+               }
+             else
+               print_address(value);
+             break;
+           case A_U14:
+             hex_value = DECODE_INSN_U14 (insn);
+             printf("%u", hex_value);
+             break;
+           }
+         if (arg != A_P && paren)
+           {
+             printf(")");
+             paren--;
+           }
+       }
+      if (hex_value > 16)
+       printf("\t# %x", hex_value);
+    }
+  return 4;
+}
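
A hedged usage sketch of print_insn_spu() above, meant to be compiled together with spu-dis.c; read_word() is a hypothetical accessor supplied by the caller, not an existing xmon helper:

#include <stdio.h>

extern int print_insn_spu(unsigned long insn, unsigned long memaddr);

unsigned long demo_disassemble_spu_range(unsigned long start, unsigned long end,
		unsigned long (*read_word)(unsigned long addr))
{
	unsigned long addr = start;

	while (addr < end) {
		printf("%.8lx  ", addr);
		/* print_insn_spu() prints the mnemonic and operands and
		   returns the instruction size in bytes (always 4). */
		addr += print_insn_spu(read_word(addr), addr);
		printf("\n");
	}
	return addr;
}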
diff --git a/arch/powerpc/xmon/spu-insns.h b/arch/powerpc/xmon/spu-insns.h
new file mode 100644 (file)
index 0000000..99dc452
--- /dev/null
@@ -0,0 +1,410 @@
+/* SPU ELF support for BFD.
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of BFD, the Binary File Descriptor library.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software Foundation,
+   Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+/* SPU Opcode Table
+
+-=-=-= FORMAT =-=-=-
+                                                                                                    
+       +----+-------+-------+-------+-------+                         +------------+-------+-------+-------+
+RRR    | op |  RC   |  RB   |  RA   |  RT   |          RI7    | op         |  I7   |  RA   |  RT   |
+       +----+-------+-------+-------+-------+                 +------------+-------+-------+-------+
+        0  3       1       1       2       3                   0          1       1       2       3 
+                   0       7       4       1                              0       7       4       1 
+
+       +-----------+--------+-------+-------+                 +---------+----------+-------+-------+
+RI8    | op        |   I8   |  RA   |  RT   |          RI10   | op      |   I10    |  RA   |  RT   |
+       +-----------+--------+-------+-------+                 +---------+----------+-------+-------+
+        0         9        1       2       3                   0       7          1       2       3 
+                           7       4       1                                      7       4       1 
+
+       +----------+-----------------+-------+                 +--------+-------------------+-------+
+RI16   | op       |       I16       |  RT   |          RI18   | op     |       I18         |  RT   |
+       +----------+-----------------+-------+                 +--------+-------------------+-------+
+        0        8                 2       3                   0      6                   2       3 
+                                   4       1                                              4       1 
+
+       +------------+-------+-------+-------+                 +-------+--+-----------------+-------+
+RR     | op         |  RB   |  RA   |  RT   |          LBT    | op    |RO|       I16       |  RO   |
+       +------------+-------+-------+-------+                 +-------+--+-----------------+-------+
+        0          1       1       2       3                   0     6  8                 2       3 
+                   0       7       4       1                                              4       1 
+
+                                                              +------------+----+--+-------+-------+
+                                                       LBTI   | op         | // |RO|  RA   |  RO   |
+                                                              +------------+----+--+-------+-------+
+                                                               0          1    1  1       2       3
+                                                                          0    5  7       4       1
+
+-=-=-= OPCODE =-=-=-
+
+The OPCODE field holds the most significant 11 bits of the instruction. Some formats use fewer than 11 bits for the opcode field; in
+that case the bits other than op are defined as 0s. For example, the opcode of the fma instruction, which is RRR format, is defined as
+0x700: 0x700 is 11'b11100000000, so the opcode proper is 4'b1110 and the remaining 7 bits are defined as 7'b0000000.
+
+-=-=-= ASM_FORMAT =-=-=-
+
+RRR category                                           RI7 category                               
+       ASM_RRR         mnemonic RC, RA, RB, RT                 ASM_RI4         mnemonic RT, RA, I4
+                                                               ASM_RI7         mnemonic RT, RA, I7
+
+RI8 category                                           RI10 category                               
+       ASM_RUI8        mnemonic RT, RA, UI8                    ASM_AI10        mnemonic RA, I10    
+                                                               ASM_RI10        mnemonic RT, RA, R10
+                                                               ASM_RI10IDX     mnemonic RT, I10(RA)
+
+RI16 category                                          RI18 category                           
+       ASM_I16W        mnemonic I16W                           ASM_RI18        mnemonic RT, I18
+       ASM_RI16        mnemonic RT, I16
+       ASM_RI16W       mnemonic RT, I16W
+
+RR category                                            LBT category                                    
+       ASM_MFSPR       mnemonic RT, SA                         ASM_LBT         mnemonic brinst, brtarg 
+       ASM_MTSPR       mnemonic SA, RT                                                                 
+       ASM_NOOP        mnemonic                        LBTI category                                   
+       ASM_RA          mnemonic RA                             ASM_LBTI        mnemonic brinst, RA     
+       ASM_RAB         mnemonic RA, RB
+       ASM_RDCH        mnemonic RT, CA
+       ASM_RR          mnemonic RT, RA, RB
+       ASM_RT          mnemonic RT
+       ASM_RTA         mnemonic RT, RA
+       ASM_WRCH        mnemonic CA, RT
+
+Note that RRR instructions have the names for RC and RT reversed from
+what's in the ISA, in order to put RT in the same position it appears
+for other formats.
+
+-=-=-= DEPENDENCY =-=-=-
+
+DEPENDENCY field consists of 5 digits. This represents which register is used as a source and which register is used as a target.
+The first (most significant) digit is always 0. It is then followed by the RC, RB, RA and RT digits.
+If the digit is 0, this means the corresponding register is not used in the instruction.
+If the digit is 1, this means the corresponding register is used as a source in the instruction.
+If the digit is 2, this means the corresponding register is used as a target in the instruction.
+If the digit is 3, this means the corresponding register is used as both source and target in the instruction.
+For example, the fms instruction has 00113 as its DEPENDENCY field. This means RC is not used in this operation, RB and RA are
+used as sources, and the final digit 3 means RT is used as both a source and the target.
+
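Since every DEPENDENCY digit is 0-3, the leading-zero literals in the APUOP() lines below keep their digits intact when read as octal, so the per-register usage can be recovered digit by digit. A small illustrative sketch (not part of this patch):

#include <stdio.h>

int main(void)
{
	unsigned int dep = 00113;	/* the fms example above, as an octal literal */
	const char *regs[] = { "RT", "RA", "RB", "RC" };
	const char *use[]  = { "unused", "source", "target", "source+target" };
	int i;

	/* The least significant digit is RT, then RA, RB, RC; digits are 0-3. */
	for (i = 0; i < 4; i++, dep >>= 3)
		printf("%s: %s\n", regs[i], use[dep & 7]);
	return 0;
}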
+-=-=-= PIPE =-=-=-
+
+This field shows which execution pipe is used for the instruction.
+
+pipe0 execution pipelines:
+       FP6     SP floating pipeline
+       FP7     integer operations executed in SP floating pipeline
+       FPD     DP floating pipeline
+       FX2     FXU pipeline
+       FX3     Rotate/Shift pipeline
+       FXB     Byte pipeline
+       NOP     No pipeline
+
+pipe1 execution pipelines:
+       BR      Branch pipeline
+       LNOP    No pipeline
+       LS      Load/Store pipeline
+       SHUF    Shuffle pipeline
+       SPR     SPR/CH pipeline
+
+*/
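
The APUOP() lines below form an X-macro table: spu.h (not shown in this patch) presumably defines APUOP differently on each inclusion, once to build the enum spu_insns tags used by spu-dis.c and once to build spu_opcodes[]. A self-contained sketch of that pattern, with stand-in names and a table reduced to two entries:

#include <stdio.h>

/* A stand-in for the table header's contents. */
#define DEMO_TABLE \
	DEMO_OP(M_DEMO_STOP, 0x000, "stop") \
	DEMO_OP(M_DEMO_BR,   0x190, "br")

/* First expansion: the tag enum. */
#define DEMO_OP(tag, op, mnem) tag,
enum demo_spu_insns { DEMO_TABLE DEMO_NUM };
#undef DEMO_OP

/* Second expansion: the opcode table, index-aligned with the enum. */
struct demo_spu_entry { unsigned int opcode; const char *mnemonic; };
#define DEMO_OP(tag, op, mnem) { op, mnem },
static const struct demo_spu_entry demo_opcodes[] = { DEMO_TABLE };
#undef DEMO_OP

int main(void)
{
	/* The tag <-> table-index correspondence is what print_insn_spu() relies on. */
	printf("%s\n", demo_opcodes[M_DEMO_BR].mnemonic);	/* prints "br" */
	return 0;
}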
+
+#define _A0() {0}
+#define _A1(a) {1,a}
+#define _A2(a,b) {2,a,b}
+#define _A3(a,b,c) {3,a,b,c}
+#define _A4(a,b,c,d) {4,a,b,c,d}
+
+/*    TAG              FORMAT  OPCODE  MNEMONIC        ASM_FORMAT      DEPENDENCY      PIPE    COMMENT                         */
+/*                                                                     0[RC][RB][RA][RT]                                       */
+/*                                                                     1:src, 2:target                                         */
+
+APUOP(M_BR,            RI16,   0x190,  "br",           _A1(A_R18),     00000,  BR)     /* BRel          IP<-IP+I16 */
+APUOP(M_BRSL,          RI16,   0x198,  "brsl",         _A2(A_T,A_R18), 00002,  BR)     /* BRelSetLink   RT,IP<-IP,IP+I16 */
+APUOP(M_BRA,           RI16,   0x180,  "bra",          _A1(A_S18),     00000,  BR)     /* BRAbs         IP<-I16 */
+APUOP(M_BRASL,         RI16,   0x188,  "brasl",        _A2(A_T,A_S18), 00002,  BR)     /* BRAbsSetLink  RT,IP<-IP,I16 */
+APUOP(M_FSMBI,         RI16,   0x194,  "fsmbi",        _A2(A_T,A_X16), 00002,  SHUF)   /* FormSelMask%I RT<-fsm(I16) */
+APUOP(M_LQA,           RI16,   0x184,  "lqa",          _A2(A_T,A_S18), 00002,  LS)     /* LoadQAbs      RT<-M[I16] */
+APUOP(M_LQR,           RI16,   0x19C,  "lqr",          _A2(A_T,A_R18), 00002,  LS)     /* LoadQRel      RT<-M[IP+I16] */
+APUOP(M_STOP,          RR,     0x000,  "stop",         _A0(),          00000,  BR)     /* STOP          stop */
+APUOP(M_STOP2,         RR,     0x000,  "stop",         _A1(A_U14),     00000,  BR)     /* STOP          stop */
+APUOP(M_STOPD,         RR,     0x140,  "stopd",        _A3(A_T,A_A,A_B),         00111,        BR)     /* STOPD         stop (with register dependencies) */
+APUOP(M_LNOP,          RR,     0x001,  "lnop",         _A0(),          00000,  LNOP)   /* LNOP          no_operation */
+APUOP(M_SYNC,          RR,     0x002,  "sync",         _A0(),          00000,  BR)     /* SYNC          flush_pipe */
+APUOP(M_DSYNC,         RR,     0x003,  "dsync",        _A0(),          00000,  BR)     /* DSYNC         flush_store_queue */
+APUOP(M_MFSPR,         RR,     0x00c,  "mfspr",        _A2(A_T,A_S),   00002,  SPR)    /* MFSPR         RT<-SA */
+APUOP(M_RDCH,          RR,     0x00d,  "rdch",         _A2(A_T,A_H),   00002,  SPR)    /* ReaDCHannel   RT<-CA:data */
+APUOP(M_RCHCNT,                RR,     0x00f,  "rchcnt",       _A2(A_T,A_H),   00002,  SPR)    /* ReaDCHanCouNT RT<-CA:count */
+APUOP(M_HBRA,          LBT,    0x080,  "hbra",         _A2(A_S11,A_S18),       00000,  LS)     /* HBRA          BTB[B9]<-M[I16] */
+APUOP(M_HBRR,          LBT,    0x090,  "hbrr",         _A2(A_S11,A_R18),       00000,  LS)     /* HBRR          BTB[B9]<-M[IP+I16] */
+APUOP(M_BRZ,           RI16,   0x100,  "brz",          _A2(A_T,A_R18), 00001,  BR)     /* BRZ           IP<-IP+I16_if(RT) */
+APUOP(M_BRNZ,          RI16,   0x108,  "brnz",         _A2(A_T,A_R18), 00001,  BR)     /* BRNZ          IP<-IP+I16_if(RT) */
+APUOP(M_BRHZ,          RI16,   0x110,  "brhz",         _A2(A_T,A_R18), 00001,  BR)     /* BRHZ          IP<-IP+I16_if(RT) */
+APUOP(M_BRHNZ,         RI16,   0x118,  "brhnz",        _A2(A_T,A_R18), 00001,  BR)     /* BRHNZ         IP<-IP+I16_if(RT) */
+APUOP(M_STQA,          RI16,   0x104,  "stqa",         _A2(A_T,A_S18), 00001,  LS)     /* SToreQAbs     M[I16]<-RT */
+APUOP(M_STQR,          RI16,   0x11C,  "stqr",         _A2(A_T,A_R18), 00001,  LS)     /* SToreQRel     M[IP+I16]<-RT */
+APUOP(M_MTSPR,         RR,     0x10c,  "mtspr",        _A2(A_S,A_T),   00001,  SPR)    /* MTSPR         SA<-RT */
+APUOP(M_WRCH,          RR,     0x10d,  "wrch",         _A2(A_H,A_T),   00001,  SPR)    /* ChanWRite     CA<-RT */
+APUOP(M_LQD,           RI10,   0x1a0,  "lqd",          _A4(A_T,A_S14,A_P,A_A), 00012,  LS)     /* LoadQDisp     RT<-M[Ra+I10] */
+APUOP(M_BI,            RR,     0x1a8,  "bi",           _A1(A_A),               00010,  BR)     /* BI            IP<-RA */
+APUOP(M_BISL,          RR,     0x1a9,  "bisl",         _A2(A_T,A_A),   00012,  BR)     /* BISL          RT,IP<-IP,RA */
+APUOP(M_IRET,                  RR,     0x1aa,  "iret",         _A1(A_A),       00010,  BR)     /* IRET          IP<-SRR0 */
+APUOP(M_IRET2,                 RR,     0x1aa,  "iret",         _A0(),          00010,  BR)     /* IRET          IP<-SRR0 */
+APUOP(M_BISLED,                RR,     0x1ab,  "bisled",       _A2(A_T,A_A),   00012,  BR)     /* BISLED        RT,IP<-IP,RA_if(ext) */
+APUOP(M_HBR,           LBTI,   0x1ac,  "hbr",          _A2(A_S11I,A_A),        00010,  LS)     /* HBR           BTB[B9]<-M[Ra] */
+APUOP(M_FREST,         RR,     0x1b8,  "frest",        _A2(A_T,A_A),   00012,  SHUF)   /* FREST         RT<-recip(RA) */
+APUOP(M_FRSQEST,       RR,     0x1b9,  "frsqest",      _A2(A_T,A_A),   00012,  SHUF)   /* FRSQEST       RT<-rsqrt(RA) */
+APUOP(M_FSM,           RR,     0x1b4,  "fsm",          _A2(A_T,A_A),   00012,  SHUF)   /* FormSelMask%  RT<-expand(Ra) */
+APUOP(M_FSMH,          RR,     0x1b5,  "fsmh",         _A2(A_T,A_A),   00012,  SHUF)   /* FormSelMask%  RT<-expand(Ra) */
+APUOP(M_FSMB,          RR,     0x1b6,  "fsmb",         _A2(A_T,A_A),   00012,  SHUF)   /* FormSelMask%  RT<-expand(Ra) */
+APUOP(M_GB,            RR,     0x1b0,  "gb",           _A2(A_T,A_A),   00012,  SHUF)   /* GatherBits%   RT<-gather(RA) */
+APUOP(M_GBH,           RR,     0x1b1,  "gbh",          _A2(A_T,A_A),   00012,  SHUF)   /* GatherBits%   RT<-gather(RA) */
+APUOP(M_GBB,           RR,     0x1b2,  "gbb",          _A2(A_T,A_A),   00012,  SHUF)   /* GatherBits%   RT<-gather(RA) */
+APUOP(M_CBD,           RI7,    0x1f4,  "cbd",          _A4(A_T,A_U7,A_P,A_A),  00012,  SHUF)   /* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_CHD,           RI7,    0x1f5,  "chd",          _A4(A_T,A_U7,A_P,A_A),  00012,  SHUF)   /* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_CWD,           RI7,    0x1f6,  "cwd",          _A4(A_T,A_U7,A_P,A_A),  00012,  SHUF)   /* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_CDD,           RI7,    0x1f7,  "cdd",          _A4(A_T,A_U7,A_P,A_A),  00012,  SHUF)   /* genCtl%%insD  RT<-sta(Ra+I4,siz) */
+APUOP(M_ROTQBII,       RI7,    0x1f8,  "rotqbii",      _A3(A_T,A_A,A_U3),      00012,  SHUF)   /* ROTQBII       RT<-RA<<<I7 */
+APUOP(M_ROTQBYI,       RI7,    0x1fc,  "rotqbyi",      _A3(A_T,A_A,A_S7N),     00012,  SHUF)   /* ROTQBYI       RT<-RA<<<(I7*8) */
+APUOP(M_ROTQMBII,      RI7,    0x1f9,  "rotqmbii",     _A3(A_T,A_A,A_S3),      00012,  SHUF)   /* ROTQMBII      RT<-RA<<I7 */
+APUOP(M_ROTQMBYI,      RI7,    0x1fd,  "rotqmbyi",     _A3(A_T,A_A,A_S6),      00012,  SHUF)   /* ROTQMBYI      RT<-RA<<I7 */
+APUOP(M_SHLQBII,       RI7,    0x1fb,  "shlqbii",      _A3(A_T,A_A,A_U3),      00012,  SHUF)   /* SHLQBII       RT<-RA<<I7 */
+APUOP(M_SHLQBYI,       RI7,    0x1ff,  "shlqbyi",      _A3(A_T,A_A,A_U5),      00012,  SHUF)   /* SHLQBYI       RT<-RA<<I7 */
+APUOP(M_STQD,          RI10,   0x120,  "stqd",         _A4(A_T,A_S14,A_P,A_A), 00011,  LS)     /* SToreQDisp    M[Ra+I10]<-RT */
+APUOP(M_BIHNZ,         RR,     0x12b,  "bihnz",        _A2(A_T,A_A),   00011,  BR)     /* BIHNZ         IP<-RA_if(RT) */
+APUOP(M_BIHZ,          RR,     0x12a,  "bihz",         _A2(A_T,A_A),   00011,  BR)     /* BIHZ          IP<-RA_if(RT) */
+APUOP(M_BINZ,          RR,     0x129,  "binz",         _A2(A_T,A_A),   00011,  BR)     /* BINZ          IP<-RA_if(RT) */
+APUOP(M_BIZ,           RR,     0x128,  "biz",          _A2(A_T,A_A),   00011,  BR)     /* BIZ           IP<-RA_if(RT) */
+APUOP(M_CBX,           RR,     0x1d4,  "cbx",          _A3(A_T,A_A,A_B),               00112,  SHUF)   /* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_CHX,           RR,     0x1d5,  "chx",          _A3(A_T,A_A,A_B),               00112,  SHUF)   /* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_CWX,           RR,     0x1d6,  "cwx",          _A3(A_T,A_A,A_B),               00112,  SHUF)   /* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_CDX,           RR,     0x1d7,  "cdx",          _A3(A_T,A_A,A_B),               00112,  SHUF)   /* genCtl%%insX  RT<-sta(Ra+Rb,siz) */
+APUOP(M_LQX,           RR,     0x1c4,  "lqx",          _A3(A_T,A_A,A_B),               00112,  LS)     /* LoadQindeX    RT<-M[Ra+Rb] */
+APUOP(M_ROTQBI,                RR,     0x1d8,  "rotqbi",       _A3(A_T,A_A,A_B),               00112,  SHUF)   /* ROTQBI        RT<-RA<<<Rb */
+APUOP(M_ROTQMBI,       RR,     0x1d9,  "rotqmbi",      _A3(A_T,A_A,A_B),               00112,  SHUF)   /* ROTQMBI       RT<-RA<<Rb */
+APUOP(M_SHLQBI,                RR,     0x1db,  "shlqbi",       _A3(A_T,A_A,A_B),               00112,  SHUF)   /* SHLQBI        RT<-RA<<Rb */
+APUOP(M_ROTQBY,                RR,     0x1dc,  "rotqby",       _A3(A_T,A_A,A_B),               00112,          SHUF)   /* ROTQBY        RT<-RA<<<(Rb*8) */
+APUOP(M_ROTQMBY,       RR,     0x1dd,  "rotqmby",      _A3(A_T,A_A,A_B),               00112,          SHUF)   /* ROTQMBY       RT<-RA<<Rb */
+APUOP(M_SHLQBY,                RR,     0x1df,  "shlqby",       _A3(A_T,A_A,A_B),               00112,  SHUF)   /* SHLQBY        RT<-RA<<Rb */
+APUOP(M_ROTQBYBI,      RR,     0x1cc,  "rotqbybi",     _A3(A_T,A_A,A_B),               00112,          SHUF)   /* ROTQBYBI      RT<-RA<<Rb */
+APUOP(M_ROTQMBYBI,     RR,     0x1cd,  "rotqmbybi",    _A3(A_T,A_A,A_B),               00112,          SHUF)   /* ROTQMBYBI     RT<-RA<<Rb */
+APUOP(M_SHLQBYBI,      RR,     0x1cf,  "shlqbybi",     _A3(A_T,A_A,A_B),               00112,  SHUF)   /* SHLQBYBI      RT<-RA<<Rb */
+APUOP(M_STQX,          RR,     0x144,  "stqx",         _A3(A_T,A_A,A_B),               00111,  LS)     /* SToreQindeX   M[Ra+Rb]<-RT */
+APUOP(M_SHUFB,         RRR,    0x580,  "shufb",        _A4(A_C,A_A,A_B,A_T),   02111,  SHUF)   /* SHUFfleBytes  RC<-f(RA,RB,RT) */
+APUOP(M_IL,            RI16,   0x204,  "il",           _A2(A_T,A_S16), 00002,  FX2)    /* ImmLoad       RT<-sxt(I16) */
+APUOP(M_ILH,           RI16,   0x20c,  "ilh",          _A2(A_T,A_X16), 00002,  FX2)    /* ImmLoadH      RT<-I16 */
+APUOP(M_ILHU,          RI16,   0x208,  "ilhu",         _A2(A_T,A_X16), 00002,  FX2)    /* ImmLoadHUpper RT<-I16<<16 */
+APUOP(M_ILA,           RI18,   0x210,  "ila",          _A2(A_T,A_U18), 00002,  FX2)    /* ImmLoadAddr   RT<-zxt(I18) */
+APUOP(M_NOP,           RR,     0x201,  "nop",          _A1(A_T),               00000,  NOP)    /* XNOP          no_operation */
+APUOP(M_NOP2,          RR,     0x201,  "nop",          _A0(),          00000,  NOP)    /* XNOP          no_operation */
+APUOP(M_IOHL,          RI16,   0x304,  "iohl",         _A2(A_T,A_X16), 00003,  FX2)    /* AddImmeXt     RT<-RT+sxt(I16) */
+APUOP(M_ANDBI,         RI10,   0x0b0,  "andbi",        _A3(A_T,A_A,A_S10B),    00012,  FX2)    /* AND%I         RT<-RA&I10 */
+APUOP(M_ANDHI,         RI10,   0x0a8,  "andhi",        _A3(A_T,A_A,A_S10),     00012,  FX2)    /* AND%I         RT<-RA&I10 */
+APUOP(M_ANDI,          RI10,   0x0a0,  "andi",         _A3(A_T,A_A,A_S10),     00012,  FX2)    /* AND%I         RT<-RA&I10 */
+APUOP(M_ORBI,          RI10,   0x030,  "orbi",         _A3(A_T,A_A,A_S10B),    00012,  FX2)    /* OR%I          RT<-RA|I10 */
+APUOP(M_ORHI,          RI10,   0x028,  "orhi",         _A3(A_T,A_A,A_S10),     00012,  FX2)    /* OR%I          RT<-RA|I10 */
+APUOP(M_ORI,           RI10,   0x020,  "ori",          _A3(A_T,A_A,A_S10),     00012,  FX2)    /* OR%I          RT<-RA|I10 */
+APUOP(M_ORX,           RR,     0x1f0,  "orx",          _A2(A_T,A_A),           00012,  BR)     /* ORX           RT<-RA.w0|RA.w1|RA.w2|RA.w3 */
+APUOP(M_XORBI,         RI10,   0x230,  "xorbi",        _A3(A_T,A_A,A_S10B),    00012,  FX2)    /* XOR%I         RT<-RA^I10 */
+APUOP(M_XORHI,         RI10,   0x228,  "xorhi",        _A3(A_T,A_A,A_S10),     00012,  FX2)    /* XOR%I         RT<-RA^I10 */
+APUOP(M_XORI,          RI10,   0x220,  "xori",         _A3(A_T,A_A,A_S10),     00012,  FX2)    /* XOR%I         RT<-RA^I10 */
+APUOP(M_AHI,           RI10,   0x0e8,  "ahi",          _A3(A_T,A_A,A_S10),     00012,  FX2)    /* Add%Immed     RT<-RA+I10 */
+APUOP(M_AI,            RI10,   0x0e0,  "ai",           _A3(A_T,A_A,A_S10),     00012,  FX2)    /* Add%Immed     RT<-RA+I10 */
+APUOP(M_SFHI,          RI10,   0x068,  "sfhi",         _A3(A_T,A_A,A_S10),     00012,  FX2)    /* SubFrom%Imm   RT<-I10-RA */
+APUOP(M_SFI,           RI10,   0x060,  "sfi",          _A3(A_T,A_A,A_S10),     00012,  FX2)    /* SubFrom%Imm   RT<-I10-RA */
+APUOP(M_CGTBI,         RI10,   0x270,  "cgtbi",        _A3(A_T,A_A,A_S10B),    00012,  FX2)    /* CGT%I         RT<-(RA>I10) */
+APUOP(M_CGTHI,         RI10,   0x268,  "cgthi",        _A3(A_T,A_A,A_S10),     00012,  FX2)    /* CGT%I         RT<-(RA>I10) */
+APUOP(M_CGTI,          RI10,   0x260,  "cgti",         _A3(A_T,A_A,A_S10),     00012,  FX2)    /* CGT%I         RT<-(RA>I10) */
+APUOP(M_CLGTBI,                RI10,   0x2f0,  "clgtbi",       _A3(A_T,A_A,A_S10B),    00012,  FX2)    /* CLGT%I        RT<-(RA>I10) */
+APUOP(M_CLGTHI,                RI10,   0x2e8,  "clgthi",       _A3(A_T,A_A,A_S10),     00012,  FX2)    /* CLGT%I        RT<-(RA>I10) */
+APUOP(M_CLGTI,         RI10,   0x2e0,  "clgti",        _A3(A_T,A_A,A_S10),     00012,  FX2)    /* CLGT%I        RT<-(RA>I10) */
+APUOP(M_CEQBI,         RI10,   0x3f0,  "ceqbi",        _A3(A_T,A_A,A_S10B),    00012,  FX2)    /* CEQ%I         RT<-(RA=I10) */
+APUOP(M_CEQHI,         RI10,   0x3e8,  "ceqhi",        _A3(A_T,A_A,A_S10),     00012,  FX2)    /* CEQ%I         RT<-(RA=I10) */
+APUOP(M_CEQI,          RI10,   0x3e0,  "ceqi",         _A3(A_T,A_A,A_S10),     00012,  FX2)    /* CEQ%I         RT<-(RA=I10) */
+APUOP(M_HGTI,          RI10,   0x278,  "hgti",         _A3(A_T,A_A,A_S10),     00010,  FX2)    /* HaltGTI       halt_if(RA>I10) */
+APUOP(M_HGTI2,         RI10,   0x278,  "hgti",         _A2(A_A,A_S10), 00010,  FX2)    /* HaltGTI       halt_if(RA>I10) */
+APUOP(M_HLGTI,         RI10,   0x2f8,  "hlgti",        _A3(A_T,A_A,A_S10),     00010,  FX2)    /* HaltLGTI      halt_if(RA>I10) */
+APUOP(M_HLGTI2,                RI10,   0x2f8,  "hlgti",        _A2(A_A,A_S10), 00010,  FX2)    /* HaltLGTI      halt_if(RA>I10) */
+APUOP(M_HEQI,          RI10,   0x3f8,  "heqi",         _A3(A_T,A_A,A_S10),     00010,  FX2)    /* HaltEQImm     halt_if(RA=I10) */
+APUOP(M_HEQI2,         RI10,   0x3f8,  "heqi",         _A2(A_A,A_S10), 00010,  FX2)    /* HaltEQImm     halt_if(RA=I10) */
+APUOP(M_MPYI,          RI10,   0x3a0,  "mpyi",         _A3(A_T,A_A,A_S10),     00012,  FP7)    /* MPYI          RT<-RA*I10 */
+APUOP(M_MPYUI,         RI10,   0x3a8,  "mpyui",        _A3(A_T,A_A,A_S10),     00012,  FP7)    /* MPYUI         RT<-RA*I10 */
+APUOP(M_CFLTS,         RI8,    0x3b0,  "cflts",        _A3(A_T,A_A,A_U7A),     00012,  FP7)    /* CFLTS         RT<-int(RA,I8) */
+APUOP(M_CFLTU,         RI8,    0x3b2,  "cfltu",        _A3(A_T,A_A,A_U7A),     00012,  FP7)    /* CFLTU         RT<-int(RA,I8) */
+APUOP(M_CSFLT,         RI8,    0x3b4,  "csflt",        _A3(A_T,A_A,A_U7B),     00012,  FP7)    /* CSFLT         RT<-flt(RA,I8) */
+APUOP(M_CUFLT,         RI8,    0x3b6,  "cuflt",        _A3(A_T,A_A,A_U7B),     00012,  FP7)    /* CUFLT         RT<-flt(RA,I8) */
+APUOP(M_FESD,          RR,     0x3b8,  "fesd",         _A2(A_T,A_A),   00012,  FPD)    /* FESD          RT<-double(RA) */
+APUOP(M_FRDS,          RR,     0x3b9,  "frds",         _A2(A_T,A_A),   00012,  FPD)    /* FRDS          RT<-single(RA) */
+APUOP(M_FSCRRD,                RR,     0x398,  "fscrrd",       _A1(A_T),               00002,  FPD)    /* FSCRRD        RT<-FP_status */
+APUOP(M_FSCRWR,                RR,     0x3ba,  "fscrwr",       _A2(A_T,A_A),   00010,  FP7)    /* FSCRWR        FP_status<-RA */
+APUOP(M_FSCRWR2,       RR,     0x3ba,  "fscrwr",       _A1(A_A),               00010,  FP7)    /* FSCRWR        FP_status<-RA */
+APUOP(M_CLZ,           RR,     0x2a5,  "clz",          _A2(A_T,A_A),   00012,  FX2)    /* CLZ           RT<-clz(RA) */
+APUOP(M_CNTB,          RR,     0x2b4,  "cntb",         _A2(A_T,A_A),   00012,  FXB)    /* CNT           RT<-pop(RA) */
+APUOP(M_XSBH,          RR,     0x2b6,  "xsbh",         _A2(A_T,A_A),   00012,  FX2)    /* eXtSignBtoH   RT<-sign_ext(RA) */
+APUOP(M_XSHW,          RR,     0x2ae,  "xshw",         _A2(A_T,A_A),   00012,  FX2)    /* eXtSignHtoW   RT<-sign_ext(RA) */
+APUOP(M_XSWD,          RR,     0x2a6,  "xswd",         _A2(A_T,A_A),   00012,  FX2)    /* eXtSignWtoD   RT<-sign_ext(RA) */
+APUOP(M_ROTI,          RI7,    0x078,  "roti",         _A3(A_T,A_A,A_S7N),     00012,  FX3)    /* ROT%I         RT<-RA<<<I7 */
+APUOP(M_ROTMI,         RI7,    0x079,  "rotmi",        _A3(A_T,A_A,A_S7),      00012,  FX3)    /* ROT%MI        RT<-RA<<I7 */
+APUOP(M_ROTMAI,                RI7,    0x07a,  "rotmai",       _A3(A_T,A_A,A_S7),      00012,  FX3)    /* ROTMA%I       RT<-RA<<I7 */
+APUOP(M_SHLI,          RI7,    0x07b,  "shli",         _A3(A_T,A_A,A_U6),      00012,  FX3)    /* SHL%I         RT<-RA<<I7 */
+APUOP(M_ROTHI,         RI7,    0x07c,  "rothi",        _A3(A_T,A_A,A_S7N),     00012,  FX3)    /* ROT%I         RT<-RA<<<I7 */
+APUOP(M_ROTHMI,                RI7,    0x07d,  "rothmi",       _A3(A_T,A_A,A_S6),      00012,  FX3)    /* ROT%MI        RT<-RA<<I7 */
+APUOP(M_ROTMAHI,       RI7,    0x07e,  "rotmahi",      _A3(A_T,A_A,A_S6),      00012,  FX3)    /* ROTMA%I       RT<-RA<<I7 */
+APUOP(M_SHLHI,         RI7,    0x07f,  "shlhi",        _A3(A_T,A_A,A_U5),      00012,  FX3)    /* SHL%I         RT<-RA<<I7 */
+APUOP(M_A,             RR,     0x0c0,  "a",            _A3(A_T,A_A,A_B),               00112,  FX2)    /* Add%          RT<-RA+RB */
+APUOP(M_AH,            RR,     0x0c8,  "ah",           _A3(A_T,A_A,A_B),               00112,  FX2)    /* Add%          RT<-RA+RB */
+APUOP(M_SF,            RR,     0x040,  "sf",           _A3(A_T,A_A,A_B),               00112,  FX2)    /* SubFrom%      RT<-RB-RA */
+APUOP(M_SFH,           RR,     0x048,  "sfh",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* SubFrom%      RT<-RB-RA */
+APUOP(M_CGT,           RR,     0x240,  "cgt",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* CGT%          RT<-(RA>RB) */
+APUOP(M_CGTB,          RR,     0x250,  "cgtb",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* CGT%          RT<-(RA>RB) */
+APUOP(M_CGTH,          RR,     0x248,  "cgth",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* CGT%          RT<-(RA>RB) */
+APUOP(M_CLGT,          RR,     0x2c0,  "clgt",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* CLGT%         RT<-(RA>RB) */
+APUOP(M_CLGTB,         RR,     0x2d0,  "clgtb",        _A3(A_T,A_A,A_B),               00112,  FX2)    /* CLGT%         RT<-(RA>RB) */
+APUOP(M_CLGTH,         RR,     0x2c8,  "clgth",        _A3(A_T,A_A,A_B),               00112,  FX2)    /* CLGT%         RT<-(RA>RB) */
+APUOP(M_CEQ,           RR,     0x3c0,  "ceq",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* CEQ%          RT<-(RA=RB) */
+APUOP(M_CEQB,          RR,     0x3d0,  "ceqb",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* CEQ%          RT<-(RA=RB) */
+APUOP(M_CEQH,          RR,     0x3c8,  "ceqh",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* CEQ%          RT<-(RA=RB) */
+APUOP(M_HGT,           RR,     0x258,  "hgt",          _A3(A_T,A_A,A_B),               00110,  FX2)    /* HaltGT        halt_if(RA>RB) */
+APUOP(M_HGT2,          RR,     0x258,  "hgt",          _A2(A_A,A_B),   00110,  FX2)    /* HaltGT        halt_if(RA>RB) */
+APUOP(M_HLGT,          RR,     0x2d8,  "hlgt",         _A3(A_T,A_A,A_B),               00110,  FX2)    /* HaltLGT       halt_if(RA>RB) */
+APUOP(M_HLGT2,         RR,     0x2d8,  "hlgt",         _A2(A_A,A_B),   00110,  FX2)    /* HaltLGT       halt_if(RA>RB) */
+APUOP(M_HEQ,           RR,     0x3d8,  "heq",          _A3(A_T,A_A,A_B),               00110,  FX2)    /* HaltEQ        halt_if(RA=RB) */
+APUOP(M_HEQ2,          RR,     0x3d8,  "heq",          _A2(A_A,A_B),   00110,  FX2)    /* HaltEQ        halt_if(RA=RB) */
+APUOP(M_FCEQ,          RR,     0x3c2,  "fceq",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* FCEQ          RT<-(RA=RB) */
+APUOP(M_FCMEQ,         RR,     0x3ca,  "fcmeq",        _A3(A_T,A_A,A_B),               00112,  FX2)    /* FCMEQ         RT<-(|RA|=|RB|) */
+APUOP(M_FCGT,          RR,     0x2c2,  "fcgt",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* FCGT          RT<-(RA<RB) */
+APUOP(M_FCMGT,         RR,     0x2ca,  "fcmgt",        _A3(A_T,A_A,A_B),               00112,  FX2)    /* FCMGT         RT<-(|RA|<|RB|) */
+APUOP(M_AND,           RR,     0x0c1,  "and",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* AND           RT<-RA&RB */
+APUOP(M_NAND,          RR,     0x0c9,  "nand",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* NAND          RT<-!(RA&RB) */
+APUOP(M_OR,            RR,     0x041,  "or",           _A3(A_T,A_A,A_B),               00112,  FX2)    /* OR            RT<-RA|RB */
+APUOP(M_NOR,           RR,     0x049,  "nor",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* NOR           RT<-!(RA&RB) */
+APUOP(M_XOR,           RR,     0x241,  "xor",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* XOR           RT<-RA^RB */
+APUOP(M_EQV,           RR,     0x249,  "eqv",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* EQuiValent    RT<-!(RA^RB) */
+APUOP(M_ANDC,          RR,     0x2c1,  "andc",         _A3(A_T,A_A,A_B),               00112,  FX2)    /* ANDComplement RT<-RA&!RB */
+APUOP(M_ORC,           RR,     0x2c9,  "orc",          _A3(A_T,A_A,A_B),               00112,  FX2)    /* ORComplement  RT<-RA|!RB */
+APUOP(M_ABSDB,         RR,     0x053,  "absdb",        _A3(A_T,A_A,A_B),               00112,  FXB)    /* ABSoluteDiff  RT<-|RA-RB| */
+APUOP(M_AVGB,          RR,     0x0d3,  "avgb",         _A3(A_T,A_A,A_B),               00112,  FXB)    /* AVG%          RT<-(RA+RB+1)/2 */
+APUOP(M_SUMB,          RR,     0x253,  "sumb",         _A3(A_T,A_A,A_B),               00112,  FXB)    /* SUM%          RT<-f(RA,RB) */
+APUOP(M_DFA,           RR,     0x2cc,  "dfa",          _A3(A_T,A_A,A_B),               00112,  FPD)    /* DFAdd         RT<-RA+RB */
+APUOP(M_DFM,           RR,     0x2ce,  "dfm",          _A3(A_T,A_A,A_B),               00112,  FPD)    /* DFMul         RT<-RA*RB */
+APUOP(M_DFS,           RR,     0x2cd,  "dfs",          _A3(A_T,A_A,A_B),               00112,  FPD)    /* DFSub         RT<-RA-RB */
+APUOP(M_FA,            RR,     0x2c4,  "fa",           _A3(A_T,A_A,A_B),               00112,  FP6)    /* FAdd          RT<-RA+RB */
+APUOP(M_FM,            RR,     0x2c6,  "fm",           _A3(A_T,A_A,A_B),               00112,  FP6)    /* FMul          RT<-RA*RB */
+APUOP(M_FS,            RR,     0x2c5,  "fs",           _A3(A_T,A_A,A_B),               00112,  FP6)    /* FSub          RT<-RA-RB */
+APUOP(M_MPY,           RR,     0x3c4,  "mpy",          _A3(A_T,A_A,A_B),               00112,  FP7)    /* MPY           RT<-RA*RB */
+APUOP(M_MPYH,          RR,     0x3c5,  "mpyh",         _A3(A_T,A_A,A_B),               00112,  FP7)    /* MPYH          RT<-(RAh*RB)<<16 */
+APUOP(M_MPYHH,         RR,     0x3c6,  "mpyhh",        _A3(A_T,A_A,A_B),               00112,  FP7)    /* MPYHH         RT<-RAh*RBh */
+APUOP(M_MPYHHU,                RR,     0x3ce,  "mpyhhu",       _A3(A_T,A_A,A_B),               00112,  FP7)    /* MPYHHU        RT<-RAh*RBh */
+APUOP(M_MPYS,          RR,     0x3c7,  "mpys",         _A3(A_T,A_A,A_B),               00112,  FP7)    /* MPYS          RT<-(RA*RB)>>16 */
+APUOP(M_MPYU,          RR,     0x3cc,  "mpyu",         _A3(A_T,A_A,A_B),               00112,  FP7)    /* MPYU          RT<-RA*RB */
+APUOP(M_FI,            RR,     0x3d4,  "fi",           _A3(A_T,A_A,A_B),               00112,  FP7)    /* FInterpolate  RT<-f(RA,RB) */
+APUOP(M_ROT,           RR,     0x058,  "rot",          _A3(A_T,A_A,A_B),               00112,  FX3)    /* ROT%          RT<-RA<<<RB */
+APUOP(M_ROTM,          RR,     0x059,  "rotm",         _A3(A_T,A_A,A_B),               00112,  FX3)    /* ROT%M         RT<-RA<<Rb */
+APUOP(M_ROTMA,         RR,     0x05a,  "rotma",        _A3(A_T,A_A,A_B),               00112,  FX3)    /* ROTMA%        RT<-RA<<Rb */
+APUOP(M_SHL,           RR,     0x05b,  "shl",          _A3(A_T,A_A,A_B),               00112,  FX3)    /* SHL%          RT<-RA<<Rb */
+APUOP(M_ROTH,          RR,     0x05c,  "roth",         _A3(A_T,A_A,A_B),               00112,  FX3)    /* ROT%          RT<-RA<<<RB */
+APUOP(M_ROTHM,         RR,     0x05d,  "rothm",        _A3(A_T,A_A,A_B),               00112,  FX3)    /* ROT%M         RT<-RA<<Rb */
+APUOP(M_ROTMAH,                RR,     0x05e,  "rotmah",       _A3(A_T,A_A,A_B),               00112,  FX3)    /* ROTMA%        RT<-RA<<Rb */
+APUOP(M_SHLH,          RR,     0x05f,  "shlh",         _A3(A_T,A_A,A_B),               00112,  FX3)    /* SHL%          RT<-RA<<Rb */
+APUOP(M_MPYHHA,                RR,     0x346,  "mpyhha",       _A3(A_T,A_A,A_B),               00113,  FP7)    /* MPYHHA        RT<-RAh*RBh+RT */
+APUOP(M_MPYHHAU,       RR,     0x34e,  "mpyhhau",      _A3(A_T,A_A,A_B),               00113,  FP7)    /* MPYHHAU       RT<-RAh*RBh+RT */
+APUOP(M_DFMA,          RR,     0x35c,  "dfma",         _A3(A_T,A_A,A_B),               00113,  FPD)    /* DFMAdd        RT<-RT+RA*RB */
+APUOP(M_DFMS,          RR,     0x35d,  "dfms",         _A3(A_T,A_A,A_B),               00113,  FPD)    /* DFMSub        RT<-RA*RB-RT */
+APUOP(M_DFNMS,         RR,     0x35e,  "dfnms",        _A3(A_T,A_A,A_B),               00113,  FPD)    /* DFNMSub       RT<-RT-RA*RB */
+APUOP(M_DFNMA,         RR,     0x35f,  "dfnma",        _A3(A_T,A_A,A_B),               00113,  FPD)    /* DFNMAdd       RT<-(-RT)-RA*RB */
+APUOP(M_FMA,           RRR,    0x700,  "fma",          _A4(A_C,A_A,A_B,A_T),   02111,  FP6)    /* FMAdd         RC<-RT+RA*RB */
+APUOP(M_FMS,           RRR,    0x780,  "fms",          _A4(A_C,A_A,A_B,A_T),   02111,  FP6)    /* FMSub         RC<-RA*RB-RT */
+APUOP(M_FNMS,          RRR,    0x680,  "fnms",         _A4(A_C,A_A,A_B,A_T),   02111,  FP6)    /* FNMSub        RC<-RT-RA*RB */
+APUOP(M_MPYA,          RRR,    0x600,  "mpya",         _A4(A_C,A_A,A_B,A_T),   02111,  FP7)    /* MPYA          RC<-RA*RB+RT */
+APUOP(M_SELB,          RRR,    0x400,  "selb",         _A4(A_C,A_A,A_B,A_T),   02111,  FX2)    /* SELectBits    RC<-RA&RT|RB&!RT */
+/* for system function call, this uses op-code of mtspr */
+APUOP(M_SYSCALL,       RI7,    0x10c,  "syscall",      _A3(A_T,A_A,A_S7N),     00002,  SPR)        /* System Call */
+/*
+pseudo instruction:
+system call
+value of I9    operation
+0      halt
+1              rt[0] = open(MEM[ra[0]],        ra[1])
+2              rt[0] = close(ra[0])
+3              rt[0] = read(ra[0],     MEM[ra[1]],     ra[2])
+4              rt[0] = write(ra[0],    MEM[ra[1]],     ra[2])
+5              printf(MEM[ra[0]],      ra[1],  ra[2],  ra[3])
+42             rt[0] = clock()
+52             rt[0] = lseek(ra0,      ra1,    ra2)
+
+*/
+
+
+/* new multiprecision add/sub */
+APUOP(M_ADDX,          RR,     0x340,  "addx",         _A3(A_T,A_A,A_B),               00113,          FX2)    /* Add_eXtended  RT<-RA+RB+RT */
+APUOP(M_CG,            RR,     0x0c2,  "cg",           _A3(A_T,A_A,A_B),               00112,          FX2)    /* CarryGenerate RT<-cout(RA+RB) */
+APUOP(M_CGX,           RR,     0x342,  "cgx",          _A3(A_T,A_A,A_B),               00113,          FX2)    /* CarryGen_eXtd RT<-cout(RA+RB+RT) */
+APUOP(M_SFX,           RR,     0x341,  "sfx",          _A3(A_T,A_A,A_B),               00113,          FX2)    /* Add_eXtended  RT<-RA+RB+RT */
+APUOP(M_BG,            RR,     0x042,  "bg",           _A3(A_T,A_A,A_B),               00112,          FX2)    /* CarryGenerate RT<-cout(RA+RB) */
+APUOP(M_BGX,           RR,     0x343,  "bgx",          _A3(A_T,A_A,A_B),               00113,          FX2)    /* CarryGen_eXtd RT<-cout(RA+RB+RT) */
+
+/*
+
+The following ops are a subset of above except with feature bits set.
+Feature bits are bits 11-17 of the instruction:
+
+  11 - C & P feature bit
+  12 - disable interrupts
+  13 - enable interrupts
+
+*/
+APUOPFB(M_BID,         RR,     0x1a8,  0x20,   "bid",          _A1(A_A),               00010,  BR)     /* BI            IP<-RA */
+APUOPFB(M_BIE,         RR,     0x1a8,  0x10,   "bie",          _A1(A_A),               00010,  BR)     /* BI            IP<-RA */
+APUOPFB(M_BISLD,       RR,     0x1a9,  0x20,   "bisld",        _A2(A_T,A_A),   00012,  BR)     /* BISL          RT,IP<-IP,RA */
+APUOPFB(M_BISLE,       RR,     0x1a9,  0x10,   "bisle",        _A2(A_T,A_A),   00012,  BR)     /* BISL          RT,IP<-IP,RA */
+APUOPFB(M_IRETD,       RR,     0x1aa,  0x20,   "iretd",        _A1(A_A),       00010,  BR)     /* IRET          IP<-SRR0 */
+APUOPFB(M_IRETD2,      RR,     0x1aa,  0x20,   "iretd",        _A0(),          00010,  BR)     /* IRET          IP<-SRR0 */
+APUOPFB(M_IRETE,       RR,     0x1aa,  0x10,   "irete",        _A1(A_A),       00010,  BR)     /* IRET          IP<-SRR0 */
+APUOPFB(M_IRETE2,      RR,     0x1aa,  0x10,   "irete",        _A0(),          00010,  BR)     /* IRET          IP<-SRR0 */
+APUOPFB(M_BISLEDD,     RR,     0x1ab,  0x20,   "bisledd",      _A2(A_T,A_A),   00012,  BR)     /* BISLED        RT,IP<-IP,RA_if(ext) */
+APUOPFB(M_BISLEDE,     RR,     0x1ab,  0x10,   "bislede",      _A2(A_T,A_A),   00012,  BR)     /* BISLED        RT,IP<-IP,RA_if(ext) */
+APUOPFB(M_BIHNZD,      RR,     0x12b,  0x20,   "bihnzd",       _A2(A_T,A_A),   00011,  BR)     /* BIHNZ         IP<-RA_if(RT) */
+APUOPFB(M_BIHNZE,      RR,     0x12b,  0x10,   "bihnze",       _A2(A_T,A_A),   00011,  BR)     /* BIHNZ         IP<-RA_if(RT) */
+APUOPFB(M_BIHZD,       RR,     0x12a,  0x20,   "bihzd",        _A2(A_T,A_A),   00011,  BR)     /* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BIHZE,       RR,     0x12a,  0x10,   "bihze",        _A2(A_T,A_A),   00011,  BR)     /* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BINZD,       RR,     0x129,  0x20,   "binzd",        _A2(A_T,A_A),   00011,  BR)     /* BINZ          IP<-RA_if(RT) */
+APUOPFB(M_BINZE,       RR,     0x129,  0x10,   "binze",        _A2(A_T,A_A),   00011,  BR)     /* BINZ          IP<-RA_if(RT) */
+APUOPFB(M_BIZD,                RR,     0x128,  0x20,   "bizd",         _A2(A_T,A_A),   00011,  BR)     /* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_BIZE,                RR,     0x128,  0x10,   "bize",         _A2(A_T,A_A),   00011,  BR)     /* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_SYNCC,       RR,     0x002,  0x40,   "syncc",        _A0(),          00000,  BR)     /* SYNCC          flush_pipe */
+APUOPFB(M_HBRP,                LBTI,   0x1ac,  0x40,   "hbrp",         _A0(),          00010,  LS)     /* HBR           BTB[B9]<-M[Ra] */
+
+/* Synonyms required by the AS manual. */
+APUOP(M_LR,            RI10,   0x020,  "lr",           _A2(A_T,A_A),   00012,  FX2)    /* OR%I          RT<-RA|I10 */
+APUOP(M_BIHT,          RR,     0x12b,  "biht",         _A2(A_T,A_A),   00011,  BR)     /* BIHNZ         IP<-RA_if(RT) */
+APUOP(M_BIHF,          RR,     0x12a,  "bihf",         _A2(A_T,A_A),   00011,  BR)     /* BIHZ          IP<-RA_if(RT) */
+APUOP(M_BIT,           RR,     0x129,  "bit",          _A2(A_T,A_A),   00011,  BR)     /* BINZ          IP<-RA_if(RT) */
+APUOP(M_BIF,           RR,     0x128,  "bif",          _A2(A_T,A_A),   00011,  BR)     /* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_BIHTD,       RR,     0x12b,  0x20,   "bihtd",        _A2(A_T,A_A),   00011,  BR)     /* BIHNF         IP<-RA_if(RT) */
+APUOPFB(M_BIHTE,       RR,     0x12b,  0x10,   "bihte",        _A2(A_T,A_A),   00011,  BR)     /* BIHNF         IP<-RA_if(RT) */
+APUOPFB(M_BIHFD,       RR,     0x12a,  0x20,   "bihfd",        _A2(A_T,A_A),   00011,  BR)     /* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BIHFE,       RR,     0x12a,  0x10,   "bihfe",        _A2(A_T,A_A),   00011,  BR)     /* BIHZ          IP<-RA_if(RT) */
+APUOPFB(M_BITD,        RR,     0x129,  0x20,   "bitd",         _A2(A_T,A_A),   00011,  BR)     /* BINF          IP<-RA_if(RT) */
+APUOPFB(M_BITE,        RR,     0x129,  0x10,   "bite",         _A2(A_T,A_A),   00011,  BR)     /* BINF          IP<-RA_if(RT) */
+APUOPFB(M_BIFD,                RR,     0x128,  0x20,   "bifd",         _A2(A_T,A_A),   00011,  BR)     /* BIZ           IP<-RA_if(RT) */
+APUOPFB(M_BIFE,                RR,     0x128,  0x10,   "bife",         _A2(A_T,A_A),   00011,  BR)     /* BIZ           IP<-RA_if(RT) */
+
+#undef _A0
+#undef _A1
+#undef _A2
+#undef _A3
+#undef _A4
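
The feature-bit comment in the table above (bits 11-17 of the instruction word) lines up with the extra FB column of the APUOPFB() entries: reading the table, 0x40 appears to correspond to bit 11 (the C & P bit used by syncc and hbrp), 0x20 to bit 12 (the interrupt-disable "d" variants such as bid/bisld), and 0x10 to bit 13 (the interrupt-enable "e" variants such as bie/bisle). A minimal sketch of how such an RR-form word could be put together, assuming the SPU's big-endian bit numbering (bit 0 is the most significant) and the field positions listed in spu.h further down; the helper name is invented for illustration and is not part of the commit:

    /* Illustrative only: assemble an RR-form word with feature bits. */
    static unsigned int encode_rr_fb(unsigned int op11, unsigned int fb,
                                     unsigned int ra, unsigned int rt)
    {
            /* 11-bit opcode in bits 0-10, feature bits in 11-17 (the unused
             * RB slot), RA in 18-24, RT in 25-31: shifts of 21, 14, 7 and 0
             * from the least significant bit. */
            return (op11 << 21) | ((fb & 0x7f) << 14) |
                   ((ra & 0x7f) << 7) | (rt & 0x7f);
    }

    /* e.g. "bid $3" would be roughly encode_rr_fb(0x1a8, 0x20, 3, 0). */
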
diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c
new file mode 100644 (file)
index 0000000..efffde9
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPU opcode list
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of GDB, GAS, and the GNU binutils.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License along
+   with this program; if not, write to the Free Software Foundation, Inc.,
+   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+#include "spu.h"
+
+/* This file holds the Spu opcode table */
+\f
+
+/*
+   Example contents of spu-insn.h
+      id_tag   mode    mode    type    opcode  mnemonic        asmtype     dependency          FPU     L/S?    branch? instruction   
+                QUAD   WORD                                               (0,RC,RB,RA,RT)    latency                                           
+   APUOP(M_LQD,        1,      0,      RI9,    0x1f8,  "lqd",          ASM_RI9IDX,     00012,          FXU,    1,      0)      Load Quadword d-form 
+ */
+
+const struct spu_opcode spu_opcodes[] = {
+#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+       { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
+#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+       { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
+#include "spu-insns.h"
+#undef APUOP
+#undef APUOPFB
+};
+
+const int spu_num_opcodes =
+  sizeof (spu_opcodes) / sizeof (spu_opcodes[0]);
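
spu-opc.c builds spu_opcodes[] by defining APUOP()/APUOPFB() as struct initialisers and then #including spu-insns.h; the spu.h header that follows re-includes the same file with the macros redefined to emit enum tags instead. This is the usual X-macro idiom, with the shared #include playing the role of the list, so the enum and the table cannot drift apart. A reduced, self-contained sketch of the pattern with made-up names:

    /* Illustrative X-macro sketch, not from the commit: one list expanded
     * twice, once into an enum and once into a table. */
    #define OPS \
            OP(OP_ADD, 0x01, "add") \
            OP(OP_SUB, 0x02, "sub")

    enum {
    #define OP(tag, code, name) tag,
            OPS
    #undef OP
            OP_MAX                  /* element count, like M_SPU_MAX above */
    };

    static const struct { unsigned int code; const char *name; } ops[] = {
    #define OP(tag, code, name) { code, name },
            OPS
    #undef OP
    };
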
diff --git a/arch/powerpc/xmon/spu.h b/arch/powerpc/xmon/spu.h
new file mode 100644 (file)
index 0000000..c761fc8
--- /dev/null
@@ -0,0 +1,126 @@
+/* SPU ELF support for BFD.
+
+   Copyright 2006 Free Software Foundation, Inc.
+
+   This file is part of GDB, GAS, and the GNU binutils.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software Foundation,
+   Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
+
+
+/* These two enums are from rel_apu/common/spu_asm_format.h */
+/* definition of instruction format */
+typedef enum {
+  RRR,
+  RI18,
+  RI16,
+  RI10,
+  RI8,
+  RI7,
+  RR,
+  LBT,
+  LBTI,
+  IDATA,
+  UNKNOWN_IFORMAT
+} spu_iformat;
+
+/* These values describe assembly instruction arguments.  They indicate
+ * how to encode, range checking and which relocation to use. */
+typedef enum {
+  A_T,  /* register at pos 0 */
+  A_A,  /* register at pos 7 */
+  A_B,  /* register at pos 14 */
+  A_C,  /* register at pos 21 */
+  A_S,  /* special purpose register at pos 7 */
+  A_H,  /* channel register at pos 7 */
+  A_P,  /* parenthesis, this has to separate regs from immediates */
+  A_S3,
+  A_S6,
+  A_S7N,
+  A_S7,
+  A_U7A,
+  A_U7B,
+  A_S10B,
+  A_S10,
+  A_S11,
+  A_S11I,
+  A_S14,
+  A_S16,
+  A_S18,
+  A_R18,
+  A_U3,
+  A_U5,
+  A_U6,
+  A_U7,
+  A_U14,
+  A_X16,
+  A_U18,
+  A_MAX
+} spu_aformat;
+
+enum spu_insns {
+#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+       TAG,
+#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
+       TAG,
+#include "spu-insns.h"
+#undef APUOP
+#undef APUOPFB
+        M_SPU_MAX
+};
+
+struct spu_opcode
+{
+   spu_iformat insn_type;
+   unsigned int opcode;
+   char *mnemonic;
+   int arg[5];
+};
+
+#define SIGNED_EXTRACT(insn,size,pos) (((int)((insn) << (32-size-pos))) >> (32-size))
+#define UNSIGNED_EXTRACT(insn,size,pos) (((insn) >> pos) & ((1 << size)-1))
+
+#define DECODE_INSN_RT(insn) (insn & 0x7f)
+#define DECODE_INSN_RA(insn) ((insn >> 7) & 0x7f)
+#define DECODE_INSN_RB(insn) ((insn >> 14) & 0x7f)
+#define DECODE_INSN_RC(insn) ((insn >> 21) & 0x7f)
+
+#define DECODE_INSN_I10(insn) SIGNED_EXTRACT(insn,10,14)
+#define DECODE_INSN_U10(insn) UNSIGNED_EXTRACT(insn,10,14)
+
+/* For branching, immediate loads, hbr and  lqa/stqa. */
+#define DECODE_INSN_I16(insn) SIGNED_EXTRACT(insn,16,7)
+#define DECODE_INSN_U16(insn) UNSIGNED_EXTRACT(insn,16,7)
+
+/* for stop */
+#define DECODE_INSN_U14(insn) UNSIGNED_EXTRACT(insn,14,0)
+
+/* For ila */
+#define DECODE_INSN_I18(insn) SIGNED_EXTRACT(insn,18,7)
+#define DECODE_INSN_U18(insn) UNSIGNED_EXTRACT(insn,18,7)
+
+/* For rotate and shift and generate control mask */
+#define DECODE_INSN_I7(insn) SIGNED_EXTRACT(insn,7,14)
+#define DECODE_INSN_U7(insn) UNSIGNED_EXTRACT(insn,7,14)
+
+/* For float <-> int conversion */
+#define DECODE_INSN_I8(insn)  SIGNED_EXTRACT(insn,8,14)
+#define DECODE_INSN_U8(insn) UNSIGNED_EXTRACT(insn,8,14)
+
+/* For hbr  */
+#define DECODE_INSN_I9a(insn) ((SIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_I9b(insn) ((SIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_U9a(insn) ((UNSIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+#define DECODE_INSN_U9b(insn) ((UNSIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
+
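
The DECODE_INSN_* macros above pull register numbers and immediates straight out of a raw 32-bit instruction word. A minimal usage sketch, assuming ls_ptr is a pointer into the SPU's local store (the name is illustrative) and using the SPU's big-endian bit numbering in the comments:

    /* Illustrative only: field extraction with the macros above. */
    unsigned int insn;
    int rt, ra, i10;

    insn = *(unsigned int *)ls_ptr;     /* 32-bit word fetched from local store */
    rt  = DECODE_INSN_RT(insn);         /* target register, bits 25-31 */
    ra  = DECODE_INSN_RA(insn);         /* source register, bits 18-24 */
    i10 = DECODE_INSN_I10(insn);        /* sign-extended 10-bit immediate */
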
index f56ffef4defa7f0b3724fea942afda436008d3a8..a34ed49e0356c418a81fa5cfc85e0f47c0cd84a3 100644 (file)
 #include <asm/sstep.h>
 #include <asm/bug.h>
 #include <asm/irq_regs.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/firmware.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/hvcall.h>
 #include <asm/paca.h>
+#include <asm/iseries/it_lp_reg_save.h>
 #endif
 
 #include "nonstdio.h"
+#include "dis-asm.h"
 
 #define scanhex        xmon_scanhex
 #define skipbl xmon_skipbl
@@ -107,7 +112,6 @@ static int bsesc(void);
 static void dump(void);
 static void prdump(unsigned long, long);
 static int ppc_inst_dump(unsigned long, long, int);
-void print_address(unsigned long);
 static void backtrace(struct pt_regs *);
 static void excprint(struct pt_regs *);
 static void prregs(struct pt_regs *);
@@ -147,9 +151,9 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
                              const char *after);
 static const char *getvecname(unsigned long vec);
 
-int xmon_no_auto_backtrace;
+static int do_spu_cmd(void);
 
-extern int print_insn_powerpc(unsigned long, unsigned long, int);
+int xmon_no_auto_backtrace;
 
 extern void xmon_enter(void);
 extern void xmon_leave(void);
@@ -209,8 +213,15 @@ Commands:\n\
   mi   show information about memory allocation\n\
   p    call a procedure\n\
   r    print registers\n\
-  s    single step\n\
-  S    print special registers\n\
+  s    single step\n"
+#ifdef CONFIG_SPU_BASE
+"  ss  stop execution on all spus\n\
+  sr   restore execution on stopped spus\n\
+  sf  #        dump spu fields for spu # (in hex)\n\
+  sd  #        dump spu local store for spu # (in hex)\
+  sdi #        disassemble spu local store for spu # (in hex)\n"
+#endif
+"  S   print special registers\n\
   t    print backtrace\n\
   x    exit monitor and recover\n\
   X    exit monitor and dont recover\n"
@@ -518,6 +529,7 @@ int xmon(struct pt_regs *excp)
                xmon_save_regs(&regs);
                excp = &regs;
        }
+
        return xmon_core(excp, 0);
 }
 EXPORT_SYMBOL(xmon);
@@ -809,6 +821,8 @@ cmds(struct pt_regs *excp)
                        cacheflush();
                        break;
                case 's':
+                       if (do_spu_cmd() == 0)
+                               break;
                        if (do_step(excp))
                                return cmd;
                        break;
@@ -1555,11 +1569,6 @@ void super_regs(void)
 {
        int cmd;
        unsigned long val;
-#ifdef CONFIG_PPC_ISERIES
-       struct paca_struct *ptrPaca = NULL;
-       struct lppaca *ptrLpPaca = NULL;
-       struct ItLpRegSave *ptrLpRegSave = NULL;
-#endif
 
        cmd = skipbl();
        if (cmd == '\n') {
@@ -1576,26 +1585,32 @@ void super_regs(void)
                printf("sp   = "REG"  sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
                printf("toc  = "REG"  dar  = "REG"\n", toc, mfspr(SPRN_DAR));
 #ifdef CONFIG_PPC_ISERIES
-               // Dump out relevant Paca data areas.
-               printf("Paca: \n");
-               ptrPaca = get_paca();
-    
-               printf("  Local Processor Control Area (LpPaca): \n");
-               ptrLpPaca = ptrPaca->lppaca_ptr;
-               printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n",
-                      ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
-               printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n",
-                      ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
-               printf("    Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
-    
-               printf("  Local Processor Register Save Area (LpRegSave): \n");
-               ptrLpRegSave = ptrPaca->reg_save_ptr;
-               printf("    Saved Sprg0=%.16lx  Saved Sprg1=%.16lx \n",
-                      ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG0);
-               printf("    Saved Sprg2=%.16lx  Saved Sprg3=%.16lx \n",
-                      ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
-               printf("    Saved Msr  =%.16lx  Saved Nia  =%.16lx \n",
-                      ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+               if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+                       struct paca_struct *ptrPaca;
+                       struct lppaca *ptrLpPaca;
+                       struct ItLpRegSave *ptrLpRegSave;
+
+                       /* Dump out relevant Paca data areas. */
+                       printf("Paca: \n");
+                       ptrPaca = get_paca();
+
+                       printf("  Local Processor Control Area (LpPaca): \n");
+                       ptrLpPaca = ptrPaca->lppaca_ptr;
+                       printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n",
+                              ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
+                       printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n",
+                              ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
+                       printf("    Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);
+
+                       printf("  Local Processor Register Save Area (LpRegSave): \n");
+                       ptrLpRegSave = ptrPaca->reg_save_ptr;
+                       printf("    Saved Sprg0=%.16lx  Saved Sprg1=%.16lx \n",
+                              ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG0);
+                       printf("    Saved Sprg2=%.16lx  Saved Sprg3=%.16lx \n",
+                              ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
+                       printf("    Saved Msr  =%.16lx  Saved Nia  =%.16lx \n",
+                              ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
+               }
 #endif
 
                return;
@@ -2053,8 +2068,11 @@ prdump(unsigned long adrs, long ndump)
        }
 }
 
+typedef int (*instruction_dump_func)(unsigned long inst, unsigned long addr);
+
 int
-ppc_inst_dump(unsigned long adr, long count, int praddr)
+generic_inst_dump(unsigned long adr, long count, int praddr,
+                       instruction_dump_func dump_func)
 {
        int nr, dotted;
        unsigned long first_adr;
@@ -2084,12 +2102,18 @@ ppc_inst_dump(unsigned long adr, long count, int praddr)
                if (praddr)
                        printf(REG"  %.8x", adr, inst);
                printf("\t");
-               print_insn_powerpc(inst, adr, 0);       /* always returns 4 */
+               dump_func(inst, adr);
                printf("\n");
        }
        return adr - first_adr;
 }
 
+int
+ppc_inst_dump(unsigned long adr, long count, int praddr)
+{
+       return generic_inst_dump(adr, count, praddr, print_insn_powerpc);
+}
+
 void
 print_address(unsigned long addr)
 {
@@ -2557,6 +2581,10 @@ void dump_segments(void)
 
 void xmon_init(int enable)
 {
+#ifdef CONFIG_PPC_ISERIES
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               return;
+#endif
        if (enable) {
                __debugger = xmon;
                __debugger_ipi = xmon_ipi;
@@ -2594,6 +2622,10 @@ static struct sysrq_key_op sysrq_xmon_op =
 
 static int __init setup_xmon_sysrq(void)
 {
+#ifdef CONFIG_PPC_ISERIES
+       if (firmware_has_feature(FW_FEATURE_ISERIES))
+               return 0;
+#endif
        register_sysrq_key('x', &sysrq_xmon_op);
        return 0;
 }
@@ -2630,3 +2662,263 @@ void __init xmon_setup(void)
        if (xmon_early)
                debugger(NULL);
 }
+
+#ifdef CONFIG_SPU_BASE
+
+struct spu_info {
+       struct spu *spu;
+       u64 saved_mfc_sr1_RW;
+       u32 saved_spu_runcntl_RW;
+       unsigned long dump_addr;
+       u8 stopped_ok;
+};
+
+#define XMON_NUM_SPUS  16      /* Enough for current hardware */
+
+static struct spu_info spu_info[XMON_NUM_SPUS];
+
+void xmon_register_spus(struct list_head *list)
+{
+       struct spu *spu;
+
+       list_for_each_entry(spu, list, full_list) {
+               if (spu->number >= XMON_NUM_SPUS) {
+                       WARN_ON(1);
+                       continue;
+               }
+
+               spu_info[spu->number].spu = spu;
+               spu_info[spu->number].stopped_ok = 0;
+               spu_info[spu->number].dump_addr = (unsigned long)
+                               spu_info[spu->number].spu->local_store;
+       }
+}
+
+static void stop_spus(void)
+{
+       struct spu *spu;
+       int i;
+       u64 tmp;
+
+       for (i = 0; i < XMON_NUM_SPUS; i++) {
+               if (!spu_info[i].spu)
+                       continue;
+
+               if (setjmp(bus_error_jmp) == 0) {
+                       catch_memory_errors = 1;
+                       sync();
+
+                       spu = spu_info[i].spu;
+
+                       spu_info[i].saved_spu_runcntl_RW =
+                               in_be32(&spu->problem->spu_runcntl_RW);
+
+                       tmp = spu_mfc_sr1_get(spu);
+                       spu_info[i].saved_mfc_sr1_RW = tmp;
+
+                       tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+                       spu_mfc_sr1_set(spu, tmp);
+
+                       sync();
+                       __delay(200);
+
+                       spu_info[i].stopped_ok = 1;
+
+                       printf("Stopped spu %.2d (was %s)\n", i,
+                                       spu_info[i].saved_spu_runcntl_RW ?
+                                       "running" : "stopped");
+               } else {
+                       catch_memory_errors = 0;
+                       printf("*** Error stopping spu %.2d\n", i);
+               }
+               catch_memory_errors = 0;
+       }
+}
+
+static void restart_spus(void)
+{
+       struct spu *spu;
+       int i;
+
+       for (i = 0; i < XMON_NUM_SPUS; i++) {
+               if (!spu_info[i].spu)
+                       continue;
+
+               if (!spu_info[i].stopped_ok) {
+                       printf("*** Error, spu %d was not successfully stopped"
+                                       ", not restarting\n", i);
+                       continue;
+               }
+
+               if (setjmp(bus_error_jmp) == 0) {
+                       catch_memory_errors = 1;
+                       sync();
+
+                       spu = spu_info[i].spu;
+                       spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW);
+                       out_be32(&spu->problem->spu_runcntl_RW,
+                                       spu_info[i].saved_spu_runcntl_RW);
+
+                       sync();
+                       __delay(200);
+
+                       printf("Restarted spu %.2d\n", i);
+               } else {
+                       catch_memory_errors = 0;
+                       printf("*** Error restarting spu %.2d\n", i);
+               }
+               catch_memory_errors = 0;
+       }
+}
+
+#define DUMP_WIDTH     23
+#define DUMP_VALUE(format, field, value)                               \
+do {                                                                   \
+       if (setjmp(bus_error_jmp) == 0) {                               \
+               catch_memory_errors = 1;                                \
+               sync();                                                 \
+               printf("  %-*s = "format"\n", DUMP_WIDTH,               \
+                               #field, value);                         \
+               sync();                                                 \
+               __delay(200);                                           \
+       } else {                                                        \
+               catch_memory_errors = 0;                                \
+               printf("  %-*s = *** Error reading field.\n",           \
+                                       DUMP_WIDTH, #field);            \
+       }                                                               \
+       catch_memory_errors = 0;                                        \
+} while (0)
+
+#define DUMP_FIELD(obj, format, field) \
+       DUMP_VALUE(format, field, obj->field)
+
+static void dump_spu_fields(struct spu *spu)
+{
+       printf("Dumping spu fields at address %p:\n", spu);
+
+       DUMP_FIELD(spu, "0x%x", number);
+       DUMP_FIELD(spu, "%s", name);
+       DUMP_FIELD(spu, "0x%lx", local_store_phys);
+       DUMP_FIELD(spu, "0x%p", local_store);
+       DUMP_FIELD(spu, "0x%lx", ls_size);
+       DUMP_FIELD(spu, "0x%x", node);
+       DUMP_FIELD(spu, "0x%lx", flags);
+       DUMP_FIELD(spu, "0x%lx", dar);
+       DUMP_FIELD(spu, "0x%lx", dsisr);
+       DUMP_FIELD(spu, "%d", class_0_pending);
+       DUMP_FIELD(spu, "0x%lx", irqs[0]);
+       DUMP_FIELD(spu, "0x%lx", irqs[1]);
+       DUMP_FIELD(spu, "0x%lx", irqs[2]);
+       DUMP_FIELD(spu, "0x%x", slb_replace);
+       DUMP_FIELD(spu, "%d", pid);
+       DUMP_FIELD(spu, "%d", prio);
+       DUMP_FIELD(spu, "0x%p", mm);
+       DUMP_FIELD(spu, "0x%p", ctx);
+       DUMP_FIELD(spu, "0x%p", rq);
+       DUMP_FIELD(spu, "0x%p", timestamp);
+       DUMP_FIELD(spu, "0x%lx", problem_phys);
+       DUMP_FIELD(spu, "0x%p", problem);
+       DUMP_VALUE("0x%x", problem->spu_runcntl_RW,
+                       in_be32(&spu->problem->spu_runcntl_RW));
+       DUMP_VALUE("0x%x", problem->spu_status_R,
+                       in_be32(&spu->problem->spu_status_R));
+       DUMP_VALUE("0x%x", problem->spu_npc_RW,
+                       in_be32(&spu->problem->spu_npc_RW));
+       DUMP_FIELD(spu, "0x%p", priv2);
+       DUMP_FIELD(spu, "0x%p", pdata);
+}
+
+int
+spu_inst_dump(unsigned long adr, long count, int praddr)
+{
+       return generic_inst_dump(adr, count, praddr, print_insn_spu);
+}
+
+static void dump_spu_ls(unsigned long num, int subcmd)
+{
+       unsigned long offset, addr, ls_addr;
+
+       if (setjmp(bus_error_jmp) == 0) {
+               catch_memory_errors = 1;
+               sync();
+               ls_addr = (unsigned long)spu_info[num].spu->local_store;
+               sync();
+               __delay(200);
+       } else {
+               catch_memory_errors = 0;
+               printf("*** Error: accessing spu info for spu %d\n", num);
+               return;
+       }
+       catch_memory_errors = 0;
+
+       if (scanhex(&offset))
+               addr = ls_addr + offset;
+       else
+               addr = spu_info[num].dump_addr;
+
+       if (addr >= ls_addr + LS_SIZE) {
+               printf("*** Error: address outside of local store\n");
+               return;
+       }
+
+       switch (subcmd) {
+       case 'i':
+               addr += spu_inst_dump(addr, 16, 1);
+               last_cmd = "sdi\n";
+               break;
+       default:
+               prdump(addr, 64);
+               addr += 64;
+               last_cmd = "sd\n";
+               break;
+       }
+
+       spu_info[num].dump_addr = addr;
+}
+
+static int do_spu_cmd(void)
+{
+       static unsigned long num = 0;
+       int cmd, subcmd = 0;
+
+       cmd = inchar();
+       switch (cmd) {
+       case 's':
+               stop_spus();
+               break;
+       case 'r':
+               restart_spus();
+               break;
+       case 'd':
+               subcmd = inchar();
+               if (isxdigit(subcmd) || subcmd == '\n')
+                       termch = subcmd;
+       case 'f':
+               scanhex(&num);
+               if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
+                       printf("*** Error: invalid spu number\n");
+                       return 0;
+               }
+
+               switch (cmd) {
+               case 'f':
+                       dump_spu_fields(spu_info[num].spu);
+                       break;
+               default:
+                       dump_spu_ls(num, subcmd);
+                       break;
+               }
+
+               break;
+       default:
+               return -1;
+       }
+
+       return 0;
+}
+#else /* ! CONFIG_SPU_BASE */
+static int do_spu_cmd(void)
+{
+       return -1;
+}
+#endif
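
Taken together, the xmon additions above form a small SPU debugging vocabulary: 'ss' stops every registered SPU (saving MFC_SR1 and the run-control register first), 'sr' restores only the SPUs that were stopped successfully, 'sf #' dumps the struct spu fields for one SPU, and 'sd #'/'sdi #' hex-dump or disassemble its local store, continuing from the last dump address. A hypothetical session (prompt and comments are illustrative) might look like:

    mon> ss        # stop all SPUs, remembering their previous run state
    mon> sf 0      # dump struct spu fields for SPU 0
    mon> sdi 2     # disassemble SPU 2's local store from its last dump address
    mon> sr        # restart the SPUs that were stopped successfully

Every hardware access in these paths is wrapped in the existing setjmp(bus_error_jmp)/catch_memory_errors guard, so a machine check while poking a misbehaving SPU drops back into the monitor instead of taking it down.
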
diff --git a/arch/ppc/.gitignore b/arch/ppc/.gitignore
new file mode 100644 (file)
index 0000000..a1a869c
--- /dev/null
@@ -0,0 +1 @@
+include
index 2e1943e2781963886eda3a1b8ea2afdbb6850060..709952c25f2948c197acdbc6947c3b2493f2b1d1 100644 (file)
@@ -385,6 +385,7 @@ struct fcc_enet_private {
        phy_info_t      *phy;
        struct work_struct phy_relink;
        struct work_struct phy_display_config;
+       struct net_device *dev;
 
        uint    sequence_done;
 
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
        NULL
 };
 
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       volatile struct fcc_enet_private *fep = dev->priv;
+       volatile struct fcc_enet_private *fep =
+               container_of(work, struct fcc_enet_private, phy_relink);
+       struct net_device *dev = fep->dev;
        uint s = fep->phy_status;
 
        if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
        printk(".\n");
 }
 
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       volatile struct fcc_enet_private *fep = dev->priv;
+       volatile struct fcc_enet_private *fep =
+               container_of(work, struct fcc_enet_private,
+                            phy_display_config);
+       struct net_device *dev = fep->dev;
        uint s = fep->phy_status;
 
        printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
                cep->phy_id_done = 0;
                cep->phy_addr = fip->fc_phyaddr;
                mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
-               INIT_WORK(&cep->phy_relink, mii_display_status, dev);
-               INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+               INIT_WORK(&cep->phy_relink, mii_display_status);
+               INIT_WORK(&cep->phy_display_config, mii_display_config);
+               cep->dev = dev;
 #endif /* CONFIG_USE_MDIO */
 
                fip++;
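
The fcc_enet hunks above (and the fec.c and s390 appldata hunks further down) are mechanical fallout from the workqueue API rework merged in this window: work functions now receive the struct work_struct pointer itself rather than an opaque void *, so each driver grows a back-pointer to its net_device and recovers its private state with container_of(). A sketch of the before/after pattern, with invented names:

    /* Illustrative conversion pattern; names are made up. */
    struct foo_private {
            struct net_device *dev;         /* back-pointer added by this patch */
            struct work_struct phy_task;
    };

    /* Old style: the callback took an opaque pointer supplied at init time:
     *     INIT_WORK(&fep->phy_task, foo_relink, dev);
     * New style: the callback receives the work item and digs the private
     * data back out with container_of(). */
    static void foo_relink(struct work_struct *work)
    {
            struct foo_private *fep =
                    container_of(work, struct foo_private, phy_task);
            struct net_device *dev = fep->dev;

            /* ... act on dev ... */
    }

    /* ... and at init time:
     *     fep->dev = dev;
     *     INIT_WORK(&fep->phy_task, foo_relink);
     */
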
index 2f9fa9e3d331fcdc903164068262156de075bb2c..e6c28fb423b234ba5c8f8892499923e36c1cd92e 100644 (file)
@@ -173,6 +173,7 @@ struct fec_enet_private {
        uint    phy_speed;
        phy_info_t      *phy;
        struct work_struct phy_task;
+       struct net_device *dev;
 
        uint    sequence_done;
 
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
        printk(".\n");
 }
 
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *)priv;
-       struct fec_enet_private *fep = dev->priv;
+       struct fec_enet_private *fep =
+               container_of(work, struct fec_enet_private, phy_task);
+       struct net_device *dev = fep->dev;
        volatile uint *s = &(fep->phy_status);
 
        printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
        fep->sequence_done = 1;
 }
 
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *)priv;
-       struct fec_enet_private *fep = dev->priv;
+       struct fec_enet_private *fep =
+               container_of(work, struct fec_enet_private, phy_task);
+       struct net_device *dev = fep->dev;
        int duplex;
 
        fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
 {
        struct fec_enet_private *fep = dev->priv;
 
-       INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+       fep->dev = dev;
+       INIT_WORK(&fep->phy_task, mii_relink);
        schedule_work(&fep->phy_task);
 }
 
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
 {
        struct fec_enet_private *fep = dev->priv;
 
-       INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+       fep->dev = dev;
+       INIT_WORK(&fep->phy_task, mii_display_config);
        schedule_work(&fep->phy_task);
 }
 
index ef018e25fb07a2462462a7913af412cf3491d791..edf71a4ecc95d391138424bde84c90c3d209e171 100644 (file)
@@ -77,9 +77,11 @@ config 6xx
 
 config 40x
        bool "40x"
+       select PPC_DCR_NATIVE
 
 config 44x
        bool "44x"
+       select PPC_DCR_NATIVE
 
 config 8xx
        bool "8xx"
@@ -95,6 +97,15 @@ endchoice
 config PPC_FPU
        bool
 
+config PPC_DCR_NATIVE
+       bool
+       default n
+
+config PPC_DCR
+       bool
+       depends on PPC_DCR_NATIVE
+       default y
+
 config BOOKE
        bool
        depends on E200 || E500
diff --git a/arch/ppc/boot/images/.gitignore b/arch/ppc/boot/images/.gitignore
new file mode 100644 (file)
index 0000000..21c2dc5
--- /dev/null
@@ -0,0 +1,6 @@
+sImage
+vmapus
+vmlinux*
+miboot*
+zImage*
+uImage
diff --git a/arch/ppc/boot/lib/.gitignore b/arch/ppc/boot/lib/.gitignore
new file mode 100644 (file)
index 0000000..1629a61
--- /dev/null
@@ -0,0 +1,3 @@
+inffast.c
+inflate.c
+inftrees.c
diff --git a/arch/ppc/boot/utils/.gitignore b/arch/ppc/boot/utils/.gitignore
new file mode 100644 (file)
index 0000000..bbdfb3b
--- /dev/null
@@ -0,0 +1,3 @@
+mkprep
+mkbugboot
+mktree
index 27faeca2c7a209438eca4fb394b2ce868ce357c2..3c506af1988085393265aeb5a30c6755b7ce220a 100644 (file)
@@ -313,7 +313,7 @@ early_init(int r3, int r4, int r5)
         * Identify the CPU type and fix up code sections
         * that depend on which cpu we have.
         */
-       spec = identify_cpu(offset);
+       spec = identify_cpu(offset, mfspr(SPRN_PVR));
        do_feature_fixups(spec->cpu_features,
                          PTRRELOC(&__start___ftr_fixup),
                          PTRRELOC(&__stop___ftr_fixup));
index 9661a91183b35a18f79636e2f196d6a8d3f3eaa0..2f835b9e95e488996d968c271581ae8a0f3117fe 100644 (file)
@@ -316,7 +316,7 @@ void machine_check_exception(struct pt_regs *regs)
        if (reason & MCSR_BUS_RBERR)
                printk("Bus - Read Data Bus Error\n");
        if (reason & MCSR_BUS_WBERR)
-               printk("Bus - Read Data Bus Error\n");
+               printk("Bus - Write Data Bus Error\n");
        if (reason & MCSR_BUS_IPERR)
                printk("Bus - Instruction Parity Error\n");
        if (reason & MCSR_BUS_RPERR)
index 16e8661e1fec331cf5fee951288ca6440de0e09b..61921268a0d00c290e7dfd1db163a8311d934461 100644 (file)
@@ -31,6 +31,7 @@ SECTIONS
   .plt : { *(.plt) }
   .text      :
   {
+    _text = .;
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
index 4009f4983ca60aa07e7e4c83af38b63ffc0d309f..75857b38e8947813994871f5dada6e107a64f941 100644 (file)
@@ -116,6 +116,7 @@ bubinga_early_serial_map(void)
 void __init
 bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
 {
+#ifdef CONFIG_PCI
 
        unsigned int bar_response, bar;
        /*
@@ -211,6 +212,7 @@ bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
        printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
        printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
 
+#endif
 #endif
 }
 
index 367430998fc5a8864254b39635d24b044089f69c..8474b05b795aaf3b5dd5e3a7e33c87526eceafa8 100644 (file)
@@ -126,6 +126,7 @@ cpci405_setup_arch(void)
 void __init
 bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
 {
+#ifdef CONFIG_PCI
        unsigned int bar_response, bar;
 
        /* Disable region first */
@@ -167,6 +168,7 @@ bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
                                        PCI_FUNC(hose->first_busno), bar,
                                        &bar_response);
        }
+#endif
 }
 
 void __init
index ae5c82081c95608e0543f9c1eff8c9fa0086155c..e5adf9ba1fca34ddef4a525a975c60ace70e3c76 100644 (file)
@@ -68,6 +68,7 @@ ep405_setup_arch(void)
 void __init
 bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
 {
+#ifdef CONFIG_PCI
        unsigned int bar_response, bar;
        /*
         * Expected PCI mapping:
@@ -130,6 +131,7 @@ bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
                    PCI_FUNC(hose->first_busno), bar, bar_response);
        }
        /* end work arround */
+#endif
 }
 
 void __init
index 3397f0de1592da081f70bda02826cb9665816e16..b84f8df325c493930d43a6c8d0017b15d00abd49 100644 (file)
@@ -121,8 +121,8 @@ mpc834x_sys_setup_arch(void)
 
        mdata->irq[0] = MPC83xx_IRQ_EXT1;
        mdata->irq[1] = MPC83xx_IRQ_EXT2;
-       mdata->irq[2] = -1;
-       mdata->irq[31] = -1;
+       mdata->irq[2] = PHY_POLL;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC1);
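
PHY_POLL is phylib's named constant (from include/linux/phy.h) for "this PHY has no interrupt line, poll it"; its value is -1, so this hunk and the matching 85xx/82xx/8xx board hunks below are semantic clean-up rather than behaviour changes. The resulting idiom in board platform data:

    /* PHY at MII address 2 has no interrupt wired up on this board. */
    mdata->irq[2] = PHY_POLL;       /* phylib state machine polls instead */
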
index 4f839da6782fb0d5ad40299b19ba969850c5302e..00a3ba57063f6cf5dacb38d52a2d9d1a1c84ae84 100644 (file)
@@ -92,9 +92,9 @@ mpc8540ads_setup_arch(void)
 
        mdata->irq[0] = MPC85xx_IRQ_EXT5;
        mdata->irq[1] = MPC85xx_IRQ_EXT5;
-       mdata->irq[2] = -1;
+       mdata->irq[2] = PHY_POLL;
        mdata->irq[3] = MPC85xx_IRQ_EXT5;
-       mdata->irq[31] = -1;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
index 14ecec7bbed77d8d349bdf190d3789fc0e38c717..3a060468dd950ae8102567de4c0c278d23a722c6 100644 (file)
@@ -156,9 +156,9 @@ mpc8560ads_setup_arch(void)
 
        mdata->irq[0] = MPC85xx_IRQ_EXT5;
        mdata->irq[1] = MPC85xx_IRQ_EXT5;
-       mdata->irq[2] = -1;
+       mdata->irq[2] = PHY_POLL;
        mdata->irq[3] = MPC85xx_IRQ_EXT5;
-       mdata->irq[31] = -1;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
index 5ce0f69c1db66ad4b11b4554d447587b4771ba4c..2d59eb776c95757c907046ce04e4069f333ead7b 100644 (file)
@@ -451,9 +451,9 @@ mpc85xx_cds_setup_arch(void)
 
        mdata->irq[0] = MPC85xx_IRQ_EXT5;
        mdata->irq[1] = MPC85xx_IRQ_EXT5;
-       mdata->irq[2] = -1;
-       mdata->irq[3] = -1;
-       mdata->irq[31] = -1;
+       mdata->irq[2] = PHY_POLL;
+       mdata->irq[3] = PHY_POLL;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
index 764d580ff5356c0cbf39a4e6e0627fe4e764be4d..1d10ab98f66dda9d1e63554768e0f7ca277ac984 100644 (file)
@@ -129,7 +129,7 @@ sbc8560_setup_arch(void)
 
        mdata->irq[25] = MPC85xx_IRQ_EXT6;
        mdata->irq[26] = MPC85xx_IRQ_EXT7;
-       mdata->irq[31] = -1;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
index 4bb18ab27672f24fb78b76767f788e8a2892db0b..b1f5b737c70deed904db4198e69a940f3c3000a6 100644 (file)
@@ -123,7 +123,7 @@ gp3_setup_arch(void)
 
        mdata->irq[2] = MPC85xx_IRQ_EXT5;
        mdata->irq[4] = MPC85xx_IRQ_EXT5;
-       mdata->irq[31] = -1;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
index dd45f2e1844951c5416de4189406bf2ddd4ab123..4ee2bd156dc55c1c74e86a56e5718e0d8eb5e023 100644 (file)
@@ -137,9 +137,9 @@ tqm85xx_setup_arch(void)
 
        mdata->irq[0] = MPC85xx_IRQ_EXT8;
        mdata->irq[1] = MPC85xx_IRQ_EXT8;
-       mdata->irq[2] = -1;
+       mdata->irq[2] = PHY_POLL;
        mdata->irq[3] = MPC85xx_IRQ_EXT8;
-       mdata->irq[31] = -1;
+       mdata->irq[31] = PHY_POLL;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
index 1f9ea36837b1aff30544607bbdd69bf4bdb1769a..0bc06768cf24dd13b92a166ca0ab5bf4b99dd4a2 100644 (file)
@@ -266,10 +266,10 @@ static void __init mpc8272ads_fixup_mdio_pdata(struct platform_device *pdev,
                                              int idx)
 {
        m82xx_mii_bb_pdata.irq[0] = PHY_INTERRUPT;
-       m82xx_mii_bb_pdata.irq[1] = -1;
-       m82xx_mii_bb_pdata.irq[2] = -1;
+       m82xx_mii_bb_pdata.irq[1] = PHY_POLL;
+       m82xx_mii_bb_pdata.irq[2] = PHY_POLL;
        m82xx_mii_bb_pdata.irq[3] = PHY_INTERRUPT;
-       m82xx_mii_bb_pdata.irq[31] = -1;
+       m82xx_mii_bb_pdata.irq[31] = PHY_POLL;
 
 
        m82xx_mii_bb_pdata.mdio_dat.offset =
index e95d2c1117476d35d09dcb0f699192b697353727..8a0c07eb4449157796fb0925da4483d4cef54843 100644 (file)
@@ -361,7 +361,7 @@ int __init mpc866ads_init(void)
 
        fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
        /* No PHY interrupt line here */
-       fmpi->irq[0xf] = -1;
+       fmpi->irq[0xf] = PHY_POLL;
 
 /* Since either of the uarts could be used as console, they need to ready */
 #ifdef CONFIG_SERIAL_CPM_SMC1
@@ -380,7 +380,7 @@ int __init mpc866ads_init(void)
 
        fmpi->mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2) & 0x3F) << 1;
        /* No PHY interrupt line here */
-       fmpi->irq[0xf] = -1;
+       fmpi->irq[0xf] = PHY_POLL;
 
        return 0;
 }
index cf5ab47487a752dd352b510643889c60c542ed4f..31fb56593d179b295fbab1c54aa809eeedf67581 100644 (file)
@@ -78,7 +78,7 @@ struct platform_device ppc_sys_platform_devices[] = {
                        {
                                .name   = "pram",
                                .start  = 0x3c00,
-                               .end    = 0x3c80,
+                               .end    = 0x3c7f,
                                .flags  = IORESOURCE_MEM,
                        },
                        {
@@ -103,7 +103,7 @@ struct platform_device ppc_sys_platform_devices[] = {
                        {
                                .name   = "pram",
                                .start  = 0x3d00,
-                               .end    = 0x3d80,
+                               .end    = 0x3d7f,
                                .flags  = IORESOURCE_MEM,
                        },
 
@@ -129,7 +129,7 @@ struct platform_device ppc_sys_platform_devices[] = {
                        {
                                .name   = "pram",
                                .start  = 0x3e00,
-                               .end    = 0x3e80,
+                               .end    = 0x3e7f,
                                .flags  = IORESOURCE_MEM,
                        },
 
@@ -155,7 +155,7 @@ struct platform_device ppc_sys_platform_devices[] = {
                        {
                                .name   = "pram",
                                .start  = 0x3f00,
-                               .end    = 0x3f80,
+                               .end    = 0x3f7f,
                                .flags  = IORESOURCE_MEM,
                        },
 
index af1e8fc7d985aab03d758a9f10423a486a0eda28..b8c2372902634c22db1f20162b966445ebf434a2 100644 (file)
@@ -92,8 +92,8 @@ static int appldata_timer_active;
  * Work queue
  */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
 
 
 /*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
  *
  * call data gathering function for each (active) module
  */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
        struct list_head *lh;
        struct appldata_ops *ops;
@@ -561,7 +561,6 @@ appldata_offline_cpu(int cpu)
        spin_unlock(&appldata_timer_lock);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 appldata_cpu_notify(struct notifier_block *self,
                    unsigned long action, void *hcpu)
@@ -582,7 +581,6 @@ appldata_cpu_notify(struct notifier_block *self,
 static struct notifier_block appldata_nb = {
        .notifier_call = appldata_cpu_notify,
 };
-#endif
 
 /*
  * appldata_init()
index 9565a2dcfadcda474a4d5820a2a642d517c56be5..5c46054195cb29e484d9e9bb0c9c072f7fd91222 100644 (file)
@@ -176,7 +176,6 @@ struct elf_prpsinfo32
 
 #include <linux/highuid.h>
 
-#define elf_addr_t     u32
 /*
 #define init_elf_binfmt init_elf32_binfmt
 */
index 67914fe7f317cb6be4b9e864816e3020f8596e4f..576368c4f60585c4aa27fadea3fbd91e83327423 100644 (file)
@@ -200,7 +200,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
        mutex_lock(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn);
+       free_insn_slot(p->ainsn.insn, 0);
        mutex_unlock(&kprobe_mutex);
 }
 
index 2d549ed2e11399dfe17975fcae72aa512b9f52fe..bbaca66fa29356af1b0c7ea5589622f28aec4519 100644 (file)
@@ -11,7 +11,7 @@
 
 #include <linux/errno.h>
 #include <linux/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 
 #ifndef __s390x__
@@ -258,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
 {
        int oldval = 0, newval, ret;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -284,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
        default:
                ret = -ENOSYS;
        }
-       dec_preempt_count();
+       pagefault_enable();
        *old = oldval;
        return ret;
 }
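The futex hunks above track another interface rename: code that must not sleep around a user-space access now brackets it with pagefault_disable()/pagefault_enable() (declared in linux/uaccess.h) rather than bumping the preempt count by hand. A minimal sketch of the pattern, not taken from the patch (read_user_u32_atomic is an illustrative name):

#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_u32_atomic(u32 __user *uaddr, u32 *val)
{
	int ret;

	pagefault_disable();
	/* returns nonzero instead of faulting the page in while atomic */
	ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}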
index bffc7e176970ffa7eb1e6ad20203fd6862ae866b..d83d64af31f277d5451507d2b60fcc13c3a0af33 100644 (file)
@@ -51,6 +51,14 @@ config GENERIC_TIME
 config ARCH_MAY_HAVE_PC_FDC
        bool
 
+config STACKTRACE_SUPPORT
+       bool
+       default y
+
+config LOCKDEP_SUPPORT
+       bool
+       default y
+
 source "init/Kconfig"
 
 menu "System type"
@@ -219,6 +227,20 @@ config SH_SHMIN
        help
          Select SHMIN if configuring for the SHMIN board.
 
+config SH_7206_SOLUTION_ENGINE
+       bool "SolutionEngine7206"
+       select CPU_SUBTYPE_SH7206
+       help
+         Select 7206 SolutionEngine if configuring for a Hitachi SH7206
+         evaluation board.
+
+config SH_7619_SOLUTION_ENGINE
+       bool "SolutionEngine7619"
+       select CPU_SUBTYPE_SH7619
+       help
+         Select 7619 SolutionEngine if configuring for a Hitachi SH7619
+         evaluation board.
+
 config SH_UNKNOWN
        bool "BareCPU"
        help
@@ -280,12 +302,20 @@ config CF_BASE_ADDR
 
 menu "Processor features"
 
-config CPU_LITTLE_ENDIAN
-       bool "Little Endian"
+choice
+       prompt "Endianness selection"
+       default CPU_LITTLE_ENDIAN
        help
          Some SuperH machines can be configured for either little or big
-         endian byte order. These modes require different kernels. Say Y if
-         your machine is little endian, N if it's a big endian machine.
+         endian byte order. These modes require different kernels.
+
+config CPU_LITTLE_ENDIAN
+       bool "Little Endian"
+
+config CPU_BIG_ENDIAN
+       bool "Big Endian"
+
+endchoice
 
 config SH_FPU
        bool "FPU support"
@@ -345,6 +375,9 @@ config CPU_HAS_MASKREG_IRQ
 config CPU_HAS_INTC2_IRQ
        bool
 
+config CPU_HAS_IPR_IRQ
+       bool
+
 config CPU_HAS_SR_RB
        bool "CPU has SR.RB"
        depends on CPU_SH3 || CPU_SH4
@@ -357,6 +390,9 @@ config CPU_HAS_SR_RB
          See <file:Documentation/sh/register-banks.txt> for further
          information on SR.RB and register banking in the kernel in general.
 
+config CPU_HAS_PTEA
+       bool
+
 endmenu
 
 menu "Timer support"
@@ -364,10 +400,25 @@ depends on !GENERIC_TIME
 
 config SH_TMU
        bool "TMU timer support"
+       depends on CPU_SH3 || CPU_SH4
        default y
        help
          This enables the use of the TMU as the system timer.
 
+config SH_CMT
+       bool "CMT timer support"
+       depends on CPU_SH2
+       default y
+       help
+         This enables the use of the CMT as the system timer.
+
+config SH_MTU2
+       bool "MTU2 timer support"
+       depends on CPU_SH2A
+       default n
+       help
+         This enables the use of the MTU2 as the system timer.
+
 endmenu
 
 source "arch/sh/boards/renesas/hs7751rvoip/Kconfig"
@@ -376,19 +427,52 @@ source "arch/sh/boards/renesas/rts7751r2d/Kconfig"
 
 source "arch/sh/boards/renesas/r7780rp/Kconfig"
 
+config SH_TIMER_IRQ
+       int
+       default "28" if CPU_SUBTYPE_SH7780
+       default "86" if CPU_SUBTYPE_SH7619
+       default "140" if CPU_SUBTYPE_SH7206
+       default "16"
+
+config NO_IDLE_HZ
+       bool "Dynamic tick timer"
+       help
+         Select this option if you want to disable continuous timer ticks
+         and have them programmed to occur as required. This option saves
+         power as the system can remain in idle state for longer.
+
+         By default dynamic tick is disabled during the boot, and can be
+         manually enabled with:
+
+           echo 1 > /sys/devices/system/timer/timer0/dyn_tick
+
+         Alternatively, if you want dynamic tick automatically enabled
+         during boot, pass "dyntick=enable" via the kernel command string.
+
+         Please note that dynamic tick may affect the accuracy of
+         timekeeping on some platforms depending on the implementation.
+
 config SH_PCLK_FREQ
        int "Peripheral clock frequency (in Hz)"
+       default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
+       default "31250000" if CPU_SUBTYPE_SH7619
+       default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
+                             CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \
+                             CPU_SUBTYPE_SH7206
        default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780
        default "60000000" if CPU_SUBTYPE_SH7751
-       default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \
-                             CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705
-       default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
        default "66000000" if CPU_SUBTYPE_SH4_202
        help
          This option is used to specify the peripheral clock frequency.
          This is necessary for determining the reference clock value on
          platforms lacking an RTC.
 
+config SH_CLK_MD
+       int "CPU Mode Pin Setting"
+       depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206
+       help
+         The setting of the CPU mode pins MD2-MD0 on the board.
+
 menu "CPU Frequency scaling"
 
 source "drivers/cpufreq/Kconfig"
@@ -421,6 +505,8 @@ config HEARTBEAT
          behavior is platform-dependent, but normally the flash frequency is
          a hyperbolic function of the 5-minute load average.
 
+source "arch/sh/drivers/Kconfig"
+
 endmenu
 
 config ISA_DMA_API
index 48479e014dac657f3b36dc4252801ed38e61b1b9..66a25ef4ef1b8bf233e7936f049c91617933596f 100644 (file)
@@ -1,5 +1,9 @@
 menu "Kernel hacking"
 
+config TRACE_IRQFLAGS_SUPPORT
+       bool
+       default y
+
 source "lib/Kconfig.debug"
 
 config SH_STANDARD_BIOS
@@ -17,7 +21,18 @@ config SH_STANDARD_BIOS
 
 config EARLY_SCIF_CONSOLE
        bool "Use early SCIF console"
-       depends on CPU_SH4 || CPU_SH2A && !SH_STANDARD_BIOS
+       help
+         This enables an early console using a fixed SCIF port. It can
+         be used by platforms that either are not running the SH
+         standard BIOS or do not wish to use the BIOS callbacks for
+         serial I/O.
+
+config EARLY_SCIF_CONSOLE_PORT
+       hex "SCIF port for early console"
+       depends on EARLY_SCIF_CONSOLE
+       default "0xffe00000" if CPU_SUBTYPE_SH7780
+       default "0xfffe9800" if CPU_SUBTYPE_SH7206
+       default "0xffe80000" if CPU_SH4
 
 config EARLY_PRINTK
        bool "Early printk support"
@@ -30,6 +45,11 @@ config EARLY_PRINTK
          when the kernel may crash or hang before the serial console is
          initialised. If unsure, say N.
 
+         On devices that run SH-IPL and want to keep the port
+         initialization consistent while not using the BIOS callbacks,
+         select both EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, and use
+         the kernel command line option to toggle between the two.
+
 config DEBUG_STACKOVERFLOW
        bool "Check for stack overflows"
        depends on DEBUG_KERNEL
index 26d62ff51a64e951b96aa90d1620365f15c7a1a1..d10bba5e1074bef182503c598125ef0fa963a7d8 100644 (file)
 # for "archclean" and "archdep" for cleaning up and making dependencies for
 # this architecture
 #
-
-cflags-y                               := -mb
-cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     := -ml
-
 isa-y                                  := any
 isa-$(CONFIG_SH_DSP)                   := sh
 isa-$(CONFIG_CPU_SH2)                  := sh2
@@ -38,13 +34,16 @@ isa-y                       := $(isa-y)-nofpu
 endif
 endif
 
-cflags-y       += $(call as-option,-Wa$(comma)-isa=$(isa-y),)
-
-cflags-$(CONFIG_CPU_SH2)               += -m2
-cflags-$(CONFIG_CPU_SH3)               += -m3
-cflags-$(CONFIG_CPU_SH4)               += -m4 \
+cflags-$(CONFIG_CPU_SH2)               := -m2
+cflags-$(CONFIG_CPU_SH3)               := -m3
+cflags-$(CONFIG_CPU_SH4)               := -m4 \
        $(call cc-option,-mno-implicit-fp,-m4-nofpu)
-cflags-$(CONFIG_CPU_SH4A)              += $(call cc-option,-m4a-nofpu,)
+cflags-$(CONFIG_CPU_SH4A)              := -m4a $(call cc-option,-m4a-nofpu,)
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mb
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -ml
+
+cflags-y       += $(call as-option,-Wa$(comma)-isa=$(isa-y),) -ffreestanding
 
 cflags-$(CONFIG_SH_DSP)                        += -Wa,-dsp
 cflags-$(CONFIG_SH_KGDB)               += -g
@@ -59,7 +58,9 @@ OBJCOPYFLAGS  := -O binary -R .note -R .comment -R .stab -R .stabstr -S
 # never be used by anyone. Use a board-specific defconfig that has a
 # reasonable chance of being current instead.
 #
-KBUILD_DEFCONFIG := rts7751r2d_defconfig
+KBUILD_DEFCONFIG := r7780rp_defconfig
+
+KBUILD_IMAGE   := arch/sh/boot/zImage
 
 #
 # Choosing incompatible machines during configuration will result in
@@ -109,6 +110,8 @@ machdir-$(CONFIG_SH_SH4202_MICRODEV)                := superh/microdev
 machdir-$(CONFIG_SH_LANDISK)                   := landisk
 machdir-$(CONFIG_SH_TITAN)                     := titan
 machdir-$(CONFIG_SH_SHMIN)                     := shmin
+machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE)      := se/7206
+machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE)      := se/7619
 machdir-$(CONFIG_SH_UNKNOWN)                   := unknown
 
 incdir-y                       := $(notdir $(machdir-y))
@@ -124,6 +127,7 @@ core-$(CONFIG_HD64465)              += arch/sh/cchips/hd6446x/hd64465/
 core-$(CONFIG_VOYAGERGX)       += arch/sh/cchips/voyagergx/
 
 cpuincdir-$(CONFIG_CPU_SH2)    := cpu-sh2
+cpuincdir-$(CONFIG_CPU_SH2A)   := cpu-sh2a
 cpuincdir-$(CONFIG_CPU_SH3)    := cpu-sh3
 cpuincdir-$(CONFIG_CPU_SH4)    := cpu-sh4
 
index f1776d0279787756c02dc48bd106a5af430b68a8..574b0316ed564ace61bd0ca0ce5364f2d38e1741 100644 (file)
@@ -3,4 +3,6 @@
 #
 
 obj-y   := setup.o io.o irq.o
-obj-$(CONFIG_HEARTBEAT)        += led.o
+
+obj-$(CONFIG_HEARTBEAT)                += led.o
+obj-$(CONFIG_PUSH_SWITCH)      += psw.o
index aa15ec5bc69ee42cf26b776f20562520277f1f46..cc381e19778305a121b0a61c827c066ba515e230 100644 (file)
@@ -10,6 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <asm/r7780rp.h>
 
diff --git a/arch/sh/boards/renesas/r7780rp/psw.c b/arch/sh/boards/renesas/r7780rp/psw.c
new file mode 100644 (file)
index 0000000..c844dfa
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * arch/sh/boards/renesas/r7780rp/psw.c
+ *
+ * push switch support for RDBRP-1/RDBREVRP-1 debug boards.
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/mach/r7780rp.h>
+#include <asm/push-switch.h>
+
+static irqreturn_t psw_irq_handler(int irq, void *arg)
+{
+       struct platform_device *pdev = arg;
+       struct push_switch *psw = platform_get_drvdata(pdev);
+       struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+       unsigned int l, mask;
+       int ret = 0;
+
+       l = ctrl_inw(PA_DBSW);
+
+       /* Nothing to do if there's no state change */
+       if (psw->state) {
+               ret = 1;
+               goto out;
+       }
+
+       mask = l & 0x70;
+       /* Figure out who raised it */
+       if (mask & (1 << psw_info->bit)) {
+               psw->state = !!(mask & (1 << psw_info->bit));
+               if (psw->state) /* debounce */
+                       mod_timer(&psw->debounce, jiffies + 50);
+
+               ret = 1;
+       }
+
+out:
+       /* Clear the switch IRQs */
+       l |= (0x7 << 12);
+       ctrl_outw(l, PA_DBSW);
+
+       return IRQ_RETVAL(ret);
+}
+
+static struct resource psw_resources[] = {
+       [0] = {
+               .start  = IRQ_PSW,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct push_switch_platform_info s2_platform_data = {
+       .name           = "s2",
+       .bit            = 6,
+       .irq_flags      = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                         IRQF_SHARED,
+       .irq_handler    = psw_irq_handler,
+};
+
+static struct platform_device s2_switch_device = {
+       .name           = "push-switch",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(psw_resources),
+       .resource       = psw_resources,
+       .dev            = {
+               .platform_data = &s2_platform_data,
+       },
+};
+
+static struct push_switch_platform_info s3_platform_data = {
+       .name           = "s3",
+       .bit            = 5,
+       .irq_flags      = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                         IRQF_SHARED,
+       .irq_handler    = psw_irq_handler,
+};
+
+static struct platform_device s3_switch_device = {
+       .name           = "push-switch",
+       .id             = 1,
+       .num_resources  = ARRAY_SIZE(psw_resources),
+       .resource       = psw_resources,
+       .dev            = {
+               .platform_data = &s3_platform_data,
+       },
+};
+
+static struct push_switch_platform_info s4_platform_data = {
+       .name           = "s4",
+       .bit            = 4,
+       .irq_flags      = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                         IRQF_SHARED,
+       .irq_handler    = psw_irq_handler,
+};
+
+static struct platform_device s4_switch_device = {
+       .name           = "push-switch",
+       .id             = 2,
+       .num_resources  = ARRAY_SIZE(psw_resources),
+       .resource       = psw_resources,
+       .dev            = {
+               .platform_data = &s4_platform_data,
+       },
+};
+
+static struct platform_device *psw_devices[] = {
+       &s2_switch_device, &s3_switch_device, &s4_switch_device,
+};
+
+static int __init psw_init(void)
+{
+       return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices));
+}
+module_init(psw_init);
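The switch devices registered above only carry data (handler, flags, bit) in their platform_data; they do nothing until a platform driver named "push-switch" binds to them. A hypothetical, much-simplified probe sketch of what such a driver could look like — the real driver is added elsewhere in this series under arch/sh/drivers/, and the fields of struct push_switch are assumed here, not quoted:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/push-switch.h>

static int psw_drv_probe(struct platform_device *pdev)
{
	struct push_switch_platform_info *info = pdev->dev.platform_data;
	struct push_switch *psw;
	int irq, ret;

	psw = kzalloc(sizeof(*psw), GFP_KERNEL);
	if (!psw)
		return -ENOMEM;

	/* this is what psw_irq_handler() reads back via platform_get_drvdata() */
	platform_set_drvdata(pdev, psw);

	irq = platform_get_irq(pdev, 0);
	ret = request_irq(irq, info->irq_handler, info->irq_flags,
			  info->name, pdev);
	if (ret) {
		platform_set_drvdata(pdev, NULL);
		kfree(psw);
	}

	return ret;
}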
index c331caeb694b681658639d5a8e75a3f765afa36c..9f89c8de9db98ba9f5a4e0ad3f6478520af515a0 100644 (file)
@@ -44,8 +44,37 @@ static struct platform_device m66596_usb_host_device = {
        .resource       = m66596_usb_host_resources,
 };
 
+static struct resource cf_ide_resources[] = {
+       [0] = {
+               .start  = 0x1f0,
+               .end    = 0x1f0 + 8,
+               .flags  = IORESOURCE_IO,
+       },
+       [1] = {
+               .start  = 0x1f0 + 0x206,
+               .end    = 0x1f0 + 8 + 0x206 + 8,
+               .flags  = IORESOURCE_IO,
+       },
+       [2] = {
+#ifdef CONFIG_SH_R7780MP
+               .start  = 1,
+#else
+               .start  = 4,
+#endif
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device cf_ide_device  = {
+       .name           = "pata_platform",
+       .id             = -1,
+       .num_resources  = ARRAY_SIZE(cf_ide_resources),
+       .resource       = cf_ide_resources,
+};
+
 static struct platform_device *r7780rp_devices[] __initdata = {
        &m66596_usb_host_device,
+       &cf_ide_device,
 };
 
 static int __init r7780rp_devices_setup(void)
diff --git a/arch/sh/boards/se/7206/Makefile b/arch/sh/boards/se/7206/Makefile
new file mode 100644 (file)
index 0000000..63950f4
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the 7206 SolutionEngine specific parts of the kernel
+#
+
+obj-y   := setup.o io.o irq.o
+obj-$(CONFIG_HEARTBEAT) += led.o
+
diff --git a/arch/sh/boards/se/7206/io.c b/arch/sh/boards/se/7206/io.c
new file mode 100644 (file)
index 0000000..b557273
--- /dev/null
@@ -0,0 +1,123 @@
+/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
+ *
+ * linux/arch/sh/boards/se/7206/io.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7206 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7206.h>
+
+
+static inline void delay(void)
+{
+       ctrl_inw(0x20000000);  /* P2 ROM Area */
+}
+
+/* MS7750 requires special versions of the in*, out* routines, since
+   PC-like I/O ports are located in the upper half of a 16-bit word and
+   can only be accessed with 16-bit-wide operations.  */
+
+static inline volatile __u16 *
+port2adr(unsigned int port)
+{
+       if (port >= 0x2000)
+               return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
+       else if (port >= 0x300 && port < 0x310)
+               return (volatile __u16 *) (PA_SMSC + (port - 0x300));
+}
+
+unsigned char se7206_inb(unsigned long port)
+{
+       return (*port2adr(port))&0xff; 
+}
+
+unsigned char se7206_inb_p(unsigned long port)
+{
+       unsigned long v;
+
+       v = (*port2adr(port))&0xff; 
+       delay();
+       return v;
+}
+
+unsigned short se7206_inw(unsigned long port)
+{
+       return *port2adr(port);
+}
+
+unsigned int se7206_inl(unsigned long port)
+{
+       maybebadio(port);
+       return 0;
+}
+
+void se7206_outb(unsigned char value, unsigned long port)
+{
+       *(port2adr(port)) = value;
+}
+
+void se7206_outb_p(unsigned char value, unsigned long port)
+{
+       *(port2adr(port)) = value;
+       delay();
+}
+
+void se7206_outw(unsigned short value, unsigned long port)
+{
+       *port2adr(port) = value;
+}
+
+void se7206_outl(unsigned int value, unsigned long port)
+{
+       maybebadio(port);
+}
+
+void se7206_insb(unsigned long port, void *addr, unsigned long count)
+{
+       volatile __u16 *p = port2adr(port);
+       __u8 *ap = addr;
+
+       while (count--)
+               *ap++ = *p;
+}
+
+void se7206_insw(unsigned long port, void *addr, unsigned long count)
+{
+       volatile __u16 *p = port2adr(port);
+       __u16 *ap = addr;
+       while (count--)
+               *ap++ = *p;
+}
+
+void se7206_insl(unsigned long port, void *addr, unsigned long count)
+{
+       maybebadio(port);
+}
+
+void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
+{
+       volatile __u16 *p = port2adr(port);
+       const __u8 *ap = addr;
+
+       while (count--)
+               *p = *ap++;
+}
+
+void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
+{
+       volatile __u16 *p = port2adr(port);
+       const __u16 *ap = addr;
+       while (count--)
+               *p = *ap++;
+}
+
+void se7206_outsl(unsigned long port, const void *addr, unsigned long count)
+{
+       maybebadio(port);
+}
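One note on port2adr() above: the SMSC companion-chip window is the sixteen ports 0x300-0x30f, so the bounds test has to be a conjunction — an "||" there would be true for every port. Restated as a tiny self-contained helper (illustrative only):

/* True only for ports inside the 16-byte SMSC register window. */
static inline int port_in_smsc_window(unsigned int port)
{
	return port >= 0x300 && port < 0x310;
}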
diff --git a/arch/sh/boards/se/7206/irq.c b/arch/sh/boards/se/7206/irq.c
new file mode 100644 (file)
index 0000000..3fb0c5f
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/sh/boards/se/7206/irq.c
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/se7206.h>
+
+#define INTSTS0 0x31800000
+#define INTSTS1 0x31800002
+#define INTMSK0 0x31800004
+#define INTMSK1 0x31800006
+#define INTSEL  0x31800008
+
+static void disable_se7206_irq(unsigned int irq)
+{
+       unsigned short val;
+       unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq)));
+       unsigned short msk0,msk1;
+
+       /* Set the priority in IPR to 0 */
+       val = ctrl_inw(INTC_IPR01);
+       val &= mask;
+       ctrl_outw(val, INTC_IPR01);
+       /* FPGA mask set */
+       msk0 = ctrl_inw(INTMSK0);
+       msk1 = ctrl_inw(INTMSK1);
+
+       switch (irq) {
+       case IRQ0_IRQ:
+               msk0 |= 0x0010;
+               break;
+       case IRQ1_IRQ:
+               msk0 |= 0x000f;
+               break;
+       case IRQ2_IRQ:
+               msk0 |= 0x0f00;
+               msk1 |= 0x00ff;
+               break;
+       }
+       ctrl_outw(msk0, INTMSK0);
+       ctrl_outw(msk1, INTMSK1);
+}
+
+static void enable_se7206_irq(unsigned int irq)
+{
+       unsigned short val;
+       unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq)));
+       unsigned short msk0,msk1;
+
+       /* Set priority in IPR back to original value */
+       val = ctrl_inw(INTC_IPR01);
+       val |= value;
+       ctrl_outw(val, INTC_IPR01);
+
+       /* FPGA mask reset */
+       msk0 = ctrl_inw(INTMSK0);
+       msk1 = ctrl_inw(INTMSK1);
+
+       switch (irq) {
+       case IRQ0_IRQ:
+               msk0 &= ~0x0010;
+               break;
+       case IRQ1_IRQ:
+               msk0 &= ~0x000f;
+               break;
+       case IRQ2_IRQ:
+               msk0 &= ~0x0f00;
+               msk1 &= ~0x00ff;
+               break;
+       }
+       ctrl_outw(msk0, INTMSK0);
+       ctrl_outw(msk1, INTMSK1);
+}
+
+static void eoi_se7206_irq(unsigned int irq)
+{
+       unsigned short sts0,sts1;
+
+       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+               enable_se7206_irq(irq);
+       /* FPGA isr clear */
+       sts0 = ctrl_inw(INTSTS0);
+       sts1 = ctrl_inw(INTSTS1);
+
+       switch (irq) {
+       case IRQ0_IRQ:
+               sts0 &= ~0x0010;
+               break;
+       case IRQ1_IRQ:
+               sts0 &= ~0x000f;
+               break;
+       case IRQ2_IRQ:
+               sts0 &= ~0x0f00;
+               sts1 &= ~0x00ff;
+               break;
+       }
+       ctrl_outw(sts0, INTSTS0);
+       ctrl_outw(sts1, INTSTS1);
+}
+
+static struct irq_chip se7206_irq_chip __read_mostly = {
+       .name           = "SE7206-FPGA-IRQ",
+       .mask           = disable_se7206_irq,
+       .unmask         = enable_se7206_irq,
+       .mask_ack       = disable_se7206_irq,
+       .eoi            = eoi_se7206_irq,
+};
+
+static void make_se7206_irq(unsigned int irq)
+{
+       disable_irq_nosync(irq);
+       set_irq_chip_and_handler_name(irq, &se7206_irq_chip,
+                                     handle_level_irq, "level");
+       disable_se7206_irq(irq);
+}
+
+/*
+ * Initialize IRQ setting
+ */
+void __init init_se7206_IRQ(void)
+{
+       make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
+       make_se7206_irq(IRQ1_IRQ); /* ATA */
+       make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
+       ctrl_outw(inw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
+
+       /* FPGA system register setup */
+       ctrl_outw(0x0000, INTSTS0); /* Clear INTSTS0 */
+       ctrl_outw(0x0000, INTSTS1); /* Clear INTSTS1 */
+       /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
+       ctrl_outw(0x0001, INTSEL);
+}
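With the FPGA controller above wired in through set_irq_chip_and_handler_name(), board drivers see a normal level-triggered interrupt and use the ordinary genirq entry points; enable_se7206_irq()/disable_se7206_irq() run behind the scenes. A minimal consumer sketch with an illustrative handler name (IRQ0_IRQ is the SMC91C111 line set up in init_se7206_IRQ()):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/se7206.h>

static irqreturn_t smc_isr(int irq, void *dev_id)
{
	/* device-specific acknowledge and handling would go here */
	return IRQ_HANDLED;
}

static int __init smc_attach(void)
{
	return request_irq(IRQ0_IRQ, smc_isr, 0, "smc91x", NULL);
}
device_initcall(smc_attach);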
diff --git a/arch/sh/boards/se/7206/led.c b/arch/sh/boards/se/7206/led.c
new file mode 100644 (file)
index 0000000..ef79460
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/sh/kernel/led_se.c
+ *
+ * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * This file contains Solution Engine specific LED code.
+ */
+
+#include <linux/config.h>
+#include <asm/se7206.h>
+
+#ifdef CONFIG_HEARTBEAT
+
+#include <linux/sched.h>
+
+/* Cycle the LEDs in the classic Knight Rider/Sun pattern */
+void heartbeat_se(void)
+{
+       static unsigned int cnt = 0, period = 0;
+       volatile unsigned short* p = (volatile unsigned short*)PA_LED;
+       static unsigned bit = 0, up = 1;
+
+       cnt += 1;
+       if (cnt < period) {
+               return;
+       }
+
+       cnt = 0;
+
+       /* Go through the points (roughly!):
+        * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110
+        */
+       period = 110 - ( (300<<FSHIFT)/
+                        ((avenrun[0]/5) + (3<<FSHIFT)) );
+
+       if (up) {
+               if (bit == 7) {
+                       bit--;
+                       up=0;
+               } else {
+                       bit ++;
+               }
+       } else {
+               if (bit == 0) {
+                       bit++;
+                       up=1;
+               } else {
+                       bit--;
+               }
+       }
+       *p = 1<<(bit+8);
+
+}
+#endif /* CONFIG_HEARTBEAT */
diff --git a/arch/sh/boards/se/7206/setup.c b/arch/sh/boards/se/7206/setup.c
new file mode 100644 (file)
index 0000000..0f42e91
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7206/setup.c
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * Hitachi 7206 SolutionEngine Support.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/se7206.h>
+#include <asm/io.h>
+#include <asm/machvec.h>
+
+static struct resource smc91x_resources[] = {
+       [0] = {
+               .start          = 0x300,
+               .end            = 0x300 + 0x020 - 1,
+               .flags          = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start          = 64,
+               .end            = 64,
+               .flags          = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device smc91x_device = {
+       .name           = "smc91x",
+       .id             = -1,
+       .num_resources  = ARRAY_SIZE(smc91x_resources),
+       .resource       = smc91x_resources,
+};
+
+static int __init se7206_devices_setup(void)
+{
+       return platform_device_register(&smc91x_device);
+}
+
+__initcall(se7206_devices_setup);
+
+void heartbeat_se(void);
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+       .mv_name                = "SolutionEngine",
+       .mv_nr_irqs             = 256,
+       .mv_inb                 = se7206_inb,
+       .mv_inw                 = se7206_inw,
+       .mv_inl                 = se7206_inl,
+       .mv_outb                = se7206_outb,
+       .mv_outw                = se7206_outw,
+       .mv_outl                = se7206_outl,
+
+       .mv_inb_p               = se7206_inb_p,
+       .mv_inw_p               = se7206_inw,
+       .mv_inl_p               = se7206_inl,
+       .mv_outb_p              = se7206_outb_p,
+       .mv_outw_p              = se7206_outw,
+       .mv_outl_p              = se7206_outl,
+
+       .mv_insb                = se7206_insb,
+       .mv_insw                = se7206_insw,
+       .mv_insl                = se7206_insl,
+       .mv_outsb               = se7206_outsb,
+       .mv_outsw               = se7206_outsw,
+       .mv_outsl               = se7206_outsl,
+
+       .mv_init_irq            = init_se7206_IRQ,
+#ifdef CONFIG_HEARTBEAT
+       .mv_heartbeat           = heartbeat_se,
+#endif
+};
+ALIAS_MV(se)
diff --git a/arch/sh/boards/se/7619/Makefile b/arch/sh/boards/se/7619/Makefile
new file mode 100644 (file)
index 0000000..3666eca
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the 7619 SolutionEngine specific parts of the kernel
+#
+
+obj-y   := setup.o io.o
diff --git a/arch/sh/boards/se/7619/io.c b/arch/sh/boards/se/7619/io.c
new file mode 100644 (file)
index 0000000..176f1f3
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ *
+ * linux/arch/sh/boards/se/7619/io.c
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * I/O routine for Hitachi 7619 SolutionEngine.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/irq.h>
+
+/* FIXME: M3A-ZAB7 Compact Flash Slot support */
+
+static inline void delay(void)
+{
+       ctrl_inw(0xa0000000);   /* Uncached ROM area (P2) */
+}
+
+#define badio(name,port) \
+  printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \
+        #name, (port), (__u32) __builtin_return_address(0))
+
+unsigned char se7619___inb(unsigned long port)
+{
+       badio(inb, port);
+       return 0;
+}
+
+unsigned char se7619___inb_p(unsigned long port)
+{
+       badio(inb_p, port);
+       delay();
+       return 0;
+}
+
+unsigned short se7619___inw(unsigned long port)
+{
+       badio(inw, port);
+       return 0;
+}
+
+unsigned int se7619___inl(unsigned long port)
+{
+       badio(inl, port);
+       return 0;
+}
+
+void se7619___outb(unsigned char value, unsigned long port)
+{
+       badio(outb, port);
+}
+
+void se7619___outb_p(unsigned char value, unsigned long port)
+{
+       badio(outb_p, port);
+       delay();
+}
+
+void se7619___outw(unsigned short value, unsigned long port)
+{
+       badio(outw, port);
+}
+
+void se7619___outl(unsigned int value, unsigned long port)
+{
+       badio(outl, port);
+}
+
+void se7619___insb(unsigned long port, void *addr, unsigned long count)
+{
+       badio(insb, port);
+}
+
+void se7619___insw(unsigned long port, void *addr, unsigned long count)
+{
+       badio(insw, port);
+}
+
+void se7619___insl(unsigned long port, void *addr, unsigned long count)
+{
+       badio(insl, port);
+}
+
+void se7619___outsb(unsigned long port, const void *addr, unsigned long count)
+{
+       badio(outsb, port);
+}
+
+void se7619___outsw(unsigned long port, const void *addr, unsigned long count)
+{
+       badio(outsw, port);
+}
+
+void se7619___outsl(unsigned long port, const void *addr, unsigned long count)
+{
+       badio(outsl, port);
+}
diff --git a/arch/sh/boards/se/7619/setup.c b/arch/sh/boards/se/7619/setup.c
new file mode 100644 (file)
index 0000000..e627b26
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/boards/se/7619/setup.c
+ *
+ * Copyright (C) 2006 Yoshinori Sato
+ *
+ * Hitachi SH7619 SolutionEngine Support.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/se7619.h>
+#include <asm/machvec.h>
+
+/*
+ * The Machine Vector
+ */
+
+struct sh_machine_vector mv_se __initmv = {
+       .mv_name                = "SolutionEngine",
+       .mv_nr_irqs             = 108,
+       .mv_inb                 = se7619___inb,
+       .mv_inw                 = se7619___inw,
+       .mv_inl                 = se7619___inl,
+       .mv_outb                = se7619___outb,
+       .mv_outw                = se7619___outw,
+       .mv_outl                = se7619___outl,
+
+       .mv_inb_p               = se7619___inb_p,
+       .mv_inw_p               = se7619___inw,
+       .mv_inl_p               = se7619___inl,
+       .mv_outb_p              = se7619___outb_p,
+       .mv_outw_p              = se7619___outw,
+       .mv_outl_p              = se7619___outl,
+
+       .mv_insb                = se7619___insb,
+       .mv_insw                = se7619___insw,
+       .mv_insl                = se7619___insl,
+       .mv_outsb               = se7619___outsb,
+       .mv_outsw               = se7619___outsw,
+       .mv_outsl               = se7619___outsl,
+};
+ALIAS_MV(se)
index a6046d93758b80430d25d422c6374bb755061a63..6bcd939bfaed4899783bf579eabd3011e9221c32 100644 (file)
@@ -1,26 +1,30 @@
 /*
- *     Setup for Titan
+ * arch/sh/boards/titan/setup.c - Setup for Titan
+ *
+ *  Copyright (C) 2006  Jamie Lenehan
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
 #include <linux/init.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
 #include <asm/titan.h>
 #include <asm/io.h>
 
-extern void __init pcibios_init_platform(void);
-
 static struct ipr_data titan_ipr_map[] = {
-       { TITAN_IRQ_WAN,        IRL0_IPR_ADDR,  IRL0_IPR_POS,   IRL0_PRIORITY },
-       { TITAN_IRQ_LAN,        IRL1_IPR_ADDR,  IRL1_IPR_POS,   IRL1_PRIORITY },
-       { TITAN_IRQ_MPCIA,      IRL2_IPR_ADDR,  IRL2_IPR_POS,   IRL2_PRIORITY },
-       { TITAN_IRQ_USB,        IRL3_IPR_ADDR,  IRL3_IPR_POS,   IRL3_PRIORITY },
+       /* IRQ, IPR idx, shift, prio */
+       { TITAN_IRQ_WAN,   3, 12, 8 },  /* eth0 (WAN) */
+       { TITAN_IRQ_LAN,   3,  8, 8 },  /* eth1 (LAN) */
+       { TITAN_IRQ_MPCIA, 3,  4, 8 },  /* mPCI A (top) */
+       { TITAN_IRQ_USB,   3,  0, 8 },  /* mPCI B (bottom), USB */
 };
 
 static void __init init_titan_irq(void)
 {
        /* enable individual interrupt mode for externals */
-       ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
-
+       ipr_irq_enable_irlm();
+       /* register ipr irqs */
        make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map));
 }
 
@@ -47,6 +51,5 @@ struct sh_machine_vector mv_titan __initmv = {
        .mv_ioport_map = titan_ioport_map,
 
        .mv_init_irq =  init_titan_irq,
-       .mv_init_pci =  pcibios_init_platform,
 };
 ALIAS_MV(titan)
index f2fed5ce5cc315b0dac14a1ffa6f568a77d6b950..35452d85b7f7f21ccf341ae3af407ed679dfb450 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <asm/uaccess.h>
+#include <asm/addrspace.h>
 #ifdef CONFIG_SH_STANDARD_BIOS
 #include <asm/sh_bios.h>
 #endif
@@ -228,7 +229,7 @@ long* stack_start = &user_stack[STACK_SIZE];
 void decompress_kernel(void)
 {
        output_data = 0;
-       output_ptr = (unsigned long)&_text+0x20001000;
+       output_ptr = P2SEGADDR((unsigned long)&_text+0x1000);
        free_mem_ptr = (unsigned long)&_end;
        free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
 
index 34e2046c3213db5e69ac9492ef687401fd70813a..2b75b4896ba5ed44a996e37b19e1945a31f2ed80 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc3
-# Tue Oct 31 12:32:06 2006
+# Linux kernel version: 2.6.19
+# Wed Dec  6 11:59:38 2006
 #
 CONFIG_SUPERH=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
@@ -11,6 +11,8 @@ CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 # CONFIG_GENERIC_TIME is not set
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
 #
@@ -37,6 +39,7 @@ CONFIG_BSD_PROCESS_ACCT=y
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+# CONFIG_SYSFS_DEPRECATED is not set
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -118,6 +121,8 @@ CONFIG_SH_R7780RP=y
 # CONFIG_SH_LANDISK is not set
 # CONFIG_SH_TITAN is not set
 # CONFIG_SH_SHMIN is not set
+# CONFIG_SH_7206_SOLUTION_ENGINE is not set
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
 # CONFIG_SH_UNKNOWN is not set
 
 #
@@ -130,6 +135,12 @@ CONFIG_CPU_SH4A=y
 # SH-2 Processor Support
 #
 # CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7206 is not set
 
 #
 # SH-3 Processor Support
@@ -165,6 +176,7 @@ CONFIG_CPU_SH4A=y
 #
 # CONFIG_CPU_SUBTYPE_SH7770 is not set
 CONFIG_CPU_SUBTYPE_SH7780=y
+# CONFIG_CPU_SUBTYPE_SH7785 is not set
 
 #
 # SH4AL-DSP Processor Support
@@ -181,8 +193,14 @@ CONFIG_MEMORY_START=0x08000000
 CONFIG_MEMORY_SIZE=0x08000000
 # CONFIG_32BIT is not set
 CONFIG_VSYSCALL=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
 CONFIG_HUGETLB_PAGE_SIZE_64K=y
+# CONFIG_HUGETLB_PAGE_SIZE_256K is not set
 # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64MB is not set
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -204,12 +222,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
 # Processor features
 #
 CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_CPU_BIG_ENDIAN is not set
 CONFIG_SH_FPU=y
 # CONFIG_SH_DSP is not set
 CONFIG_SH_STORE_QUEUES=y
 CONFIG_CPU_HAS_INTEVT=y
 CONFIG_CPU_HAS_INTC2_IRQ=y
 CONFIG_CPU_HAS_SR_RB=y
+CONFIG_CPU_HAS_PTEA=y
 
 #
 # Timer support
@@ -220,6 +240,8 @@ CONFIG_SH_TMU=y
 # R7780RP options
 #
 CONFIG_SH_R7780MP=y
+CONFIG_SH_TIMER_IRQ=28
+CONFIG_NO_IDLE_HZ=y
 CONFIG_SH_PCLK_FREQ=32000000
 
 #
@@ -237,6 +259,11 @@ CONFIG_SH_PCLK_FREQ=32000000
 #
 # CONFIG_HD6446X_SERIES is not set
 
+#
+# Additional SuperH Device Drivers
+#
+CONFIG_PUSH_SWITCH=y
+
 #
 # Kernel features
 #
@@ -244,7 +271,7 @@ CONFIG_SH_PCLK_FREQ=32000000
 CONFIG_HZ_250=y
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=250
-# CONFIG_KEXEC is not set
+CONFIG_KEXEC=y
 # CONFIG_SMP is not set
 # CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
@@ -278,10 +305,7 @@ CONFIG_PCI_AUTO_UPDATE_RESOURCES=y
 #
 # PCI Hotplug Support
 #
-CONFIG_HOTPLUG_PCI=y
-# CONFIG_HOTPLUG_PCI_FAKE is not set
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_SHPC is not set
+# CONFIG_HOTPLUG_PCI is not set
 
 #
 # Executable file formats
@@ -341,6 +365,7 @@ CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
@@ -556,6 +581,7 @@ CONFIG_SATA_SIL=y
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -572,6 +598,7 @@ CONFIG_SATA_SIL=y
 # CONFIG_PATA_SIS is not set
 # CONFIG_PATA_VIA is not set
 # CONFIG_PATA_WINBOND is not set
+CONFIG_PATA_PLATFORM=y
 
 #
 # Multi-device support (RAID and LVM)
@@ -688,6 +715,7 @@ CONFIG_R8169=y
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -830,10 +858,6 @@ CONFIG_HW_RANDOM=y
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 # CONFIG_DRM is not set
 # CONFIG_RAW_DRIVER is not set
 
@@ -1020,7 +1044,7 @@ CONFIG_INOTIFY_USER=y
 CONFIG_DNOTIFY=y
 # CONFIG_AUTOFS_FS is not set
 # CONFIG_AUTOFS4_FS is not set
-# CONFIG_FUSE_FS is not set
+CONFIG_FUSE_FS=m
 
 #
 # CD-ROM/DVD Filesystems
@@ -1052,7 +1076,7 @@ CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
-# CONFIG_CONFIGFS_FS is not set
+CONFIG_CONFIGFS_FS=m
 
 #
 # Miscellaneous filesystems
@@ -1153,28 +1177,33 @@ CONFIG_NLS_ISO8859_1=y
 #
 # Profiling support
 #
-# CONFIG_PROFILING is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
 
 #
 # Kernel hacking
 #
-# CONFIG_PRINTK_TIME is not set
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_PRINTK_TIME=y
 CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
-CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 # CONFIG_DEBUG_VM is not set
 # CONFIG_DEBUG_LIST is not set
@@ -1184,7 +1213,7 @@ CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_SH_STANDARD_BIOS is not set
 # CONFIG_EARLY_SCIF_CONSOLE is not set
-# CONFIG_DEBUG_STACKOVERFLOW is not set
+CONFIG_DEBUG_STACKOVERFLOW=y
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_4KSTACKS is not set
 # CONFIG_KGDB is not set
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
new file mode 100644 (file)
index 0000000..36cec0b
--- /dev/null
@@ -0,0 +1,826 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.19-rc4
+# Sun Nov  5 16:20:10 2006
+#
+CONFIG_SUPERH=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_GENERIC_TIME is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_TINY_SHMEM=y
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
+# System type
+#
+# CONFIG_SH_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SOLUTION_ENGINE is not set
+# CONFIG_SH_7300_SOLUTION_ENGINE is not set
+# CONFIG_SH_7343_SOLUTION_ENGINE is not set
+# CONFIG_SH_73180_SOLUTION_ENGINE is not set
+# CONFIG_SH_7751_SYSTEMH is not set
+# CONFIG_SH_HP6XX is not set
+# CONFIG_SH_EC3104 is not set
+# CONFIG_SH_SATURN is not set
+# CONFIG_SH_DREAMCAST is not set
+# CONFIG_SH_BIGSUR is not set
+# CONFIG_SH_MPC1211 is not set
+# CONFIG_SH_SH03 is not set
+# CONFIG_SH_SECUREEDGE5410 is not set
+# CONFIG_SH_HS7751RVOIP is not set
+# CONFIG_SH_7710VOIPGW is not set
+# CONFIG_SH_RTS7751R2D is not set
+# CONFIG_SH_R7780RP is not set
+# CONFIG_SH_EDOSK7705 is not set
+# CONFIG_SH_SH4202_MICRODEV is not set
+# CONFIG_SH_LANDISK is not set
+# CONFIG_SH_TITAN is not set
+# CONFIG_SH_SHMIN is not set
+CONFIG_SH_7206_SOLUTION_ENGINE=y
+# CONFIG_SH_7619_SOLUTION_ENGINE is not set
+# CONFIG_SH_UNKNOWN is not set
+
+#
+# Processor selection
+#
+CONFIG_CPU_SH2=y
+CONFIG_CPU_SH2A=y
+
+#
+# SH-2 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7604 is not set
+# CONFIG_CPU_SUBTYPE_SH7619 is not set
+
+#
+# SH-2A Processor Support
+#
+CONFIG_CPU_SUBTYPE_SH7206=y
+
+#
+# SH-3 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7300 is not set
+# CONFIG_CPU_SUBTYPE_SH7705 is not set
+# CONFIG_CPU_SUBTYPE_SH7706 is not set
+# CONFIG_CPU_SUBTYPE_SH7707 is not set
+# CONFIG_CPU_SUBTYPE_SH7708 is not set
+# CONFIG_CPU_SUBTYPE_SH7709 is not set
+# CONFIG_CPU_SUBTYPE_SH7710 is not set
+
+#
+# SH-4 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7750 is not set
+# CONFIG_CPU_SUBTYPE_SH7091 is not set
+# CONFIG_CPU_SUBTYPE_SH7750R is not set
+# CONFIG_CPU_SUBTYPE_SH7750S is not set
+# CONFIG_CPU_SUBTYPE_SH7751 is not set
+# CONFIG_CPU_SUBTYPE_SH7751R is not set
+# CONFIG_CPU_SUBTYPE_SH7760 is not set
+# CONFIG_CPU_SUBTYPE_SH4_202 is not set
+
+#
+# ST40 Processor Support
+#
+# CONFIG_CPU_SUBTYPE_ST40STB1 is not set
+# CONFIG_CPU_SUBTYPE_ST40GX1 is not set
+
+#
+# SH-4A Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH7770 is not set
+# CONFIG_CPU_SUBTYPE_SH7780 is not set
+
+#
+# SH4AL-DSP Processor Support
+#
+# CONFIG_CPU_SUBTYPE_SH73180 is not set
+# CONFIG_CPU_SUBTYPE_SH7343 is not set
+
+#
+# Memory management options
+#
+CONFIG_PAGE_OFFSET=0x00000000
+CONFIG_MEMORY_START=0x0c000000
+CONFIG_MEMORY_SIZE=0x02000000
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+
+#
+# Cache configuration
+#
+# CONFIG_SH_DIRECT_MAPPED is not set
+# CONFIG_SH_WRITETHROUGH is not set
+# CONFIG_SH_OCRAM is not set
+
+#
+# Processor features
+#
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+# CONFIG_SH_FPU is not set
+# CONFIG_SH_FPU_EMU is not set
+# CONFIG_SH_DSP is not set
+
+#
+# Timer support
+#
+CONFIG_SH_CMT=y
+# CONFIG_SH_MTU2 is not set
+CONFIG_SH_PCLK_FREQ=33333333
+CONFIG_SH_CLK_MD=6
+
+#
+# CPU Frequency scaling
+#
+# CONFIG_CPU_FREQ is not set
+
+#
+# DMA support
+#
+# CONFIG_SH_DMA is not set
+
+#
+# Companion Chips
+#
+# CONFIG_HD6446X_SERIES is not set
+
+#
+# Kernel features
+#
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_KEXEC is not set
+# CONFIG_SMP is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+
+#
+# Boot options
+#
+CONFIG_ZERO_PAGE_OFFSET=0x00001000
+CONFIG_BOOT_LINK_OFFSET=0x00800000
+# CONFIG_UBC_WAKEUP is not set
+# CONFIG_CMDLINE_BOOL is not set
+
+#
+# Bus options
+#
+# CONFIG_PCI is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+
+#
+# PCI Hotplug Support
+#
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options (EXPERIMENTAL)
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_NETDEBUG is not set
+# CONFIG_PACKET is not set
+# CONFIG_UNIX is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
+# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
+# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x20000000
+CONFIG_MTD_PHYSMAP_LEN=0x1000000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=4
+# CONFIG_MTD_SOLUTIONENGINE is not set
+# CONFIG_MTD_UCLINUX is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# Misc devices
+#
+# CONFIG_TIFM_CORE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+# CONFIG_NETDEVICES is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_SH_SCI_NR_UARTS=4
+CONFIG_SERIAL_SH_SCI_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_UNIX98_PTYS is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+# CONFIG_USB_ARCH_HAS_HCD is not set
+# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# InfiniBand support
+#
+
+#
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
+# DMA Engine support
+#
+# CONFIG_DMA_ENGINE is not set
+
+#
+# DMA Clients
+#
+
+#
+# DMA Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+# CONFIG_SYSFS is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLBFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_UNWIND_INFO is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_SH_STANDARD_BIOS is not set
+# CONFIG_EARLY_SCIF_CONSOLE is not set
+# CONFIG_KGDB is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/sh/drivers/Kconfig b/arch/sh/drivers/Kconfig
new file mode 100644 (file)
index 0000000..c54c758
--- /dev/null
@@ -0,0 +1,9 @@
+menu "Additional SuperH Device Drivers"
+
+config PUSH_SWITCH
+       tristate "Push switch support"
+       help
+         This enables support for the push switch framework, a simple
+         framework that allows for sysfs driven switch status reporting.
+
+endmenu
index 338c3729d2705095a338743e2cb6b5528275dbd2..bf18dbfb6787db618d339e082b08bd0a8d6e2713 100644 (file)
@@ -5,4 +5,4 @@
 obj-$(CONFIG_PCI)              += pci/
 obj-$(CONFIG_SH_DMA)           += dma/
 obj-$(CONFIG_SUPERHYWAY)       += superhyway/
-
+obj-$(CONFIG_PUSH_SWITCH)      += push-switch.o
index 065d4c90970ea51956d0e67c9a02d11315c4213e..db1295d32268b51fdd393227bac89a0347004696 100644 (file)
@@ -2,8 +2,8 @@
 # Makefile for the SuperH DMA specific kernel interface routines under Linux.
 #
 
-obj-y                          += dma-api.o dma-isa.o
+obj-y                          += dma-api.o
+obj-$(CONFIG_ISA_DMA_API)      += dma-isa.o
 obj-$(CONFIG_SYSFS)            += dma-sysfs.o
 obj-$(CONFIG_SH_DMA)           += dma-sh.o
 obj-$(CONFIG_SH_DREAMCAST)     += dma-pvr2.o dma-g2.o
-
index 47c3e837599b755c90a57d246dfe9f68c606635c..e062067edd243fac0e2bdacd12ee3fdf1600fdfc 100644 (file)
  */
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
 #include <linux/platform_device.h>
+#include <linux/mm.h>
 #include <asm/dma.h>
 
 DEFINE_SPINLOCK(dma_spin_lock);
 static LIST_HEAD(registered_dmac_list);
 
-/*
- * A brief note about the reasons for this API as it stands.
- *
- * For starters, the old ISA DMA API didn't work for us for a number of
- * reasons, for one, the vast majority of channels on the SH DMAC are
- * dual-address mode only, and both the new and the old DMA APIs are after the
- * concept of managing a DMA buffer, which doesn't overly fit this model very
- * well. In addition to which, the new API is largely geared at IOMMUs and
- * GARTs, and doesn't even support the channel notion very well.
- *
- * The other thing that's a marginal issue, is the sheer number of random DMA
- * engines that are present (ie, in boards like the Dreamcast), some of which
- * cascade off of the SH DMAC, and others do not. As such, there was a real
- * need for a scalable subsystem that could deal with both single and
- * dual-address mode usage, in addition to interoperating with cascaded DMACs.
- *
- * There really isn't any reason why this needs to be SH specific, though I'm
- * not aware of too many other processors (with the exception of some MIPS)
- * that have the same concept of a dual address mode, or any real desire to
- * actually make use of the DMAC even if such a subsystem were exposed
- * elsewhere.
- *
- * The idea for this was derived from the ARM port, which acted as an excellent
- * reference when trying to address these issues.
- *
- * It should also be noted that the decision to add Yet Another DMA API(tm) to
- * the kernel wasn't made easily, and was only decided upon after conferring
- * with jejb with regards to the state of the old and new APIs as they applied
- * to these circumstances. Philip Blundell was also a great help in figuring
- * out some single-address mode DMA semantics that were otherwise rather
- * confusing.
- */
-
 struct dma_info *get_dma_info(unsigned int chan)
 {
        struct dma_info *info;
-       unsigned int total = 0;
 
        /*
         * Look for each DMAC's range to determine who the owner of
         * the channel is.
         */
        list_for_each_entry(info, &registered_dmac_list, list) {
-               total += info->nr_channels;
-               if (chan > total)
+               if ((chan <  info->first_channel_nr) ||
+                   (chan >= info->first_channel_nr + info->nr_channels))
                        continue;
 
                return info;
@@ -73,6 +39,22 @@ struct dma_info *get_dma_info(unsigned int chan)
 
        return NULL;
 }
+EXPORT_SYMBOL(get_dma_info);
+
+struct dma_info *get_dma_info_by_name(const char *dmac_name)
+{
+       struct dma_info *info;
+
+       list_for_each_entry(info, &registered_dmac_list, list) {
+               if (dmac_name && (strcmp(dmac_name, info->name) != 0))
+                       continue;
+               else
+                       return info;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL(get_dma_info_by_name);
 
 static unsigned int get_nr_channels(void)
 {
@@ -91,63 +73,161 @@ static unsigned int get_nr_channels(void)
 struct dma_channel *get_dma_channel(unsigned int chan)
 {
        struct dma_info *info = get_dma_info(chan);
+       struct dma_channel *channel;
+       int i;
 
-       if (!info)
+       if (unlikely(!info))
                return ERR_PTR(-EINVAL);
 
-       return info->channels + chan;
+       for (i = 0; i < info->nr_channels; i++) {
+               channel = &info->channels[i];
+               if (channel->chan == chan)
+                       return channel;
+       }
+
+       return NULL;
 }
+EXPORT_SYMBOL(get_dma_channel);
 
 int get_dma_residue(unsigned int chan)
 {
        struct dma_info *info = get_dma_info(chan);
-       struct dma_channel *channel = &info->channels[chan];
+       struct dma_channel *channel = get_dma_channel(chan);
 
        if (info->ops->get_residue)
                return info->ops->get_residue(channel);
 
        return 0;
 }
+EXPORT_SYMBOL(get_dma_residue);
 
-int request_dma(unsigned int chan, const char *dev_id)
+static int search_cap(const char **haystack, const char *needle)
 {
-       struct dma_info *info = get_dma_info(chan);
-       struct dma_channel *channel = &info->channels[chan];
+       const char **p;
+
+       for (p = haystack; *p; p++)
+               if (strcmp(*p, needle) == 0)
+                       return 1;
+
+       return 0;
+}
+
+/**
+ * request_dma_bycap - Allocate a DMA channel based on its capabilities
+ * @dmac: List of DMA controllers to search
+ * @caps: List of capabilities
+ *
+ * Search all channels of all DMA controllers to find a channel which
+ * matches the requested capabilities. The result is the channel
+ * number if a match is found, or %-ENODEV if no match is found.
+ *
+ * Note that not all DMA controllers export capabilities, in which
+ * case they can never be allocated using this API, and so
+ * request_dma() must be used with an explicit channel number.
+ */
+int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
+{
+       unsigned int found = 0;
+       struct dma_info *info;
+       const char **p;
+       int i;
+
+       BUG_ON(!dmac || !caps);
+
+       list_for_each_entry(info, &registered_dmac_list, list)
+               if (strcmp(*dmac, info->name) == 0) {
+                       found = 1;
+                       break;
+               }
+
+       if (!found)
+               return -ENODEV;
+
+       for (i = 0; i < info->nr_channels; i++) {
+               struct dma_channel *channel = &info->channels[i];
+
+               if (unlikely(!channel->caps))
+                       continue;
+
+               for (p = caps; *p; p++) {
+                       if (!search_cap(channel->caps, *p))
+                               break;
+                       if (request_dma(channel->chan, dev_id) == 0)
+                               return channel->chan;
+               }
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(request_dma_bycap);
+
+int dmac_search_free_channel(const char *dev_id)
+{
+       struct dma_channel *channel = { 0 };
+       struct dma_info *info = get_dma_info(0);
+       int i;
+
+       for (i = 0; i < info->nr_channels; i++) {
+               channel = &info->channels[i];
+               if (unlikely(!channel))
+                       return -ENODEV;
+
+               if (atomic_read(&channel->busy) == 0)
+                       break;
+       }
 
-       down(&channel->sem);
+       if (info->ops->request) {
+               int result = info->ops->request(channel);
+               if (result)
+                       return result;
 
-       if (!info->ops || chan >= MAX_DMA_CHANNELS) {
-               up(&channel->sem);
-               return -EINVAL;
+               atomic_set(&channel->busy, 1);
+               return channel->chan;
        }
 
-       atomic_set(&channel->busy, 1);
+       return -ENOSYS;
+}
+
+int request_dma(unsigned int chan, const char *dev_id)
+{
+       struct dma_channel *channel = { 0 };
+       struct dma_info *info = get_dma_info(chan);
+       int result;
+
+       channel = get_dma_channel(chan);
+       if (atomic_xchg(&channel->busy, 1))
+               return -EBUSY;
 
        strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
 
-       up(&channel->sem);
+       if (info->ops->request) {
+               result = info->ops->request(channel);
+               if (result)
+                       atomic_set(&channel->busy, 0);
 
-       if (info->ops->request)
-               return info->ops->request(channel);
+               return result;
+       }
 
        return 0;
 }
+EXPORT_SYMBOL(request_dma);
 
 void free_dma(unsigned int chan)
 {
        struct dma_info *info = get_dma_info(chan);
-       struct dma_channel *channel = &info->channels[chan];
+       struct dma_channel *channel = get_dma_channel(chan);
 
        if (info->ops->free)
                info->ops->free(channel);
 
        atomic_set(&channel->busy, 0);
 }
+EXPORT_SYMBOL(free_dma);
 
 void dma_wait_for_completion(unsigned int chan)
 {
        struct dma_info *info = get_dma_info(chan);
-       struct dma_channel *channel = &info->channels[chan];
+       struct dma_channel *channel = get_dma_channel(chan);
 
        if (channel->flags & DMA_TEI_CAPABLE) {
                wait_event(channel->wait_queue,
@@ -158,21 +238,52 @@ void dma_wait_for_completion(unsigned int chan)
        while (info->ops->get_residue(channel))
                cpu_relax();
 }
+EXPORT_SYMBOL(dma_wait_for_completion);
+
+int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
+{
+       struct dma_info *info;
+       unsigned int found = 0;
+       int i;
+
+       list_for_each_entry(info, &registered_dmac_list, list)
+               if (strcmp(dmac, info->name) == 0) {
+                       found = 1;
+                       break;
+               }
+
+       if (unlikely(!found))
+               return -ENODEV;
+
+       for (i = 0; i < info->nr_channels; i++, caps++) {
+               struct dma_channel *channel;
+
+               if ((info->first_channel_nr + i) != caps->ch_num)
+                       return -EINVAL;
+
+               channel = &info->channels[i];
+               channel->caps = caps->caplist;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(register_chan_caps);
 
 void dma_configure_channel(unsigned int chan, unsigned long flags)
 {
        struct dma_info *info = get_dma_info(chan);
-       struct dma_channel *channel = &info->channels[chan];
+       struct dma_channel *channel = get_dma_channel(chan);
 
        if (info->ops->configure)
                info->ops->configure(channel, flags);
 }
+EXPORT_SYMBOL(dma_configure_channel);
 
 int dma_xfer(unsigned int chan, unsigned long from,
             unsigned long to, size_t size, unsigned int mode)
 {
        struct dma_info *info = get_dma_info(chan);
-       struct dma_channel *channel = &info->channels[chan];
+       struct dma_channel *channel = get_dma_channel(chan);
 
        channel->sar    = from;
        channel->dar    = to;
@@ -181,8 +292,20 @@ int dma_xfer(unsigned int chan, unsigned long from,
 
        return info->ops->xfer(channel);
 }
+EXPORT_SYMBOL(dma_xfer);
+
+int dma_extend(unsigned int chan, unsigned long op, void *param)
+{
+       struct dma_info *info = get_dma_info(chan);
+       struct dma_channel *channel = get_dma_channel(chan);
+
+       if (info->ops->extend)
+               return info->ops->extend(channel, op, param);
+
+       return -ENOSYS;
+}
+EXPORT_SYMBOL(dma_extend);
 
-#ifdef CONFIG_PROC_FS
 static int dma_read_proc(char *buf, char **start, off_t off,
                         int len, int *eof, void *data)
 {
@@ -214,8 +337,6 @@ static int dma_read_proc(char *buf, char **start, off_t off,
 
        return p - buf;
 }
-#endif
-
 
 int register_dmac(struct dma_info *info)
 {
@@ -224,8 +345,7 @@ int register_dmac(struct dma_info *info)
        INIT_LIST_HEAD(&info->list);
 
        printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
-              info->name, info->nr_channels,
-              info->nr_channels > 1 ? "s" : "");
+              info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");
 
        BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
 
@@ -242,28 +362,26 @@ int register_dmac(struct dma_info *info)
 
                size = sizeof(struct dma_channel) * info->nr_channels;
 
-               info->channels = kmalloc(size, GFP_KERNEL);
+               info->channels = kzalloc(size, GFP_KERNEL);
                if (!info->channels)
                        return -ENOMEM;
-
-               memset(info->channels, 0, size);
        }
 
        total_channels = get_nr_channels();
        for (i = 0; i < info->nr_channels; i++) {
-               struct dma_channel *chan = info->channels + i;
+               struct dma_channel *chan = &info->channels[i];
+
+               atomic_set(&chan->busy, 0);
 
-               chan->chan = i;
-               chan->vchan = i + total_channels;
+               chan->chan  = info->first_channel_nr + i;
+               chan->vchan = info->first_channel_nr + i + total_channels;
 
                memcpy(chan->dev_id, "Unused", 7);
 
                if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
                        chan->flags |= DMA_TEI_CAPABLE;
 
-               init_MUTEX(&chan->sem);
                init_waitqueue_head(&chan->wait_queue);
-
                dma_create_sysfs_files(chan, info);
        }
 
@@ -271,6 +389,7 @@ int register_dmac(struct dma_info *info)
 
        return 0;
 }
+EXPORT_SYMBOL(register_dmac);
 
 void unregister_dmac(struct dma_info *info)
 {
@@ -285,31 +404,16 @@ void unregister_dmac(struct dma_info *info)
        list_del(&info->list);
        platform_device_unregister(info->pdev);
 }
+EXPORT_SYMBOL(unregister_dmac);
 
 static int __init dma_api_init(void)
 {
-       printk("DMA: Registering DMA API.\n");
-
-#ifdef CONFIG_PROC_FS
+       printk(KERN_NOTICE "DMA: Registering DMA API.\n");
        create_proc_read_entry("dma", 0, 0, dma_read_proc, 0);
-#endif
-
        return 0;
 }
-
 subsys_initcall(dma_api_init);
 
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
 MODULE_DESCRIPTION("DMA API for SuperH");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
-EXPORT_SYMBOL(register_dmac);
-EXPORT_SYMBOL(get_dma_residue);
-EXPORT_SYMBOL(get_dma_info);
-EXPORT_SYMBOL(get_dma_channel);
-EXPORT_SYMBOL(dma_xfer);
-EXPORT_SYMBOL(dma_wait_for_completion);
-EXPORT_SYMBOL(dma_configure_channel);
-
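
For orientation, a minimal sketch of how a driver might use the reworked DMA API above follows. It is not part of this commit: the controller name "DMAC", the "tx" capability string, the fallback channel number, the addresses and the device id are all placeholders.

	/* Sketch only: grab a channel (by capability if possible), run one
	 * transfer, and release it again. All identifiers are made up for
	 * illustration. */
	static int example_dma_usage(void)
	{
		const char *dmac[] = { "DMAC", NULL };
		const char *caps[] = { "tx", NULL };
		int chan, ret;

		/* request_dma_bycap() returns an already-requested channel */
		chan = request_dma_bycap(dmac, caps, "example-dev");
		if (chan < 0) {
			/* no capability match: fall back to a fixed channel */
			chan = 0;
			ret = request_dma(chan, "example-dev");
			if (ret < 0)
				return ret;
		}

		dma_configure_channel(chan, 0);	/* flags are board-specific */

		ret = dma_xfer(chan, 0x0c000000, 0x0c100000, 4096,
			       0 /* mode is controller-specific */);
		dma_wait_for_completion(chan);

		free_dma(chan);
		return ret;
	}
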
index 66078601335009804e06213b82bef57a324b4765..f63721ed86c273fc20864fe107e0877e88d3b65c 100644 (file)
@@ -94,20 +94,13 @@ static int sh_dmac_request_dma(struct dma_channel *chan)
        if (unlikely(!chan->flags & DMA_TEI_CAPABLE))
                return 0;
 
-       chan->name = kzalloc(32, GFP_KERNEL);
-       if (unlikely(chan->name == NULL))
-               return -ENOMEM;
-       snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)",
-                chan->chan);
-
        return request_irq(get_dmte_irq(chan->chan), dma_tei,
-                          IRQF_DISABLED, chan->name, chan);
+                          IRQF_DISABLED, chan->dev_id, chan);
 }
 
 static void sh_dmac_free_dma(struct dma_channel *chan)
 {
        free_irq(get_dmte_irq(chan->chan), chan);
-       kfree(chan->name);
 }
 
 static void
index 29b8ef9873d19128f27c150275e8705b2d7357c5..eebcd4768bbf01a12ecbfd2cd6b06b0e60f3c5d8 100644 (file)
@@ -3,7 +3,7 @@
  *
  * sysfs interface for SH DMA API
  *
- * Copyright (C) 2004, 2005  Paul Mundt
+ * Copyright (C) 2004 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -21,7 +21,6 @@
 static struct sysdev_class dma_sysclass = {
        set_kset_name("dma"),
 };
-
 EXPORT_SYMBOL(dma_sysclass);
 
 static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
@@ -31,7 +30,10 @@ static ssize_t dma_show_devices(struct sys_device *dev, char *buf)
 
        for (i = 0; i < MAX_DMA_CHANNELS; i++) {
                struct dma_info *info = get_dma_info(i);
-               struct dma_channel *channel = &info->channels[i];
+               struct dma_channel *channel = get_dma_channel(i);
+
+               if (unlikely(!info) || !channel)
+                       continue;
 
                len += sprintf(buf + len, "%2d: %14s    %s\n",
                               channel->chan, info->name,
@@ -125,11 +127,16 @@ int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
        if (ret)
                return ret;
 
-       sysdev_create_file(dev, &attr_dev_id);
-       sysdev_create_file(dev, &attr_count);
-       sysdev_create_file(dev, &attr_mode);
-       sysdev_create_file(dev, &attr_flags);
-       sysdev_create_file(dev, &attr_config);
+       ret |= sysdev_create_file(dev, &attr_dev_id);
+       ret |= sysdev_create_file(dev, &attr_count);
+       ret |= sysdev_create_file(dev, &attr_mode);
+       ret |= sysdev_create_file(dev, &attr_flags);
+       ret |= sysdev_create_file(dev, &attr_config);
+
+       if (unlikely(ret)) {
+               dev_err(&info->pdev->dev, "Failed creating attrs\n");
+               return ret;
+       }
 
        snprintf(name, sizeof(name), "dma%d", chan->chan);
        return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name);
index cd56d53375e7dc806c32c31cd2851058a622985b..ac8ee2312cd8935b3f8e53e0ef07d0592d866edd 100644 (file)
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/titan.h>
 #include "pci-sh4.h"
 
+static char titan_irq_tab[] __initdata = {
+       TITAN_IRQ_WAN,
+       TITAN_IRQ_LAN,
+       TITAN_IRQ_MPCIA,
+       TITAN_IRQ_MPCIB,
+       TITAN_IRQ_USB,
+};
+
 int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
 {
-       int irq = -1;
-
-       switch (slot) {
-       case 0: irq = TITAN_IRQ_WAN;   break;   /* eth0 (WAN) */
-       case 1: irq = TITAN_IRQ_LAN;   break;   /* eth1 (LAN) */
-       case 2: irq = TITAN_IRQ_MPCIA; break;   /* mPCI A */
-       case 3: irq = TITAN_IRQ_MPCIB; break;   /* mPCI B */
-       case 4: irq = TITAN_IRQ_USB;   break;   /* USB */
-       default:
-               printk(KERN_INFO "PCI: Bad IRQ mapping "
-                                "request for slot %d\n", slot);
-               return -1;
-       }
+       int irq = titan_irq_tab[slot];
 
        printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n",
                slot, pin - 1 + 'A', irq);
index d6e635296534201965be4c9ce72a806a154b3597..602b644c35ad66c8f84fe44d5e1365cb4b261457 100644 (file)
 #include <linux/delay.h>
 #include "pci-sh4.h"
 
+#define INTC_BASE      0xffd00000
+#define INTC_ICR0      (INTC_BASE+0x0)
+#define INTC_ICR1      (INTC_BASE+0x1c)
+#define INTC_INTPRI    (INTC_BASE+0x10)
+#define INTC_INTREQ    (INTC_BASE+0x24)
+#define INTC_INTMSK0   (INTC_BASE+0x44)
+#define INTC_INTMSK1   (INTC_BASE+0x48)
+#define INTC_INTMSK2   (INTC_BASE+0x40080)
+#define INTC_INTMSKCLR0        (INTC_BASE+0x64)
+#define INTC_INTMSKCLR1        (INTC_BASE+0x68)
+#define INTC_INTMSKCLR2        (INTC_BASE+0x40084)
+#define INTC_INT2MSKR  (INTC_BASE+0x40038)
+#define INTC_INT2MSKCR (INTC_BASE+0x4003c)
+
 /*
  * Initialization. Try all known PCI access methods. Note that we support
  * using both PCI BIOS and direct access: in such cases, we use I/O ports
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
new file mode 100644 (file)
index 0000000..f2b9157
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Generic push-switch framework
+ *
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <asm/push-switch.h>
+
+#define DRV_NAME "push-switch"
+#define DRV_VERSION "0.1.0"
+
+static ssize_t switch_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       struct push_switch_platform_info *psw_info = dev->platform_data;
+       return sprintf(buf, "%s\n", psw_info->name);
+}
+static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
+
+static void switch_timer(unsigned long data)
+{
+       struct push_switch *psw = (struct push_switch *)data;
+
+       schedule_work(&psw->work);
+}
+
+static void switch_work_handler(void *data)
+{
+       struct platform_device *pdev = data;
+       struct push_switch *psw = platform_get_drvdata(pdev);
+
+       psw->state = 0;
+
+       kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE);
+}
+
+static int switch_drv_probe(struct platform_device *pdev)
+{
+       struct push_switch_platform_info *psw_info;
+       struct push_switch *psw;
+       int ret, irq;
+
+       psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL);
+       if (unlikely(!psw))
+               return -ENOMEM;
+
+       irq = platform_get_irq(pdev, 0);
+       if (unlikely(irq < 0)) {
+               ret = -ENODEV;
+               goto err;
+       }
+
+       psw_info = pdev->dev.platform_data;
+       BUG_ON(!psw_info);
+
+       ret = request_irq(irq, psw_info->irq_handler,
+                         IRQF_DISABLED | psw_info->irq_flags,
+                         psw_info->name ? psw_info->name : DRV_NAME, pdev);
+       if (unlikely(ret < 0))
+               goto err;
+
+       if (psw_info->name) {
+               ret = device_create_file(&pdev->dev, &dev_attr_switch);
+               if (unlikely(ret)) {
+                       dev_err(&pdev->dev, "Failed creating device attrs\n");
+                       ret = -EINVAL;
+                       goto err_irq;
+               }
+       }
+
+       INIT_WORK(&psw->work, switch_work_handler, pdev);
+       init_timer(&psw->debounce);
+
+       psw->debounce.function = switch_timer;
+       psw->debounce.data = (unsigned long)psw;
+
+       platform_set_drvdata(pdev, psw);
+
+       return 0;
+
+err_irq:
+       free_irq(irq, pdev);
+err:
+       kfree(psw);
+       return ret;
+}
+
+static int switch_drv_remove(struct platform_device *pdev)
+{
+       struct push_switch *psw = platform_get_drvdata(pdev);
+       struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
+       int irq = platform_get_irq(pdev, 0);
+
+       if (psw_info->name)
+               device_remove_file(&pdev->dev, &dev_attr_switch);
+
+       platform_set_drvdata(pdev, NULL);
+       flush_scheduled_work();
+       del_timer_sync(&psw->debounce);
+       free_irq(irq, pdev);
+
+       kfree(psw);
+
+       return 0;
+}
+
+static struct platform_driver switch_driver = {
+       .probe          = switch_drv_probe,
+       .remove         = switch_drv_remove,
+       .driver         = {
+               .name   = DRV_NAME,
+       },
+};
+
+static int __init switch_init(void)
+{
+       printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
+       return platform_driver_register(&switch_driver);
+}
+
+static void __exit switch_exit(void)
+{
+       platform_driver_unregister(&switch_driver);
+}
+module_init(switch_init);
+module_exit(switch_exit);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Paul Mundt");
+MODULE_LICENSE("GPLv2");
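
The driver above only registers the platform driver; the switch itself is supplied by board code. A rough, hypothetical sketch of that board-side half follows. The IRQ number, switch name and debounce interval are invented, and the handler merely mirrors what switch_drv_probe() expects from push_switch_platform_info.

	/* Hypothetical board file: wire one IRQ-backed switch into the
	 * push-switch framework. Numbers and names are placeholders. */
	static irqreturn_t example_switch_handler(int irq, void *data)
	{
		struct platform_device *pdev = data;
		struct push_switch *psw = platform_get_drvdata(pdev);

		psw->state = 1;
		mod_timer(&psw->debounce, jiffies + 50);	/* debounce */

		return IRQ_HANDLED;
	}

	static struct push_switch_platform_info example_switch_info = {
		.irq_handler	= example_switch_handler,
		.irq_flags	= 0,		/* no extra IRQ flags assumed */
		.name		= "example-switch",
	};

	static struct resource example_switch_resources[] = {
		[0] = {
			.start	= 100,		/* placeholder IRQ number */
			.end	= 100,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_switch_device = {
		.name		= "push-switch",
		.id		= -1,
		.num_resources	= ARRAY_SIZE(example_switch_resources),
		.resource	= example_switch_resources,
		.dev		= {
			.platform_data	= &example_switch_info,
		},
	};
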
index 5da88a43d350f4d38d92956090f4dce201e17d6e..99c7e5249f7a6ca16a68911955e90e3abb357855 100644 (file)
@@ -4,7 +4,7 @@
 
 extra-y        := head.o init_task.o vmlinux.lds
 
-obj-y  := process.o signal.o entry.o traps.o irq.o \
+obj-y  := process.o signal.o traps.o irq.o \
        ptrace.o setup.o time.o sys_sh.o semaphore.o \
        io.o io_generic.o sh_ksyms.o syscalls.o
 
@@ -21,3 +21,4 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_APM)              += apm.o
 obj-$(CONFIG_PM)               += pm.o
+obj-$(CONFIG_STACKTRACE)       += stacktrace.o
index fb5dac0693827df8316d15ec253b7005c0349b39..0582e6712b7927aaf6bf3841758e7ba39f5f0329 100644 (file)
@@ -2,11 +2,12 @@
 # Makefile for the Linux/SuperH CPU-specific backends.
 #
 
-obj-y  += irq/ init.o clock.o
-
-obj-$(CONFIG_CPU_SH2)          += sh2/
-obj-$(CONFIG_CPU_SH3)          += sh3/
-obj-$(CONFIG_CPU_SH4)          += sh4/
+obj-$(CONFIG_CPU_SH2)          = sh2/
+obj-$(CONFIG_CPU_SH2A)         = sh2a/
+obj-$(CONFIG_CPU_SH3)          = sh3/
+obj-$(CONFIG_CPU_SH4)          = sh4/
 
 obj-$(CONFIG_UBC_WAKEUP)       += ubc.o
 obj-$(CONFIG_SH_ADC)           += adc.o
+
+obj-y  += irq/ init.o clock.o
index 51ec64cdf348ebb30c61ffab7be21e4dc2ffa1d6..abb586b125657a09e2b7f85579173313ec51cc8e 100644 (file)
@@ -5,9 +5,11 @@
  *
  * This clock framework is derived from the OMAP version by:
  *
- *     Copyright (C) 2004 Nokia Corporation
+ *     Copyright (C) 2004 - 2005 Nokia Corporation
  *     Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
  *
+ *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -20,6 +22,7 @@
 #include <linux/kref.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/platform_device.h>
 #include <asm/clock.h>
 #include <asm/timer.h>
 
@@ -195,17 +198,37 @@ void clk_recalc_rate(struct clk *clk)
                propagate_rate(clk);
 }
 
-struct clk *clk_get(const char *id)
+/*
+ * Returns a clock. We first try to match both the device id on the bus
+ * and the clock name; if that fails, we fall back to the clock name only.
+ */
+struct clk *clk_get(struct device *dev, const char *id)
 {
        struct clk *p, *clk = ERR_PTR(-ENOENT);
+       int idno;
+
+       if (dev == NULL || dev->bus != &platform_bus_type)
+               idno = -1;
+       else
+               idno = to_platform_device(dev)->id;
 
        mutex_lock(&clock_list_sem);
+       list_for_each_entry(p, &clock_list, node) {
+               if (p->id == idno &&
+                   strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
+                       clk = p;
+                       goto found;
+               }
+       }
+
        list_for_each_entry(p, &clock_list, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }
+
+found:
        mutex_unlock(&clock_list_sem);
 
        return clk;
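
For context, a hedged sketch of a caller of the new two-argument clk_get() is shown below; the pdev pointer is assumed to belong to the calling driver, and "module_clk" is simply one of the standard SH clock names.

	/* Sketch only: resolve a clock by device id first (falling back to
	 * the global name, as the lookup above does), then query its rate. */
	struct clk *clk;

	clk = clk_get(&pdev->dev, "module_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	printk(KERN_INFO "module clock runs at %lu Hz\n", clk_get_rate(clk));
	clk_put(clk);
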
index bfb90eb0b7a6caabc529f33e49c5371a74f62208..48121766e8d243e07065e3f3797da7376092cc9b 100644 (file)
@@ -68,12 +68,14 @@ static void __init cache_init(void)
 
                waysize = cpu_data->dcache.sets;
 
+#ifdef CCR_CACHE_ORA
                /*
                 * If the OC is already in RAM mode, we only have
                 * half of the entries to flush..
                 */
                if (ccr & CCR_CACHE_ORA)
                        waysize >>= 1;
+#endif
 
                waysize <<= cpu_data->dcache.entry_shift;
 
index 1c034c283f594aee3b73ac546909dfb173bfbd17..0049d217561aa726797c97811a7198b60104535f 100644 (file)
@@ -1,8 +1,9 @@
 #
 # Makefile for the Linux/SuperH CPU-specific IRQ handlers.
 #
-obj-y  += ipr.o imask.o
+obj-y  += imask.o
 
+obj-$(CONFIG_CPU_HAS_IPR_IRQ)          += ipr.o
 obj-$(CONFIG_CPU_HAS_PINT_IRQ)         += pint.o
 obj-$(CONFIG_CPU_HAS_MASKREG_IRQ)      += maskreg.o
 obj-$(CONFIG_CPU_HAS_INTC2_IRQ)                += intc2.o
index a33ae3e0a5a5dc808f72c425d69afca96d4ed531..301b505c4278929668bceeee5b2b4b09a4eca06f 100644 (file)
@@ -53,7 +53,10 @@ void static inline set_interrupt_registers(int ip)
 {
        unsigned long __dummy;
 
-       asm volatile("ldc       %2, r6_bank\n\t"
+       asm volatile(
+#ifdef CONFIG_CPU_HAS_SR_RB
+                    "ldc       %2, r6_bank\n\t"
+#endif
                     "stc       sr, %0\n\t"
                     "and       #0xf0, %0\n\t"
                     "shlr2     %0\n\t"
index 74ca576a7ce502984e26cf8511a7808acaf65c03..74defe76a0580c163f2f883c495c78db05c2448d 100644 (file)
  * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780.
  */
 #include <linux/kernel.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
-#include <asm/system.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7760)
+#define INTC2_BASE     0xfe080000
+#define INTC2_INTMSK   (INTC2_BASE + 0x40)
+#define INTC2_INTMSKCLR        (INTC2_BASE + 0x60)
+#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
+#define INTC2_BASE     0xffd40000
+#define INTC2_INTMSK   (INTC2_BASE + 0x38)
+#define INTC2_INTMSKCLR        (INTC2_BASE + 0x3c)
+#endif
 
 static void disable_intc2_irq(unsigned int irq)
 {
        struct intc2_data *p = get_irq_chip_data(irq);
-       ctrl_outl(1 << p->msk_shift,
-                 INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset);
+       ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset);
 }
 
 static void enable_intc2_irq(unsigned int irq)
 {
        struct intc2_data *p = get_irq_chip_data(irq);
-       ctrl_outl(1 << p->msk_shift,
-                 INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset);
+       ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset);
 }
 
 static struct irq_chip intc2_irq_chip = {
@@ -61,12 +68,10 @@ void make_intc2_irq(struct intc2_data *table, unsigned int nr_irqs)
                /* Set the priority level */
                local_irq_save(flags);
 
-               ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET +
-                              p->ipr_offset);
+               ipr = ctrl_inl(INTC2_BASE + p->ipr_offset);
                ipr &= ~(0xf << p->ipr_shift);
                ipr |= p->priority << p->ipr_shift;
-               ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET +
-                         p->ipr_offset);
+               ctrl_outl(ipr, INTC2_BASE + p->ipr_offset);
 
                local_irq_restore(flags);
 
index a0089563cbfcfef2d7618a45c97e401185e0d86c..35eb5751a3aaf842ecaecdb59e66f2dc70bc6d01 100644 (file)
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/module.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/machvec.h>
-
+#include <linux/io.h>
+#include <linux/interrupt.h>
 
 static void disable_ipr_irq(unsigned int irq)
 {
        struct ipr_data *p = get_irq_chip_data(irq);
-       int shift = p->shift*4;
        /* Set the priority in IPR to 0 */
-       ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr);
+       ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr);
 }
 
 static void enable_ipr_irq(unsigned int irq)
 {
        struct ipr_data *p = get_irq_chip_data(irq);
-       int shift = p->shift*4;
        /* Set priority in IPR back to original value */
-       ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr);
+       ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr);
 }
 
 static struct irq_chip ipr_irq_chip = {
@@ -53,6 +49,10 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs)
 
        for (i = 0; i < nr_irqs; i++) {
                unsigned int irq = table[i].irq;
+               table[i].addr = map_ipridx_to_addr(table[i].ipr_idx);
+               /* if the IPR index could not be mapped, skip this entry */
+               if (table[i].addr == 0)
+                       continue;
                disable_irq_nosync(irq);
                set_irq_chip_and_handler_name(irq, &ipr_irq_chip,
                                      handle_level_irq, "level");
@@ -62,83 +62,6 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs)
 }
 EXPORT_SYMBOL(make_ipr_irq);
 
-static struct ipr_data sys_ipr_map[] = {
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-       { TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY },
-       { TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY },
-#ifdef RTC_IRQ
-       { RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY },
-#endif
-#ifdef SCI_ERI_IRQ
-       { SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-       { SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-       { SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY },
-#endif
-#ifdef SCIF1_ERI_IRQ
-       { SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-       { SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-       { SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-       { SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-       { SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY },
-       { DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
-       { DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY },
-       { VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY },
-#endif
-#ifdef SCIF_ERI_IRQ
-       { SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-       { SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-       { SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-       { SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY },
-#endif
-#ifdef IRDA_ERI_IRQ
-       { IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-       { IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-       { IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-       { IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY },
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
-       /*
-        * Initialize the Interrupt Controller (INTC)
-        * registers to their power on values
-        */
-
-       /*
-        * Enable external irq (INTC IRQ mode).
-        * You should set corresponding bits of PFC to "00"
-        * to enable these interrupts.
-        */
-       { IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY },
-       { IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY },
-       { IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY },
-       { IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY },
-       { IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY },
-       { IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY },
-#endif
-#endif
-};
-
-void __init init_IRQ(void)
-{
-       make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map));
-
-#ifdef CONFIG_CPU_HAS_PINT_IRQ
-       init_IRQ_pint();
-#endif
-
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
-       init_IRQ_intc2();
-#endif
-       /* Perform the machine specific initialisation */
-       if (sh_mv.mv_init_irq != NULL)
-               sh_mv.mv_init_irq();
-
-       irq_ctx_init(smp_processor_id());
-}
-
 #if !defined(CONFIG_CPU_HAS_PINT_IRQ)
 int ipr_irq_demux(int irq)
 {
index 389353fba608f7aecf9a6e606f8f28892fbdeaf5..f0f059acfcfbd5707766eea4c8c15064bfd437cf 100644 (file)
@@ -2,5 +2,6 @@
 # Makefile for the Linux/SuperH SH-2 backends.
 #
 
-obj-y  := probe.o
+obj-y  := ex.o probe.o entry.o
 
+obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c
new file mode 100644 (file)
index 0000000..d0440b2
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/kernel/cpu/sh2/clock-sh7619.c
+ *
+ * SH7619 support for the clock framework
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+const static int pll1rate[]={1,2};
+const static int pfc_divisors[]={1,2,0,4};
+
+#if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+       clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_master_clk_ops = {
+       .init           = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+       int idx = (ctrl_inw(FREQCR) & 0x0007);
+       clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7619_module_clk_ops = {
+       .recalc         = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+       clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7];
+}
+
+static struct clk_ops sh7619_bus_clk_ops = {
+       .recalc         = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+       clk->rate = clk->parent->rate;
+}
+
+static struct clk_ops sh7619_cpu_clk_ops = {
+       .recalc         = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7619_clk_ops[] = {
+       &sh7619_master_clk_ops,
+       &sh7619_module_clk_ops,
+       &sh7619_bus_clk_ops,
+       &sh7619_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+       if (idx < ARRAY_SIZE(sh7619_clk_ops))
+               *ops = sh7619_clk_ops[idx];
+}
+
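
As a quick sanity check on the tables above (input frequency purely illustrative, and assuming bus_clk and module_clk have the master clock as their parent, as in the generic SH clock code), take clock mode 1, where PLL2 is 4, with a 10 MHz input and FREQCR selecting pll1rate[1] = 2 and pfc_divisors[1] = 2:

	master_clk = 10 MHz * 4 * 2               = 80 MHz
	bus_clk    = master_clk / pll1rate[1]     = 80 MHz / 2 = 40 MHz
	module_clk = master_clk / pfc_divisors[1] = 80 MHz / 2 = 40 MHz
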
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S
new file mode 100644 (file)
index 0000000..34d51b3
--- /dev/null
@@ -0,0 +1,341 @@
+/*
+ * arch/sh/kernel/cpu/sh2/entry.S
+ *
+ * The SH-2 exception entry
+ *
+ * Copyright (C) 2005,2006 Yoshinori Sato
+ * Copyright (C) 2005  AXE,Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/page.h>
+       
+/* Offsets to the stack */
+OFF_R0  =  0           /* Return value. New ABI also arg4 */
+OFF_R1  =  4           /* New ABI: arg5 */
+OFF_R2  =  8           /* New ABI: arg6 */
+OFF_R3  =  12          /* New ABI: syscall_nr */
+OFF_R4  =  16          /* New ABI: arg0 */
+OFF_R5  =  20          /* New ABI: arg1 */
+OFF_R6  =  24          /* New ABI: arg2 */
+OFF_R7  =  28          /* New ABI: arg3 */
+OFF_SP =  (15*4)
+OFF_PC  =  (16*4)
+OFF_SR =  (16*4+2*4)
+OFF_TRA        =  (16*4+6*4)
+
+#include <asm/entry-macros.S>
+
+ENTRY(exception_handler)
+       ! already saved r0/r1
+       mov.l   r2,@-sp
+       mov.l   r3,@-sp
+       mov     r0,r1
+       cli
+       mov.l   $cpu_mode,r2
+       mov.l   @r2,r0
+       mov.l   @(5*4,r15),r3   ! previous SR
+       shll2   r3              ! set "S" flag
+       rotl    r0              ! T <- "S" flag
+       rotl    r0              ! "S" flag is LSB
+       rotcr   r3              ! T -> r3:b30
+       shlr    r3
+       shlr    r0
+       bt/s    1f
+        mov.l  r3,@(5*4,r15)   ! copy cpu mode to SR
+       ! switch to kernel mode
+       mov     #1,r0
+       rotr    r0
+       rotr    r0
+       mov.l   r0,@r2          ! enter kernel mode
+       mov.l   $current_thread_info,r2
+       mov.l   @r2,r2
+       mov     #0x20,r0
+       shll8   r0
+       add     r2,r0
+       mov     r15,r2          ! r2 = user stack top
+       mov     r0,r15          ! switch kernel stack
+       add     #-4,r15         ! dummy
+       mov.l   r1,@-r15        ! TRA
+       sts.l   macl, @-r15
+       sts.l   mach, @-r15
+       stc.l   gbr, @-r15
+       mov.l   @(4*4,r2),r0
+       mov.l   @(5*4,r2),r1
+       mov.l   r1,@-r15        ! original SR
+       sts.l   pr,@-r15
+       mov.l   r0,@-r15        ! original PC
+       mov     r2,r3
+       add     #(4+2)*4,r3     ! rewind r0 - r3 + exception frame
+       mov.l   r3,@-r15        ! original SP
+       mov.l   r14,@-r15
+       mov.l   r13,@-r15
+       mov.l   r12,@-r15
+       mov.l   r11,@-r15
+       mov.l   r10,@-r15
+       mov.l   r9,@-r15
+       mov.l   r8,@-r15
+       mov.l   r7,@-r15
+       mov.l   r6,@-r15
+       mov.l   r5,@-r15
+       mov.l   r4,@-r15
+       mov     r2,r8           ! copy user -> kernel stack
+       mov.l   @r8+,r3
+       mov.l   r3,@-r15
+       mov.l   @r8+,r2
+       mov.l   r2,@-r15
+       mov.l   @r8+,r1
+       mov.l   r1,@-r15
+       mov.l   @r8+,r0
+       bra     2f
+        mov.l  r0,@-r15
+1:
+       ! in kernel exception
+       mov     #(22-4-4-1)*4+4,r0
+       mov     r15,r2
+       sub     r0,r15
+       mov.l   @r2+,r0         ! old R3
+       mov.l   r0,@-r15        
+       mov.l   @r2+,r0         ! old R2
+       mov.l   r0,@-r15        
+       mov.l   @r2+,r0         ! old R1
+       mov.l   r0,@-r15        
+       mov.l   @r2+,r0         ! old R0
+       mov.l   r0,@-r15        
+       mov.l   @r2+,r3         ! old PC
+       mov.l   @r2+,r0         ! old SR
+       add     #-4,r2          ! exception frame stub (sr)
+       mov.l   r1,@-r2         ! TRA
+       sts.l   macl, @-r2
+       sts.l   mach, @-r2
+       stc.l   gbr, @-r2
+       mov.l   r0,@-r2         ! save old SR
+       sts.l   pr,@-r2
+       mov.l   r3,@-r2         ! save old PC
+       mov     r2,r0
+       add     #8*4,r0
+       mov.l   r0,@-r2         ! save old SP
+       mov.l   r14,@-r2
+       mov.l   r13,@-r2
+       mov.l   r12,@-r2
+       mov.l   r11,@-r2
+       mov.l   r10,@-r2
+       mov.l   r9,@-r2
+       mov.l   r8,@-r2
+       mov.l   r7,@-r2
+       mov.l   r6,@-r2
+       mov.l   r5,@-r2
+       mov.l   r4,@-r2
+       mov.l   @(OFF_R0,r15),r0
+       mov.l   @(OFF_R1,r15),r1
+       mov.l   @(OFF_R2,r15),r2
+       mov.l   @(OFF_R3,r15),r3
+2:
+       mov     #OFF_TRA,r8
+       add     r15,r8
+       mov.l   @r8,r9  
+       mov     #64,r8
+       cmp/hs  r8,r9
+       bt      interrupt_entry ! vec >= 64 is interrupt
+       mov     #32,r8
+       cmp/hs  r8,r9
+       bt      trap_entry      ! 64 > vec >= 32  is trap
+       mov.l   4f,r8
+       mov     r9,r4
+       shll2   r9
+       add     r9,r8
+       mov.l   @r8,r8
+       mov     #0,r9
+       cmp/eq  r9,r8
+       bf      3f
+       mov.l   8f,r8           ! unhandled exception
+3:
+       mov.l   5f,r10
+       jmp     @r8
+        lds    r10,pr
+
+interrupt_entry:
+       mov     r9,r4
+       mov.l   6f,r9
+       mov.l   7f,r8
+       jmp     @r8
+        lds    r9,pr
+
+       .align  2
+4:     .long   exception_handling_table
+5:     .long   ret_from_exception
+6:     .long   ret_from_irq
+7:     .long   do_IRQ
+8:     .long   do_exception_error
+       
+trap_entry:    
+       add     #-0x10,r9
+       shll2   r9                      ! TRA
+       mov     #OFF_TRA,r8
+       add     r15,r8
+       mov.l   r9,@r8
+       mov     r9,r8
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   5f, r9
+       jsr     @r9
+        nop
+#endif
+       sti
+       bra     system_call
+        nop
+       
+       .align  2
+1:     .long   syscall_exit
+2:     .long   break_point_trap_software
+3:     .long   NR_syscalls
+4:     .long   sys_call_table
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:     .long   trace_hardirqs_on
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+       /* Unwind the stack and jmp to the debug entry */
+debug_kernel_fw:
+       mov     r15,r0
+       add     #(22-4)*4-4,r0
+       ldc.l   @r0+,gbr
+       lds.l   @r0+,mach
+       lds.l   @r0+,macl
+       mov     r15,r0
+       mov.l   @(OFF_SP,r0),r1
+       mov     #OFF_SR,r2
+       mov.l   @(r0,r2),r3
+       mov.l   r3,@-r1
+       mov     #OFF_SP,r2
+       mov.l   @(r0,r2),r3
+       mov.l   r3,@-r1
+       mov     r15,r0
+       add     #(22-4)*4-8,r0
+       mov.l   1f,r2
+       mov.l   @r2,r2
+       stc     sr,r3
+       mov.l   r2,@r0
+       mov.l   r3,@r0
+       mov.l   r1,@(8,r0)      
+       mov.l   @r15+, r0
+       mov.l   @r15+, r1
+       mov.l   @r15+, r2
+       mov.l   @r15+, r3
+       mov.l   @r15+, r4
+       mov.l   @r15+, r5
+       mov.l   @r15+, r6
+       mov.l   @r15+, r7
+       mov.l   @r15+, r8
+       mov.l   @r15+, r9
+       mov.l   @r15+, r10
+       mov.l   @r15+, r11
+       mov.l   @r15+, r12
+       mov.l   @r15+, r13
+       mov.l   @r15+, r14
+       add     #8,r15
+       lds.l   @r15+, pr
+       rte
+        mov.l  @r15+,r15
+       .align  2
+1:     .long   gdb_vbr_vector
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
+ENTRY(address_error_handler)
+       mov     r15,r4                          ! regs
+       add     #4,r4
+       mov     #OFF_PC,r0
+       mov.l   @(r0,r15),r6                    ! pc
+       mov.l   1f,r0
+       jmp     @r0
+        mov    #0,r5                           ! writeaccess is unknown
+       .align  2
+
+1:     .long   do_address_error
+
+restore_all:
+       cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   3f, r0
+       jsr     @r0
+        nop
+#endif
+       mov     r15,r0
+       mov.l   $cpu_mode,r2
+       mov     #OFF_SR,r3
+       mov.l   @(r0,r3),r1
+       mov.l   r1,@r2
+       shll2   r1                              ! clear MD bit
+       shlr2   r1
+       mov.l   @(OFF_SP,r0),r2
+       add     #-8,r2
+       mov.l   r2,@(OFF_SP,r0)                 ! point exception frame top
+       mov.l   r1,@(4,r2)                      ! set sr
+       mov     #OFF_PC,r3
+       mov.l   @(r0,r3),r1
+       mov.l   r1,@r2                          ! set pc
+       add     #4*16+4,r0
+       lds.l   @r0+,pr
+       add     #4,r0                           ! skip sr
+       ldc.l   @r0+,gbr
+       lds.l   @r0+,mach
+       lds.l   @r0+,macl
+       get_current_thread_info r0, r1
+       mov.l   $current_thread_info,r1
+       mov.l   r0,@r1
+       mov.l   @r15+,r0
+       mov.l   @r15+,r1
+       mov.l   @r15+,r2
+       mov.l   @r15+,r3
+       mov.l   @r15+,r4
+       mov.l   @r15+,r5
+       mov.l   @r15+,r6
+       mov.l   @r15+,r7
+       mov.l   @r15+,r8
+       mov.l   @r15+,r9
+       mov.l   @r15+,r10
+       mov.l   @r15+,r11
+       mov.l   @r15+,r12
+       mov.l   @r15+,r13
+       mov.l   @r15+,r14
+       mov.l   @r15,r15
+       rte
+        nop
+2:
+       mov.l   1f,r8
+       mov.l   2f,r9
+       jmp     @r9
+        lds    r8,pr
+
+       .align  2
+$current_thread_info:
+       .long   __current_thread_info
+$cpu_mode:     
+       .long   __cpu_mode
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:     .long   trace_hardirqs_off
+#endif
+               
+! common exception handler
+#include "../../entry-common.S"
+       
+       .data
+! cpu operation mode 
+! bit30 = MD (compatible SH3/4)
+__cpu_mode:
+       .long   0x40000000
+               
+       .section        .bss
+__current_thread_info:
+       .long   0
+
+ENTRY(exception_handling_table)
+       .space  4*32
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S
new file mode 100644 (file)
index 0000000..6d285af
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * arch/sh/kernel/cpu/sh2/ex.S
+ *
+ * The SH-2 exception vector table
+ *
+ * Copyright (C) 2005 Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+
+!
+! convert Exception Vector to Exception Number
+!
+exception_entry:       
+no     =       0
+       .rept   256
+       mov.l   r0,@-sp
+       mov     #no,r0
+       bra     exception_trampoline
+       and     #0xff,r0
+no     =       no + 1
+       .endr
+exception_trampoline:
+       mov.l   r1,@-sp
+       mov.l   $exception_handler,r1
+       jmp     @r1
+
+       .align  2
+$exception_entry:
+       .long   exception_entry
+$exception_handler:
+       .long   exception_handler
+!
+! Exception Vector Base
+!
+       .align  2
+ENTRY(vbr_base)
+vector =       0
+       .rept   256
+       .long   exception_entry + vector * 8
+vector =       vector + 1
+       .endr
index f17a2a0d588edf52985018241c333374d7da03da..ba527d9b502411a56db05cb9b840688f8e4ba4fb 100644 (file)
 
 int __init detect_cpu_and_cache_system(void)
 {
-       /*
-        * For now, assume SH7604 .. fix this later.
-        */
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
        cpu_data->type                  = CPU_SH7604;
        cpu_data->dcache.ways           = 4;
-       cpu_data->dcache.way_shift      = 6;
+       cpu_data->dcache.way_incr       = (1<<10);
        cpu_data->dcache.sets           = 64;
        cpu_data->dcache.entry_shift    = 4;
        cpu_data->dcache.linesz         = L1_CACHE_BYTES;
        cpu_data->dcache.flags          = 0;
-
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+       cpu_data->type                  = CPU_SH7619;
+       cpu_data->dcache.ways           = 4;
+       cpu_data->dcache.way_incr       = (1<<12);
+       cpu_data->dcache.sets           = 256;
+       cpu_data->dcache.entry_shift    = 4;
+       cpu_data->dcache.linesz         = L1_CACHE_BYTES;
+       cpu_data->dcache.flags          = 0;
+#endif
        /*
         * SH-2 doesn't have separate caches
         */
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
new file mode 100644 (file)
index 0000000..82c2d90
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * SH7619 Setup
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+       {
+               .mapbase        = 0xf8400000,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 88, 89, 91, 90},
+       }, {
+               .mapbase        = 0xf8410000,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 92, 93, 95, 94},
+       }, {
+               .mapbase        = 0xf8420000,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 96, 97, 99, 98},
+       }, {
+               .flags = 0,
+       }
+};
+
+static struct platform_device sci_device = {
+       .name           = "sh-sci",
+       .id             = -1,
+       .dev            = {
+               .platform_data  = sci_platform_data,
+       },
+};
+
+static struct platform_device *sh7619_devices[] __initdata = {
+       &sci_device,
+};
+
+static int __init sh7619_devices_setup(void)
+{
+       return platform_add_devices(sh7619_devices,
+                                   ARRAY_SIZE(sh7619_devices));
+}
+__initcall(sh7619_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile
new file mode 100644 (file)
index 0000000..350972a
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the Linux/SuperH SH-2A backends.
+#
+
+obj-y  := common.o probe.o
+
+common-y       += $(addprefix ../sh2/, ex.o)
+common-y       += $(addprefix ../sh2/, entry.o)
+
+obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c
new file mode 100644 (file)
index 0000000..a9ad309
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/clock-sh7206.c
+ *
+ * SH7206 support for the clock framework
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * Based on clock-sh4.c
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+const static int pll1rate[]={1,2,3,4,6,8};
+const static int pfc_divisors[]={1,2,3,4,6,8,12};
+#define ifc_divisors pfc_divisors
+
+#if (CONFIG_SH_CLK_MD == 2)
+#define PLL2 (4)
+#elif (CONFIG_SH_CLK_MD == 6)
+#define PLL2 (2)
+#elif (CONFIG_SH_CLK_MD == 7)
+#define PLL2 (1)
+#else
+#error "Illegal Clock Mode!"
+#endif
+
+static void master_clk_init(struct clk *clk)
+{
+       clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_master_clk_ops = {
+       .init           = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+       int idx = (ctrl_inw(FREQCR) & 0x0007);
+       clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7206_module_clk_ops = {
+       .recalc         = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+       clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007];
+}
+
+static struct clk_ops sh7206_bus_clk_ops = {
+       .recalc         = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+       int idx = (ctrl_inw(FREQCR) & 0x0007);
+       clk->rate = clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct clk_ops sh7206_cpu_clk_ops = {
+       .recalc         = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7206_clk_ops[] = {
+       &sh7206_master_clk_ops,
+       &sh7206_module_clk_ops,
+       &sh7206_bus_clk_ops,
+       &sh7206_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+       if (idx < ARRAY_SIZE(sh7206_clk_ops))
+               *ops = sh7206_clk_ops[idx];
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
new file mode 100644 (file)
index 0000000..87c6c05
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/probe.c
+ *
+ * CPU Subtype Probing for SH-2A.
+ *
+ * Copyright (C) 2004, 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+int __init detect_cpu_and_cache_system(void)
+{
+       /* Just SH7206 for now .. */
+       cpu_data->type                  = CPU_SH7206;
+
+       cpu_data->dcache.ways           = 4;
+       cpu_data->dcache.way_incr       = (1 << 11);
+       cpu_data->dcache.sets           = 128;
+       cpu_data->dcache.entry_shift    = 4;
+       cpu_data->dcache.linesz         = L1_CACHE_BYTES;
+       cpu_data->dcache.flags          = 0;
+
+       /*
+        * The icache is the same as the dcache as far as this setup is
+        * concerned. The only real difference in hardware is that the icache
+        * lacks the U bit that the dcache has, none of this has any bearing
+        * on the cache info.
+        */
+       cpu_data->icache                = cpu_data->dcache;
+
+       return 0;
+}
+
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c
new file mode 100644 (file)
index 0000000..cdfeef4
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * SH7206 Setup
+ *
+ *  Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+       {
+               .mapbase        = 0xfffe8000,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 240, 241, 242, 243},
+       }, {
+               .mapbase        = 0xfffe8800,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 244, 245, 246, 247},
+       }, {
+               .mapbase        = 0xfffe9000,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 248, 249, 250, 251},
+       }, {
+               .mapbase        = 0xfffe9800,
+               .flags          = UPF_BOOT_AUTOCONF,
+               .type           = PORT_SCIF,
+               .irqs           =  { 252, 253, 254, 255},
+       }, {
+               .flags = 0,
+       }
+};
+
+static struct platform_device sci_device = {
+       .name           = "sh-sci",
+       .id             = -1,
+       .dev            = {
+               .platform_data  = sci_platform_data,
+       },
+};
+
+static struct platform_device *sh7206_devices[] __initdata = {
+       &sci_device,
+};
+
+static int __init sh7206_devices_setup(void)
+{
+       return platform_add_devices(sh7206_devices,
+                                   ARRAY_SIZE(sh7206_devices));
+}
+__initcall(sh7206_devices_setup);
index 58d3815695ffa70d21f15f4243461223b0b70064..83905e4e43872017ea905073f12410eca98e3a80 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the Linux/SuperH SH-3 backends.
 #
 
-obj-y  := ex.o probe.o
+obj-y  := ex.o probe.o entry.o
 
 # CPU subtype setup
 obj-$(CONFIG_CPU_SUBTYPE_SH7705)       += setup-sh7705.o
index 10461a745e5f0f89471fea0c04dfe9c19f6a96ef..b791a29fdb6245773c045eb55ab5e39e2e830e79 100644 (file)
@@ -24,7 +24,7 @@ static int pfc_divisors[]    = { 1, 2, 4, 1, 3, 6, 1, 1 };
 
 static void set_bus_parent(struct clk *clk)
 {
-       struct clk *bus_clk = clk_get("bus_clk");
+       struct clk *bus_clk = clk_get(NULL, "bus_clk");
        clk->parent = bus_clk;
        clk_put(bus_clk);
 }
similarity index 58%
rename from arch/sh/kernel/entry.S
rename to arch/sh/kernel/cpu/sh3/entry.S
index 39aaefb2d83f427d58a0b05ad48a45868dfba2e2..8c0dc2700c69ebc79df3085e0c2e93ce2099a6bb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/sh/entry.S
+ * arch/sh/kernel/entry.S
  *
  *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
  *  Copyright (C) 2003 - 2006  Paul Mundt
@@ -7,15 +7,16 @@
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
- *
  */
 #include <linux/sys.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/cpu/mmu_context.h>
 #include <asm/unistd.h>
+#include <asm/cpu/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -81,6 +82,8 @@ OFF_TRA       =  (16*4+6*4)
 #define k_g_imask      r6_bank /* r6_bank1 */
 #define current                r7      /* r7_bank1 */
 
+#include <asm/entry-macros.S>
+       
 /*
  * Kernel mode register usage:
  *     k0      scratch
@@ -107,26 +110,6 @@ OFF_TRA    =  (16*4+6*4)
 ! this first version depends *much* on C implementation.
 !
 
-#define CLI()                          \
-       stc     sr, r0;                 \
-       or      #0xf0, r0;              \
-       ldc     r0, sr
-
-#define STI()                          \
-       mov.l   __INV_IMASK, r11;       \
-       stc     sr, r10;                \
-       and     r11, r10;               \
-       stc     k_g_imask, r11;         \
-       or      r11, r10;               \
-       ldc     r10, sr
-
-#if defined(CONFIG_PREEMPT)
-#  define preempt_stop()       CLI()
-#else
-#  define preempt_stop()
-#  define resume_kernel                restore_all
-#endif
-
 #if defined(CONFIG_MMU)
        .align  2
 ENTRY(tlb_miss_load)
@@ -155,29 +138,14 @@ ENTRY(tlb_protection_violation_store)
 
 call_dpf:
        mov.l   1f, r0
-       mov     r5, r8
-       mov.l   @r0, r6
-       mov     r6, r9
-       mov.l   2f, r0
-       sts     pr, r10
-       jsr     @r0
-        mov    r15, r4
-       !
-       tst     r0, r0
-       bf/s    0f
-        lds    r10, pr
-       rts
-        nop
-0:     STI()
+       mov.l   @r0, r6         ! address
        mov.l   3f, r0
-       mov     r9, r6
-       mov     r8, r5
+
        jmp     @r0
-        mov    r15, r4
+        mov    r15, r4         ! regs
 
        .align 2
 1:     .long   MMU_TEA
-2:     .long   __do_page_fault
 3:     .long   do_page_fault
 
        .align  2
@@ -203,32 +171,6 @@ call_dae:
 2:     .long   do_address_error
 #endif /* CONFIG_MMU */
 
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
-! If both are configured, handle the debug traps (breakpoints) in SW,
-! but still allow BIOS traps to FW.
-
-       .align  2
-debug_kernel:
-#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
-       /* Force BIOS call to FW (debug_trap put TRA in r8) */
-       mov     r8,r0
-       shlr2   r0
-       cmp/eq  #0x3f,r0
-       bt      debug_kernel_fw
-#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
-
-debug_enter:           
-#if defined(CONFIG_SH_KGDB)
-       /* Jump to kgdb, pass stacked regs as arg */
-debug_kernel_sw:
-       mov.l   3f, r0
-       jmp     @r0
-        mov    r15, r4
-       .align  2
-3:     .long   kgdb_handle_exception
-#endif /* CONFIG_SH_KGDB */
-
 #if defined(CONFIG_SH_STANDARD_BIOS)
        /* Unwind the stack and jmp to the debug entry */
 debug_kernel_fw:
@@ -269,276 +211,6 @@ debug_kernel_fw:
 2:     .long   gdb_vbr_vector
 #endif /* CONFIG_SH_STANDARD_BIOS */
 
-#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
-
-
-       .align  2
-debug_trap:    
-#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
-       mov     #OFF_SR, r0
-       mov.l   @(r0,r15), r0           ! get status register
-       shll    r0
-       shll    r0                      ! kernel space?
-       bt/s    debug_kernel
-#endif
-        mov.l  @r15, r0                ! Restore R0 value
-       mov.l   1f, r8
-       jmp     @r8
-        nop
-
-       .align  2
-ENTRY(exception_error)
-       !
-       STI()
-       mov.l   2f, r0
-       jmp     @r0
-        nop
-
-!
-       .align  2
-1:     .long   break_point_trap_software
-2:     .long   do_exception_error
-
-       .align  2
-ret_from_exception:
-       preempt_stop()
-ENTRY(ret_from_irq)
-       !
-       mov     #OFF_SR, r0
-       mov.l   @(r0,r15), r0   ! get status register
-       shll    r0
-       shll    r0              ! kernel space?
-       bt/s    resume_kernel   ! Yes, it's from kernel, go back soon
-        GET_THREAD_INFO(r8)
-
-#ifdef CONFIG_PREEMPT
-       bra     resume_userspace
-        nop
-ENTRY(resume_kernel)
-       mov.l   @(TI_PRE_COUNT,r8), r0  ! current_thread_info->preempt_count
-       tst     r0, r0
-       bf      noresched
-need_resched:
-       mov.l   @(TI_FLAGS,r8), r0      ! current_thread_info->flags
-       tst     #_TIF_NEED_RESCHED, r0  ! need_resched set?
-       bt      noresched
-
-       mov     #OFF_SR, r0
-       mov.l   @(r0,r15), r0           ! get status register
-       and     #0xf0, r0               ! interrupts off (exception path)?
-       cmp/eq  #0xf0, r0
-       bt      noresched
-
-       mov.l   1f, r0
-       mov.l   r0, @(TI_PRE_COUNT,r8)
-
-       STI()
-       mov.l   2f, r0
-       jsr     @r0
-        nop
-       mov     #0, r0
-       mov.l   r0, @(TI_PRE_COUNT,r8)
-       CLI()
-
-       bra     need_resched
-        nop
-noresched:
-       bra     restore_all
-        nop
-
-       .align 2
-1:     .long   PREEMPT_ACTIVE
-2:     .long   schedule
-#endif
-
-ENTRY(resume_userspace)
-       ! r8: current_thread_info
-       CLI()
-       mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
-       tst     #_TIF_WORK_MASK, r0
-       bt/s    restore_all
-        tst    #_TIF_NEED_RESCHED, r0
-
-       .align  2
-work_pending:
-       ! r0: current_thread_info->flags
-       ! r8: current_thread_info
-       ! t:  result of "tst    #_TIF_NEED_RESCHED, r0"
-       bf/s    work_resched
-        tst    #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
-work_notifysig:
-       bt/s    restore_all
-        mov    r15, r4
-       mov     r12, r5         ! set arg1(save_r0)
-       mov     r0, r6
-       mov.l   2f, r1
-       mova    restore_all, r0
-       jmp     @r1
-        lds    r0, pr
-work_resched:
-#ifndef CONFIG_PREEMPT
-       ! gUSA handling
-       mov.l   @(OFF_SP,r15), r0       ! get user space stack pointer
-       mov     r0, r1
-       shll    r0
-       bf/s    1f
-        shll   r0
-       bf/s    1f
-        mov    #OFF_PC, r0
-       !                                 SP >= 0xc0000000 : gUSA mark
-       mov.l   @(r0,r15), r2           ! get user space PC (program counter)
-       mov.l   @(OFF_R0,r15), r3       ! end point
-       cmp/hs  r3, r2                  ! r2 >= r3? 
-       bt      1f
-       add     r3, r1                  ! rewind point #2
-       mov.l   r1, @(r0,r15)           ! reset PC to rewind point #2
-       !
-1:
-#endif
-       mov.l   1f, r1
-       jsr     @r1                             ! schedule
-        nop
-       CLI()
-       !
-       mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
-       tst     #_TIF_WORK_MASK, r0
-       bt      restore_all
-       bra     work_pending
-        tst    #_TIF_NEED_RESCHED, r0
-
-       .align  2
-1:     .long   schedule
-2:     .long   do_notify_resume
-
-       .align  2
-syscall_exit_work:
-       ! r0: current_thread_info->flags
-       ! r8: current_thread_info
-       tst     #_TIF_SYSCALL_TRACE, r0
-       bt/s    work_pending
-        tst    #_TIF_NEED_RESCHED, r0
-       STI()
-       ! XXX setup arguments...
-       mov.l   4f, r0                  ! do_syscall_trace
-       jsr     @r0
-        nop
-       bra     resume_userspace
-        nop
-
-       .align  2
-syscall_trace_entry:
-       !                       Yes it is traced.
-       ! XXX setup arguments...
-       mov.l   4f, r11         ! Call do_syscall_trace which notifies
-       jsr     @r11            ! superior (will chomp R[0-7])
-        nop
-       !                       Reload R0-R4 from kernel stack, where the
-       !                       parent may have modified them using
-       !                       ptrace(POKEUSR).  (Note that R0-R2 are
-       !                       used by the system call handler directly
-       !                       from the kernel stack anyway, so don't need
-       !                       to be reloaded here.)  This allows the parent
-       !                       to rewrite system calls and args on the fly.
-       mov.l   @(OFF_R4,r15), r4   ! arg0
-       mov.l   @(OFF_R5,r15), r5
-       mov.l   @(OFF_R6,r15), r6
-       mov.l   @(OFF_R7,r15), r7   ! arg3
-       mov.l   @(OFF_R3,r15), r3   ! syscall_nr
-       !                   Arrange for do_syscall_trace to be called
-       !                   again as the system call returns.
-       mov.l   2f, r10                 ! Number of syscalls
-       cmp/hs  r10, r3
-       bf      syscall_call
-       mov     #-ENOSYS, r0
-       bra     syscall_exit
-        mov.l  r0, @(OFF_R0,r15)       ! Return value
-
-/*
- * Syscall interface:
- *
- *     Syscall #: R3
- *     Arguments #0 to #3: R4--R7
- *     Arguments #4 to #6: R0, R1, R2
- *     TRA: (number of arguments + 0x10) x 4
- *
- * This code also handles delegating other traps to the BIOS/gdb stub
- * according to:
- *
- * Trap number
- * (TRA>>2)        Purpose
- * --------        -------
- * 0x0-0xf         old syscall ABI
- * 0x10-0x1f       new syscall ABI
- * 0x20-0xff       delegated through debug_trap to BIOS/gdb stub.
- *
- * Note: When we're first called, the TRA value must be shifted
- * right 2 bits in order to get the value that was used as the "trapa"
- * argument.
- */
-
-       .align  2
-       .globl  ret_from_fork
-ret_from_fork:
-       mov.l   1f, r8
-       jsr     @r8
-        mov    r0, r4
-       bra     syscall_exit
-        nop
-       .align  2
-1:     .long   schedule_tail
-       !
-ENTRY(system_call)
-       mov.l   1f, r9
-       mov.l   @r9, r8         ! Read from TRA (Trap Address) Register
-       !
-       ! Is the trap argument >= 0x20? (TRA will be >= 0x80)
-       mov     #0x7f, r9
-       cmp/hi  r9, r8
-       bt/s    0f
-        mov    #OFF_TRA, r9
-       add     r15, r9
-       !
-       mov.l   r8, @r9                 ! set TRA value to tra
-       STI()
-       !                   Call the system call handler through the table.
-       !                   First check for bad syscall number
-       mov     r3, r9
-       mov.l   2f, r8                  ! Number of syscalls
-       cmp/hs  r8, r9
-       bf/s    good_system_call
-        GET_THREAD_INFO(r8)
-syscall_badsys:                        ! Bad syscall number
-       mov     #-ENOSYS, r0
-       bra     resume_userspace
-        mov.l  r0, @(OFF_R0,r15)       ! Return value
-       !
-0:
-       bra     debug_trap
-        nop
-       !
-good_system_call:              ! Good syscall number
-       mov.l   @(TI_FLAGS,r8), r8
-       mov     #_TIF_SYSCALL_TRACE, r10
-       tst     r10, r8
-       bf      syscall_trace_entry
-       !
-syscall_call:
-       shll2   r9              ! x4
-       mov.l   3f, r8          ! Load the address of sys_call_table
-       add     r8, r9
-       mov.l   @r9, r8
-       jsr     @r8             ! jump to specific syscall handler
-        nop
-       mov.l   @(OFF_R0,r15), r12              ! save r0
-       mov.l   r0, @(OFF_R0,r15)               ! save the return value
-       !
-syscall_exit:
-       CLI()
-       !
-       GET_THREAD_INFO(r8)
-       mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
-       tst     #_TIF_ALLWORK_MASK, r0
-       bf      syscall_exit_work
 restore_all:
        mov.l   @r15+, r0
        mov.l   @r15+, r1
@@ -606,7 +278,9 @@ skip_restore:
        !
        ! Calculate new SR value
        mov     k3, k2                  ! original SR value
-       mov.l   9f, k1
+       mov     #0xf0, k1
+       extu.b  k1, k1
+       not     k1, k1
        and     k1, k2                  ! Mask orignal SR value
        !
        mov     k3, k0                  ! Calculate IMASK-bits
@@ -632,16 +306,12 @@ skip_restore:
         nop
 
        .align  2
-1:     .long   TRA
-2:     .long   NR_syscalls
-3:     .long   sys_call_table
-4:     .long   do_syscall_trace
 5:     .long   0x00001000      ! DSP
 7:     .long   0x30000000
-9:
-__INV_IMASK:
-       .long   0xffffff0f      ! ~(IMASK)
 
+! common exception handler
+#include "../../entry-common.S"
+       
 ! Exception Vector Base
 !
 !      Should be aligned page boundary.
@@ -661,9 +331,176 @@ general_exception:
 2:     .long   ret_from_exception
 !
 !
+
+/* This code makes some assumptions to improve performance.
+ * Make sure they are still true. */
+#if PTRS_PER_PGD != PTRS_PER_PTE
+#error PGD and PTE sizes don't match
+#endif
+
+/* gas doesn't flag impossible values for mov #immediate as an error */
+#if (_PAGE_PRESENT >> 2) > 0x7f
+#error cannot load PAGE_PRESENT as an immediate
+#endif
+#if _PAGE_DIRTY > 0x7f
+#error cannot load PAGE_DIRTY as an immediate
+#endif
+#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
+#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
+#endif
+
+#if defined(CONFIG_CPU_SH4)
+#define ldmmupteh(r)   mov.l   8f, r
+#else
+#define ldmmupteh(r)   mov     #MMU_PTEH, r
+#endif
+
        .balign         1024,0,1024
 tlb_miss:
-       mov.l   1f, k2
+#ifdef COUNT_EXCEPTIONS
+       ! Increment the counts
+       mov.l   9f, k1
+       mov.l   @k1, k2
+       add     #1, k2
+       mov.l   k2, @k1
+#endif
+
+       ! k0 scratch
+       ! k1 pgd and pte pointers
+       ! k2 faulting address
+       ! k3 pgd and pte index masks
+       ! k4 shift
+
+       ! Load up the pgd entry (k1)
+
+       ldmmupteh(k0)                   !  9 LS (latency=2)     MMU_PTEH
+
+       mov.w   4f, k3                  !  8 LS (latency=2)     (PTRS_PER_PGD-1) << 2
+       mov     #-(PGDIR_SHIFT-2), k4   !  6 EX
+
+       mov.l   @(MMU_TEA-MMU_PTEH,k0), k2      ! 18 LS (latency=2)
+
+       mov.l   @(MMU_TTB-MMU_PTEH,k0), k1      ! 18 LS (latency=2)
+
+       mov     k2, k0                  !   5 MT (latency=0)
+       shld    k4, k0                  !  99 EX
+
+       and     k3, k0                  !  78 EX
+
+       mov.l   @(k0, k1), k1           !  21 LS (latency=2)
+       mov     #-(PAGE_SHIFT-2), k4    !   6 EX
+
+       ! Load up the pte entry (k2)
+
+       mov     k2, k0                  !   5 MT (latency=0)
+       shld    k4, k0                  !  99 EX
+
+       tst     k1, k1                  !  86 MT
+
+       bt      20f                     ! 110 BR
+
+       and     k3, k0                  !  78 EX
+       mov.w   5f, k4                  !   8 LS (latency=2)    _PAGE_PRESENT
+
+       mov.l   @(k0, k1), k2           !  21 LS (latency=2)
+       add     k0, k1                  !  49 EX
+
+#ifdef CONFIG_CPU_HAS_PTEA
+       ! Test the entry for present and _PAGE_ACCESSED
+
+       mov     #-28, k3                !   6 EX
+       mov     k2, k0                  !   5 MT (latency=0)
+
+       tst     k4, k2                  !  68 MT
+       shld    k3, k0                  !  99 EX
+
+       bt      20f                     ! 110 BR
+
+       ! Set PTEA register
+       ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
+       !
+       ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
+
+       and     #0xe, k0                !  79 EX
+
+       mov     k0, k3                  !   5 MT (latency=0)
+       mov     k2, k0                  !   5 MT (latency=0)
+
+       and     #1, k0                  !  79 EX
+
+       or      k0, k3                  !  82 EX
+
+       ldmmupteh(k0)                   !   9 LS (latency=2)
+       shll2   k4                      ! 101 EX                _PAGE_ACCESSED
+
+       tst     k4, k2                  !  68 MT
+
+       mov.l   k3, @(MMU_PTEA-MMU_PTEH,k0)     ! 27 LS
+
+       mov.l   7f, k3                  !   9 LS (latency=2)    _PAGE_FLAGS_HARDWARE_MASK
+
+       ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+#else
+
+       ! Test the entry for present and _PAGE_ACCESSED
+
+       mov.l   7f, k3                  !   9 LS (latency=2)    _PAGE_FLAGS_HARDWARE_MASK
+       tst     k4, k2                  !  68 MT
+
+       shll2   k4                      ! 101 EX                _PAGE_ACCESSED
+       ldmmupteh(k0)                   !   9 LS (latency=2)
+
+       bt      20f                     ! 110 BR
+       tst     k4, k2                  !  68 MT
+
+       ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
+
+#endif
+
+       ! Set up the entry
+
+       and     k2, k3                  !  78 EX
+       bt/s    10f                     ! 108 BR
+
+        mov.l  k3, @(MMU_PTEL-MMU_PTEH,k0)     ! 27 LS
+
+       ldtlb                           ! 128 CO
+
+       ! At least one instruction between ldtlb and rte
+       nop                             ! 119 NOP
+
+       rte                             ! 126 CO
+
+        nop                            ! 119 NOP
+
+
+10:    or      k4, k2                  !  82 EX
+
+       ldtlb                           ! 128 CO
+
+       ! At least one instruction between ldtlb and rte
+       mov.l   k2, @k1                 !  27 LS
+
+       rte                             ! 126 CO
+
+       ! Note we cannot execute mov here, because it is executed after
+       ! restoring SSR, so would be executed in user space.
+        nop                            ! 119 NOP
+
+
+       .align 5
+       ! One cache line if possible...
+1:     .long   swapper_pg_dir
+4:     .short  (PTRS_PER_PGD-1) << 2
+5:     .short  _PAGE_PRESENT
+7:     .long   _PAGE_FLAGS_HARDWARE_MASK
+8:     .long   MMU_PTEH
+#ifdef COUNT_EXCEPTIONS
+9:     .long   exception_count_miss
+#endif
+
+       ! Either pgd or pte not present
+20:    mov.l   1f, k2
        mov.l   4f, k3
        bra     handle_exception
         mov.l  @k2, k2
@@ -710,8 +547,9 @@ ENTRY(handle_exception)
        bt/s    1f              ! It's a kernel to kernel transition.
         mov    r15, k0         ! save original stack to k0
        /* User space to kernel */
-       mov     #(THREAD_SIZE >> 8), k1
+       mov     #(THREAD_SIZE >> 10), k1
        shll8   k1              ! k1 := THREAD_SIZE
+       shll2   k1
        add     current, k1
        mov     k1, r15         ! change to kernel stack
        !
@@ -761,7 +599,7 @@ skip_save:
        ! Save the user registers on the stack.
        mov.l   k2, @-r15       ! EXPEVT
 
-       mov     #-1, k4
+       mov     #-1, k4
        mov.l   k4, @-r15       ! set TRA (default: -1)
        !
        sts.l   macl, @-r15
@@ -813,6 +651,15 @@ skip_save:
        bf      interrupt_exception
        shlr2   r8
        shlr    r8
+
+#ifdef COUNT_EXCEPTIONS
+       mov.l   5f, r9
+       add     r8, r9
+       mov.l   @r9, r10
+       add     #1, r10
+       mov.l   r10, @r9
+#endif
+
        mov.l   4f, r9
        add     r8, r9
        mov.l   @r9, r9
@@ -826,6 +673,9 @@ skip_save:
 2:     .long   0x000080f0      ! FD=1, IMASK=15
 3:     .long   0xcfffffff      ! RB=0, BL=0
 4:     .long   exception_handling_table
+#ifdef COUNT_EXCEPTIONS
+5:     .long   exception_count_table
+#endif
 
 interrupt_exception:
        mov.l   1f, r9
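
The CONFIG_CPU_HAS_PTEA path in the new tlb_miss handler packs the extended attribute bits exactly as its comment states: PTE bits 31:29 move down into PTEA bits 3:1 and PTE bit 0 is kept as PTEA bit 0. A stand-alone sketch of the same computation, with an arbitrary example value:

#include <stdio.h>

static unsigned long pte_to_ptea(unsigned long pteval)
{
	/* MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1) */
	return ((pteval >> 28) & 0xe) | (pteval & 0x1);
}

int main(void)
{
	unsigned long pte = 0xe0001107UL;	/* arbitrary example PTE */

	printf("PTEA = 0x%lx\n", pte_to_ptea(pte));
	return 0;
}
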
index 8dbf3895ece7406c248e2405f8bc18b2392f0f81..6e415baf04b4f23bfb5767172dfbe18485ec1b2e 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for the Linux/SuperH SH-4 backends.
 #
 
-obj-y  := ex.o probe.o
+obj-y  := ex.o probe.o common.o
+common-y       += $(addprefix ../sh3/, entry.o)
 
 obj-$(CONFIG_SH_FPU)                    += fpu.o
 obj-$(CONFIG_SH_STORE_QUEUES)          += sq.o
index bfdf5fe8d948f221ac2ef2371a952dbfd6f18561..fa2019aabd74f1827bfb4c8374b6be842393a616 100644 (file)
@@ -97,7 +97,7 @@ static void shoc_clk_recalc(struct clk *clk)
 
 static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
 {
-       struct clk *bclk = clk_get("bus_clk");
+       struct clk *bclk = clk_get(NULL, "bus_clk");
        unsigned long bclk_rate = clk_get_rate(bclk);
 
        clk_put(bclk);
@@ -151,7 +151,7 @@ static struct clk *sh4202_onchip_clocks[] = {
 
 static int __init sh4202_clk_init(void)
 {
-       struct clk *clk = clk_get("master_clk");
+       struct clk *clk = clk_get(NULL, "master_clk");
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
index 93ad367342c9c6a715a4ea96f22b987a007f967b..9e6a216750c800529788b340aa12fdb7f5956f17 100644 (file)
@@ -98,7 +98,7 @@ static struct clk *sh7780_onchip_clocks[] = {
 
 static int __init sh7780_clk_init(void)
 {
-       struct clk *clk = clk_get("master_clk");
+       struct clk *clk = clk_get(NULL, "master_clk");
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) {
index f486c07e10e286b9d4776874beefb6b184584203..7624677f66281e7ef31b99d9b48caf1bc7534b5d 100644 (file)
@@ -282,11 +282,8 @@ ieee_fpe_handler (struct pt_regs *regs)
                        grab_fpu(regs);
                        restore_fpu(tsk);
                        set_tsk_thread_flag(tsk, TIF_USEDFPU);
-               } else {
-                       tsk->thread.trap_no = 11;
-                       tsk->thread.error_code = 0;
+               } else
                        force_sig(SIGFPE, tsk);
-               }
 
                regs->pc = nextpc;
                return 1;
@@ -296,29 +293,29 @@ ieee_fpe_handler (struct pt_regs *regs)
 }
 
 asmlinkage void
-do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7,
-            struct pt_regs regs)
+do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6,
+            unsigned long r7, struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        struct task_struct *tsk = current;
 
-       if (ieee_fpe_handler (&regs))
+       if (ieee_fpe_handler(regs))
                return;
 
-       regs.pc += 2;
-       save_fpu(tsk, &regs);
-       tsk->thread.trap_no = 11;
-       tsk->thread.error_code = 0;
+       regs->pc += 2;
+       save_fpu(tsk, regs);
        force_sig(SIGFPE, tsk);
 }
 
 asmlinkage void
 do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6,
-                    unsigned long r7, struct pt_regs regs)
+                    unsigned long r7, struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        struct task_struct *tsk = current;
 
-       grab_fpu(&regs);
-       if (!user_mode(&regs)) {
+       grab_fpu(regs);
+       if (!user_mode(regs)) {
                printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
                return;
        }
index c294de1e14a3c2ae8d994e25a7f238fcbce9b8b6..afe0f1b1c030636f6cd156c8d48796d5843e0cb4 100644 (file)
@@ -79,16 +79,16 @@ int __init detect_cpu_and_cache_system(void)
        case 0x205:
                cpu_data->type = CPU_SH7750;
                cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
-                                  CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+                                  CPU_HAS_PERF_COUNTER;
                break;
        case 0x206:
                cpu_data->type = CPU_SH7750S;
                cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
-                                  CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
+                                  CPU_HAS_PERF_COUNTER;
                break;
        case 0x1100:
                cpu_data->type = CPU_SH7751;
-               cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+               cpu_data->flags |= CPU_HAS_FPU;
                break;
        case 0x2000:
                cpu_data->type = CPU_SH73180;
@@ -126,23 +126,22 @@ int __init detect_cpu_and_cache_system(void)
                break;
        case 0x8000:
                cpu_data->type = CPU_ST40RA;
-               cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+               cpu_data->flags |= CPU_HAS_FPU;
                break;
        case 0x8100:
                cpu_data->type = CPU_ST40GX1;
-               cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+               cpu_data->flags |= CPU_HAS_FPU;
                break;
        case 0x700:
                cpu_data->type = CPU_SH4_501;
                cpu_data->icache.ways = 2;
                cpu_data->dcache.ways = 2;
-               cpu_data->flags |= CPU_HAS_PTEA;
                break;
        case 0x600:
                cpu_data->type = CPU_SH4_202;
                cpu_data->icache.ways = 2;
                cpu_data->dcache.ways = 2;
-               cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+               cpu_data->flags |= CPU_HAS_FPU;
                break;
        case 0x500 ... 0x501:
                switch (prr) {
@@ -160,7 +159,7 @@ int __init detect_cpu_and_cache_system(void)
                cpu_data->icache.ways = 2;
                cpu_data->dcache.ways = 2;
 
-               cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+               cpu_data->flags |= CPU_HAS_FPU;
 
                break;
        default:
@@ -173,6 +172,10 @@ int __init detect_cpu_and_cache_system(void)
        cpu_data->dcache.ways = 1;
 #endif
 
+#ifdef CONFIG_CPU_HAS_PTEA
+       cpu_data->flags |= CPU_HAS_PTEA;
+#endif
+
        /*
         * On anything that's not a direct-mapped cache, look to the CVR
         * for I/D-cache specifics.
index 50812d57c1c1a426aca204a5ad3874d0cfd34324..bbcb06f18b04dc6d8f697873d62880bdbeb52481 100644 (file)
@@ -2,6 +2,7 @@
  * SH7750/SH7751 Setup
  *
  *  Copyright (C) 2006  Paul Mundt
+ *  Copyright (C) 2006  Jamie Lenehan
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -10,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/serial.h>
+#include <linux/io.h>
 #include <asm/sci.h>
 
 static struct plat_sci_port sci_platform_data[] = {
@@ -46,3 +48,71 @@ static int __init sh7750_devices_setup(void)
                                    ARRAY_SIZE(sh7750_devices));
 }
 __initcall(sh7750_devices_setup);
+
+static struct ipr_data sh7750_ipr_map[] = {
+       /* IRQ, IPR-idx, shift, priority */
+       { 16, 0, 12, 2 }, /* TMU0 TUNI*/
+       { 17, 0, 12, 2 }, /* TMU1 TUNI */
+       { 18, 0,  4, 2 }, /* TMU2 TUNI */
+       { 19, 0,  4, 2 }, /* TMU2 TIPCI */
+       { 27, 1, 12, 2 }, /* WDT ITI */
+       { 20, 0,  0, 2 }, /* RTC ATI (alarm) */
+       { 21, 0,  0, 2 }, /* RTC PRI (period) */
+       { 22, 0,  0, 2 }, /* RTC CUI (carry) */
+       { 23, 1,  4, 3 }, /* SCI ERI */
+       { 24, 1,  4, 3 }, /* SCI RXI */
+       { 25, 1,  4, 3 }, /* SCI TXI */
+       { 40, 2,  4, 3 }, /* SCIF ERI */
+       { 41, 2,  4, 3 }, /* SCIF RXI */
+       { 42, 2,  4, 3 }, /* SCIF BRI */
+       { 43, 2,  4, 3 }, /* SCIF TXI */
+       { 34, 2,  8, 7 }, /* DMAC DMTE0 */
+       { 35, 2,  8, 7 }, /* DMAC DMTE1 */
+       { 36, 2,  8, 7 }, /* DMAC DMTE2 */
+       { 37, 2,  8, 7 }, /* DMAC DMTE3 */
+       { 28, 2,  8, 7 }, /* DMAC DMAE */
+};
+
+static struct ipr_data sh7751_ipr_map[] = {
+       { 44, 2,  8, 7 }, /* DMAC DMTE4 */
+       { 45, 2,  8, 7 }, /* DMAC DMTE5 */
+       { 46, 2,  8, 7 }, /* DMAC DMTE6 */
+       { 47, 2,  8, 7 }, /* DMAC DMTE7 */
+       /* The following use INTC_INPRI00 for masking, which is a 32-bit
+          register, not a 16-bit register like the IPRx registers, so it
+          would need special support */
+       /*{ 72, INTPRI00,  8, ? },*/ /* TMU3 TUNI */
+       /*{ 76, INTPRI00, 12, ? },*/ /* TMU4 TUNI */
+};
+
+static unsigned long ipr_offsets[] = {
+       0xffd00004UL,   /* 0: IPRA */
+       0xffd00008UL,   /* 1: IPRB */
+       0xffd0000cUL,   /* 2: IPRC */
+       0xffd00010UL,   /* 3: IPRD */
+};
+
+/* given the IPR index return the address of the IPR register */
+unsigned int map_ipridx_to_addr(int idx)
+{
+       if (idx >= ARRAY_SIZE(ipr_offsets))
+               return 0;
+       return ipr_offsets[idx];
+}
+
+#define INTC_ICR       0xffd00000UL
+#define INTC_ICR_IRLM   (1<<7)
+
+/* enable individual interrupt mode for external interrupts */
+void ipr_irq_enable_irlm(void)
+{
+       ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
+}
+
+void __init init_IRQ_ipr()
+{
+       make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map));
+#ifdef CONFIG_CPU_SUBTYPE_SH7751
+       make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map));
+#endif
+}
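
Each sh7750_ipr_map entry names an IRQ, the index of its 16-bit IPR register (resolved through map_ipridx_to_addr()), the bit position of the 4-bit priority field inside that register, and the priority value to program. The sketch below shows how a make_ipr_irq()-style consumer could apply one entry; the struct field names and the helper name are illustrative, only the read-modify-write of the 4-bit field is implied by the table layout.

/* Sketch: program one 4-bit priority field described by an ipr_data entry. */
struct ipr_entry_sketch {
	unsigned int irq;
	unsigned int ipr_idx;
	unsigned int shift;
	unsigned int priority;
};

static void set_ipr_priority_sketch(const struct ipr_entry_sketch *p)
{
	unsigned int addr = map_ipridx_to_addr(p->ipr_idx);
	unsigned short val;

	if (!addr)
		return;

	val = ctrl_inw(addr);
	val &= ~(0xf << p->shift);		/* clear the old priority */
	val |= (p->priority & 0xf) << p->shift;	/* insert the new one */
	ctrl_outw(val, addr);
}
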
index 814ddb226531c26c92ed78efd27aea24b5cd98db..9aeaa2ddaa28d38a0e4e39f8ca6030adf4259671 100644 (file)
@@ -79,25 +79,27 @@ static int __init sh7780_devices_setup(void)
 __initcall(sh7780_devices_setup);
 
 static struct intc2_data intc2_irq_table[] = {
-       { TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 },
-       { 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-       { 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-       { 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-       { SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-       { SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-       { SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
-       { SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
+       { 28, 0, 24, 0, 0, 2 },         /* TMU0 */
 
-       { SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-       { SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-       { SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
-       { SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY },
+       { 21, 1,  0, 0, 2, 2 },
+       { 22, 1,  1, 0, 2, 2 },
+       { 23, 1,  2, 0, 2, 2 },
 
-       { PCIC0_IRQ, 0x10,  8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY },
-       { PCIC1_IRQ, 0x10,  0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY },
-       { PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY },
-       { PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY },
-       { PCIC4_IRQ, 0x14,  8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY },
+       { 40, 8, 24, 0, 3, 3 },         /* SCIF0 ERI */
+       { 41, 8, 24, 0, 3, 3 },         /* SCIF0 RXI */
+       { 42, 8, 24, 0, 3, 3 },         /* SCIF0 BRI */
+       { 43, 8, 24, 0, 3, 3 },         /* SCIF0 TXI */
+
+       { 76, 8, 16, 0, 4, 3 },         /* SCIF1 ERI */
+       { 77, 8, 16, 0, 4, 3 },         /* SCIF1 RXI */
+       { 78, 8, 16, 0, 4, 3 },         /* SCIF1 BRI */
+       { 79, 8, 16, 0, 4, 3 },         /* SCIF1 TXI */
+
+       { 64, 0x10,  8, 0, 14, 2 },     /* PCIC0 */
+       { 65, 0x10,  0, 0, 15, 2 },     /* PCIC1 */
+       { 66, 0x14, 24, 0, 16, 2 },     /* PCIC2 */
+       { 67, 0x14, 16, 0, 17, 2 },     /* PCIC3 */
+       { 68, 0x14,  8, 0, 18, 2 },     /* PCIC4 */
 };
 
 void __init init_IRQ_intc2(void)
index 7bcc73f9b8df535ad8887383aeac71a8b2491ef5..0c9ea38d2caa0268a53179b75aec501bac0c443e 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu/sq.h>
@@ -38,7 +38,7 @@ struct sq_mapping {
 
 static struct sq_mapping *sq_mapping_list;
 static DEFINE_SPINLOCK(sq_mapping_lock);
-static kmem_cache_t *sq_cache;
+static struct kmem_cache *sq_cache;
 static unsigned long *sq_bitmap;
 
 #define store_queue_barrier()                  \
@@ -67,6 +67,7 @@ void sq_flush_range(unsigned long start, unsigned int len)
        /* Wait for completion */
        store_queue_barrier();
 }
+EXPORT_SYMBOL(sq_flush_range);
 
 static inline void sq_mapping_list_add(struct sq_mapping *map)
 {
@@ -166,7 +167,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
        map->size = size;
        map->name = name;
 
-       page = bitmap_find_free_region(sq_bitmap, 0x04000000,
+       page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
                                       get_order(map->size));
        if (unlikely(page < 0)) {
                ret = -ENOSPC;
@@ -193,6 +194,7 @@ out:
        kmem_cache_free(sq_cache, map);
        return ret;
 }
+EXPORT_SYMBOL(sq_remap);
 
 /**
  * sq_unmap - Unmap a Store Queue allocation
@@ -234,6 +236,7 @@ void sq_unmap(unsigned long vaddr)
 
        kmem_cache_free(sq_cache, map);
 }
+EXPORT_SYMBOL(sq_unmap);
 
 /*
  * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
@@ -402,7 +405,3 @@ module_exit(sq_api_exit);
 MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
 MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
 MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(sq_remap);
-EXPORT_SYMBOL(sq_unmap);
-EXPORT_SYMBOL(sq_flush_range);
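
The bitmap_find_free_region() change above is a unit fix: the second argument is the size of the bitmap in bits, and the store queue bitmap tracks pages, so the 64 MiB SQ aperture has to be passed as 0x04000000 >> PAGE_SHIFT entries while get_order() supplies the allocation order. A hedged sketch of the allocation pattern; the base-address macro is made up here, the real one lives in the SQ headers.

/* Sketch: reserve 'size' bytes of store queue space, page-granular. */
#define SQ_APERTURE_BYTES	0x04000000UL	/* 64 MiB of SQ address space */
#define SQ_BASE_ILLUSTRATIVE	0xe0000000UL	/* assumed SQ area base, illustration only */

static unsigned long sq_alloc_sketch(unsigned long *sq_bitmap, unsigned int size)
{
	int page = bitmap_find_free_region(sq_bitmap,
					   SQ_APERTURE_BYTES >> PAGE_SHIFT,
					   get_order(size));

	if (page < 0)
		return 0;

	return SQ_BASE_ILLUSTRATIVE + ((unsigned long)page << PAGE_SHIFT);
}
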
index a00022722e9e43c8ee4734e539ac97383be6790d..60340823798ab99f19d13ab31af5e3eb1ff45369 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/init.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #ifdef CONFIG_SH_STANDARD_BIOS
 #include <asm/sh_bios.h>
@@ -62,17 +62,9 @@ static struct console bios_console = {
 #include <linux/serial_core.h>
 #include "../../../drivers/serial/sh-sci.h"
 
-#ifdef CONFIG_CPU_SH4
-#define SCIF_REG       0xffe80000
-#elif defined(CONFIG_CPU_SUBTYPE_SH72060)
-#define SCIF_REG       0xfffe9800
-#else
-#error "Undefined SCIF for this subtype"
-#endif
-
 static struct uart_port scif_port = {
-       .mapbase        = SCIF_REG,
-       .membase        = (char __iomem *)SCIF_REG,
+       .mapbase        = CONFIG_EARLY_SCIF_CONSOLE_PORT,
+       .membase        = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT,
 };
 
 static void scif_sercon_putc(int c)
@@ -113,23 +105,29 @@ static struct console scif_console = {
        .index          = -1,
 };
 
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
+/*
+ * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4
+ * devices that aren't using sh-ipl+g.
+ */
 static void scif_sercon_init(int baud)
 {
-       ctrl_outw(0, SCIF_REG + 8);
-       ctrl_outw(0, SCIF_REG);
+       ctrl_outw(0, scif_port.mapbase + 8);
+       ctrl_outw(0, scif_port.mapbase);
 
        /* Set baud rate */
        ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) /
-                 (32 * baud) - 1, SCIF_REG + 4);
-
-       ctrl_outw(12, SCIF_REG + 24);
-       ctrl_outw(8, SCIF_REG + 24);
-       ctrl_outw(0, SCIF_REG + 32);
-       ctrl_outw(0x60, SCIF_REG + 16);
-       ctrl_outw(0, SCIF_REG + 36);
-       ctrl_outw(0x30, SCIF_REG + 8);
+                 (32 * baud) - 1, scif_port.mapbase + 4);
+
+       ctrl_outw(12, scif_port.mapbase + 24);
+       ctrl_outw(8, scif_port.mapbase + 24);
+       ctrl_outw(0, scif_port.mapbase + 32);
+       ctrl_outw(0x60, scif_port.mapbase + 16);
+       ctrl_outw(0, scif_port.mapbase + 36);
+       ctrl_outw(0x30, scif_port.mapbase + 8);
 }
-#endif
+#endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */
+#endif /* CONFIG_EARLY_SCIF_CONSOLE */
 
 /*
  * Setup a default console, if more than one is compiled in, rely on the
@@ -168,7 +166,7 @@ int __init setup_early_printk(char *opt)
        if (!strncmp(buf, "serial", 6)) {
                early_console = &scif_console;
 
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS)
                scif_sercon_init(115200);
 #endif
        }
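
The early console's baud setup above is the standard SCIF bit-rate calculation, SCBRR = Pck / (32 * baud) - 1, with the extra 16 * baud in the numerator giving round-to-nearest instead of truncation. A stand-alone sketch of the same arithmetic; the clock and baud figures are examples only:

#include <stdio.h>

static int scbrr(unsigned long pclk_hz, unsigned long baud)
{
	/* mirrors: (CONFIG_SH_PCLK_FREQ + 16 * baud) / (32 * baud) - 1 */
	return (pclk_hz + 16 * baud) / (32 * baud) - 1;
}

int main(void)
{
	printf("SCBRR for 115200 baud at 33.33 MHz = %d\n",
	       scbrr(33333333UL, 115200UL));
	return 0;
}
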
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
new file mode 100644 (file)
index 0000000..29136a3
--- /dev/null
@@ -0,0 +1,433 @@
+/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
+ *
+ *  linux/arch/sh/entry.S
+ *
+ *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ *  Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
+! to be jumped is too far, but it causes illegal slot exception.
+
+/*     
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * NOTE: This code uses a convention that instructions in the delay slot
+ * of a transfer-control instruction are indented by an extra space, thus:
+ *
+ *    jmp      @k0         ! control-transfer instruction
+ *     ldc     k1, ssr     ! delay slot
+ *
+ * Stack layout in 'ret_from_syscall':
+ *     ptrace needs to have all regs on the stack.
+ *     if the order here is changed, it needs to be
+ *     updated in ptrace.c and ptrace.h
+ *
+ *     r0
+ *      ...
+ *     r15 = stack pointer
+ *     spc
+ *     pr
+ *     ssr
+ *     gbr
+ *     mach
+ *     macl
+ *     syscall #
+ *
+ */
+
+#if defined(CONFIG_PREEMPT)
+#  define preempt_stop()       cli
+#else
+#  define preempt_stop()
+#  define resume_kernel                __restore_all
+#endif
+
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
+! If both are configured, handle the debug traps (breakpoints) in SW,
+! but still allow BIOS traps to FW.
+
+       .align  2
+debug_kernel:
+#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
+       /* Force BIOS call to FW (debug_trap put TRA in r8) */
+       mov     r8,r0
+       shlr2   r0
+       cmp/eq  #0x3f,r0
+       bt      debug_kernel_fw
+#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */
+
+debug_enter:           
+#if defined(CONFIG_SH_KGDB)
+       /* Jump to kgdb, pass stacked regs as arg */
+debug_kernel_sw:
+       mov.l   3f, r0
+       jmp     @r0
+        mov    r15, r4
+       .align  2
+3:     .long   kgdb_handle_exception
+#endif /* CONFIG_SH_KGDB */
+
+#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
+
+
+       .align  2
+debug_trap:    
+#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
+       mov     #OFF_SR, r0
+       mov.l   @(r0,r15), r0           ! get status register
+       shll    r0
+       shll    r0                      ! kernel space?
+       bt/s    debug_kernel
+#endif
+        mov.l  @r15, r0                ! Restore R0 value
+       mov.l   1f, r8
+       jmp     @r8
+        nop
+
+       .align  2
+ENTRY(exception_error)
+       !
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   3f, r0
+       jsr     @r0
+        nop
+#endif
+       sti
+       mov.l   2f, r0
+       jmp     @r0
+        nop
+
+!
+       .align  2
+1:     .long   break_point_trap_software
+2:     .long   do_exception_error
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:     .long   trace_hardirqs_on
+#endif
+
+       .align  2
+ret_from_exception:
+       preempt_stop()
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   4f, r0
+       jsr     @r0
+        nop
+#endif
+ENTRY(ret_from_irq)
+       !
+       mov     #OFF_SR, r0
+       mov.l   @(r0,r15), r0   ! get status register
+       shll    r0
+       shll    r0              ! kernel space?
+       get_current_thread_info r8, r0
+       bt      resume_kernel   ! Yes, it's from kernel, go back soon
+
+#ifdef CONFIG_PREEMPT
+       bra     resume_userspace
+        nop
+ENTRY(resume_kernel)
+       mov.l   @(TI_PRE_COUNT,r8), r0  ! current_thread_info->preempt_count
+       tst     r0, r0
+       bf      noresched
+need_resched:
+       mov.l   @(TI_FLAGS,r8), r0      ! current_thread_info->flags
+       tst     #_TIF_NEED_RESCHED, r0  ! need_resched set?
+       bt      noresched
+
+       mov     #OFF_SR, r0
+       mov.l   @(r0,r15), r0           ! get status register
+       and     #0xf0, r0               ! interrupts off (exception path)?
+       cmp/eq  #0xf0, r0
+       bt      noresched
+
+       mov.l   1f, r0
+       mov.l   r0, @(TI_PRE_COUNT,r8)
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   3f, r0
+       jsr     @r0
+        nop
+#endif
+       sti
+       mov.l   2f, r0
+       jsr     @r0
+        nop
+       mov     #0, r0
+       mov.l   r0, @(TI_PRE_COUNT,r8)
+       cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   4f, r0
+       jsr     @r0
+        nop
+#endif
+
+       bra     need_resched
+        nop
+
+noresched:
+       bra     __restore_all
+        nop
+
+       .align 2
+1:     .long   PREEMPT_ACTIVE
+2:     .long   schedule
+#ifdef CONFIG_TRACE_IRQFLAGS
+3:     .long   trace_hardirqs_on
+4:     .long   trace_hardirqs_off
+#endif
+#endif
+
+ENTRY(resume_userspace)
+       ! r8: current_thread_info
+       cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   5f, r0
+       jsr     @r0
+        nop
+#endif
+       mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
+       tst     #_TIF_WORK_MASK, r0
+       bt/s    __restore_all
+        tst    #_TIF_NEED_RESCHED, r0
+
+       .align  2
+work_pending:
+       ! r0: current_thread_info->flags
+       ! r8: current_thread_info
+       ! t:  result of "tst    #_TIF_NEED_RESCHED, r0"
+       bf/s    work_resched
+        tst    #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+work_notifysig:
+       bt/s    __restore_all
+        mov    r15, r4
+       mov     r12, r5         ! set arg1(save_r0)
+       mov     r0, r6
+       mov.l   2f, r1
+       mov.l   3f, r0
+       jmp     @r1
+        lds    r0, pr
+work_resched:
+#ifndef CONFIG_PREEMPT
+       ! gUSA handling
+       mov.l   @(OFF_SP,r15), r0       ! get user space stack pointer
+       mov     r0, r1
+       shll    r0
+       bf/s    1f
+        shll   r0
+       bf/s    1f
+        mov    #OFF_PC, r0
+       !                                 SP >= 0xc0000000 : gUSA mark
+       mov.l   @(r0,r15), r2           ! get user space PC (program counter)
+       mov.l   @(OFF_R0,r15), r3       ! end point
+       cmp/hs  r3, r2                  ! r2 >= r3? 
+       bt      1f
+       add     r3, r1                  ! rewind point #2
+       mov.l   r1, @(r0,r15)           ! reset PC to rewind point #2
+       !
+1:
+#endif
+       mov.l   1f, r1
+       jsr     @r1                             ! schedule
+        nop
+       cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   5f, r0
+       jsr     @r0
+        nop
+#endif
+       !
+       mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
+       tst     #_TIF_WORK_MASK, r0
+       bt      __restore_all
+       bra     work_pending
+        tst    #_TIF_NEED_RESCHED, r0
+
+       .align  2
+1:     .long   schedule
+2:     .long   do_notify_resume
+3:     .long   restore_all
+#ifdef CONFIG_TRACE_IRQFLAGS
+4:     .long   trace_hardirqs_on
+5:     .long   trace_hardirqs_off
+#endif
+
+       .align  2
+syscall_exit_work:
+       ! r0: current_thread_info->flags
+       ! r8: current_thread_info
+       tst     #_TIF_SYSCALL_TRACE, r0
+       bt/s    work_pending
+        tst    #_TIF_NEED_RESCHED, r0
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   5f, r0
+       jsr     @r0
+        nop
+#endif
+       sti
+       ! XXX setup arguments...
+       mov.l   4f, r0                  ! do_syscall_trace
+       jsr     @r0
+        nop
+       bra     resume_userspace
+        nop
+
+       .align  2
+syscall_trace_entry:
+       !                       Yes it is traced.
+       ! XXX setup arguments...
+       mov.l   4f, r11         ! Call do_syscall_trace which notifies
+       jsr     @r11            ! superior (will chomp R[0-7])
+        nop
+       !                       Reload R0-R4 from kernel stack, where the
+       !                       parent may have modified them using
+       !                       ptrace(POKEUSR).  (Note that R0-R2 are
+       !                       used by the system call handler directly
+       !                       from the kernel stack anyway, so don't need
+       !                       to be reloaded here.)  This allows the parent
+       !                       to rewrite system calls and args on the fly.
+       mov.l   @(OFF_R4,r15), r4   ! arg0
+       mov.l   @(OFF_R5,r15), r5
+       mov.l   @(OFF_R6,r15), r6
+       mov.l   @(OFF_R7,r15), r7   ! arg3
+       mov.l   @(OFF_R3,r15), r3   ! syscall_nr
+       !
+       mov.l   2f, r10                 ! Number of syscalls
+       cmp/hs  r10, r3
+       bf      syscall_call
+       mov     #-ENOSYS, r0
+       bra     syscall_exit
+        mov.l  r0, @(OFF_R0,r15)       ! Return value
+
+__restore_all:
+       mov.l   1f, r0
+       jmp     @r0
+        nop
+
+       .align  2
+1:     .long   restore_all
+
+       .align  2
+not_syscall_tra:       
+       bra     debug_trap
+        nop
+
+       .align  2
+syscall_badsys:                        ! Bad syscall number
+       mov     #-ENOSYS, r0
+       bra     resume_userspace
+        mov.l  r0, @(OFF_R0,r15)       ! Return value
+       
+
+/*
+ * Syscall interface:
+ *
+ *     Syscall #: R3
+ *     Arguments #0 to #3: R4--R7
+ *     Arguments #4 to #6: R0, R1, R2
+ *     TRA: (number of arguments + 0x10) x 4
+ *
+ * This code also handles delegating other traps to the BIOS/gdb stub
+ * according to:
+ *
+ * Trap number
+ * (TRA>>2)        Purpose
+ * --------        -------
+ * 0x0-0xf         old syscall ABI
+ * 0x10-0x1f       new syscall ABI
+ * 0x20-0xff       delegated through debug_trap to BIOS/gdb stub.
+ *
+ * Note: When we're first called, the TRA value must be shifted
+ * right 2 bits in order to get the value that was used as the "trapa"
+ * argument.
+ */
+
+       .align  2
+       .globl  ret_from_fork
+ret_from_fork:
+       mov.l   1f, r8
+       jsr     @r8
+        mov    r0, r4
+       bra     syscall_exit
+        nop
+       .align  2
+1:     .long   schedule_tail
+       !
+ENTRY(system_call)
+#if !defined(CONFIG_CPU_SH2)
+       mov.l   1f, r9
+       mov.l   @r9, r8         ! Read from TRA (Trap Address) Register
+#endif
+       !
+       ! Is the trap argument >= 0x20? (TRA will be >= 0x80)
+       mov     #0x7f, r9
+       cmp/hi  r9, r8
+       bt/s    not_syscall_tra
+        mov    #OFF_TRA, r9
+       add     r15, r9
+       mov.l   r8, @r9                 ! set TRA value to tra
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   5f, r10
+       jsr     @r10
+        nop
+#endif
+       sti
+
+       !
+       get_current_thread_info r8, r10
+       mov.l   @(TI_FLAGS,r8), r8
+       mov     #_TIF_SYSCALL_TRACE, r10
+       tst     r10, r8
+       bf      syscall_trace_entry
+       !
+       mov.l   2f, r8                  ! Number of syscalls
+       cmp/hs  r8, r3
+       bt      syscall_badsys
+       !
+syscall_call:
+       shll2   r3              ! x4
+       mov.l   3f, r8          ! Load the address of sys_call_table
+       add     r8, r3
+       mov.l   @r3, r8
+       jsr     @r8             ! jump to specific syscall handler
+        nop
+       mov.l   @(OFF_R0,r15), r12              ! save r0
+       mov.l   r0, @(OFF_R0,r15)               ! save the return value
+       !
+syscall_exit:
+       cli
+#ifdef CONFIG_TRACE_IRQFLAGS
+       mov.l   6f, r0
+       jsr     @r0
+        nop
+#endif
+       !
+       get_current_thread_info r8, r0
+       mov.l   @(TI_FLAGS,r8), r0              ! current_thread_info->flags
+       tst     #_TIF_ALLWORK_MASK, r0
+       bf      syscall_exit_work
+       bra     __restore_all
+        nop
+       .align  2
+#if !defined(CONFIG_CPU_SH2)
+1:     .long   TRA
+#endif
+2:     .long   NR_syscalls
+3:     .long   sys_call_table
+4:     .long   do_syscall_trace
+#ifdef CONFIG_TRACE_IRQFLAGS
+5:     .long   trace_hardirqs_on
+6:     .long   trace_hardirqs_off
+#endif
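
The dispatch in ENTRY(system_call) implements the table in the comment block above: TRA holds (number of arguments + 0x10) * 4, so a raw TRA above 0x7f means the trapa argument was 0x20 or more and the trap is handed to the BIOS/gdb stub via debug_trap rather than treated as a syscall. A small user-space sketch of that classification, purely for illustration:

#include <stdio.h>

static const char *classify_trap(unsigned int tra)
{
	unsigned int trapnr = tra >> 2;	/* recover the original "trapa" argument */

	if (trapnr <= 0x0f)
		return "old syscall ABI";
	if (trapnr <= 0x1f)
		return "new syscall ABI";
	return "delegated to BIOS/gdb stub via debug_trap";
}

int main(void)
{
	/* a two-argument syscall under the new ABI: trapa #0x12, TRA = 0x48 */
	printf("TRA 0x48: %s\n", classify_trap(0x48));
	printf("TRA 0x80: %s\n", classify_trap(0x80));
	return 0;
}
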
index f5f53d14f2456aaf61a9a0d34468efde924864b8..6aca4bc6ec5d55e5dd649edb36be0aa3702d7925 100644 (file)
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page)
        .long   0x00360000      /* INITRD_START */
        .long   0x000a0000      /* INITRD_SIZE */
        .long   0
-       .balign 4096,0,4096
+       .balign PAGE_SIZE,0,PAGE_SIZE
 
        .text   
 /*
@@ -53,8 +53,10 @@ ENTRY(_stext)
        ldc     r0, sr
        !                       Initialize global interrupt mask
        mov     #0, r0
+#ifdef CONFIG_CPU_HAS_SR_RB
        ldc     r0, r6_bank
-
+#endif
+       
        /*
         * Prefetch if possible to reduce cache miss penalty.
         *
@@ -68,11 +70,14 @@ ENTRY(_stext)
        !
        mov.l   2f, r0
        mov     r0, r15         ! Set initial r15 (stack pointer)
-       mov     #(THREAD_SIZE >> 8), r1
+       mov     #(THREAD_SIZE >> 10), r1
        shll8   r1              ! r1 = THREAD_SIZE
+       shll2   r1
        sub     r1, r0          !
+#ifdef CONFIG_CPU_HAS_SR_RB
        ldc     r0, r7_bank     ! ... and initial thread_info
-
+#endif
+       
        !                       Clear BSS area
        mov.l   3f, r1
        add     #4, r1
@@ -95,7 +100,11 @@ ENTRY(_stext)
         nop
 
        .balign 4
+#if defined(CONFIG_CPU_SH2)
+1:     .long   0x000000F0              ! IMASK=0xF
+#else
 1:     .long   0x400080F0              ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
+#endif
 2:     .long   init_thread_union+THREAD_SIZE
 3:     .long   __bss_start
 4:     .long   _end
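
The mov #(THREAD_SIZE >> 10) / shll8 / shll2 sequence here (and in handle_exception in the sh3 entry code) replaces the old >> 8 form, presumably because mov #imm only takes a signed 8-bit immediate: shifting right by 10 and then back left by 10 (8 + 2) keeps the constant in range even for larger THREAD_SIZE configurations. A quick stand-alone check of the reconstruction; the THREAD_SIZE value is just an example, the real one depends on the kernel configuration:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long thread_size = 8192;	/* example: two 4 KiB pages */
	long imm = thread_size >> 10;		/* what "mov #imm" would load */

	assert(imm <= 127);			/* must fit a signed 8-bit immediate */
	assert(((unsigned long)imm << 8 << 2) == thread_size);	/* shll8 then shll2 */
	printf("immediate %ld reconstructs THREAD_SIZE %lu\n", imm, thread_size);
	return 0;
}
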
index 944128ce97066ff46bc67cae6a5ce98a3717e9d6..67be2b6e8cd1657cdf657d50259806572c236580 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/io.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/thread_info.h>
@@ -78,15 +78,16 @@ union irq_ctx {
        u32                     stack[THREAD_SIZE/sizeof(u32)];
 };
 
-static union irq_ctx *hardirq_ctx[NR_CPUS];
-static union irq_ctx *softirq_ctx[NR_CPUS];
+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
 asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                      unsigned long r6, unsigned long r7,
-                     struct pt_regs regs)
+                     struct pt_regs __regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(&regs);
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       struct pt_regs *old_regs = set_irq_regs(regs);
        int irq;
 #ifdef CONFIG_4KSTACKS
        union irq_ctx *curctx, *irqctx;
@@ -111,7 +112,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 #endif
 
 #ifdef CONFIG_CPU_HAS_INTEVT
-       irq = (ctrl_inl(INTEVT) >> 5) - 16;
+       irq = evt2irq(ctrl_inl(INTEVT));
 #else
        irq = r4;
 #endif
@@ -135,17 +136,24 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_sp = current_stack_pointer;
 
+               /*
+                * Copy the softirq bits in preempt_count so that the
+                * softirq checks work in the hardirq context.
+                */
+               irqctx->tinfo.preempt_count =
+                       (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+                       (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
+
                __asm__ __volatile__ (
                        "mov    %0, r4          \n"
-                       "mov    r15, r9         \n"
+                       "mov    r15, r8         \n"
                        "jsr    @%1             \n"
                        /* swith to the irq stack */
                        " mov   %2, r15         \n"
                        /* restore the stack (ring zero) */
-                       "mov    r9, r15         \n"
+                       "mov    r8, r15         \n"
                        : /* no outputs */
                        : "r" (irq), "r" (generic_handle_irq), "r" (isp)
-                       /* XXX: A somewhat excessive clobber list? -PFM */
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "t", "pr"
                );
@@ -193,7 +201,7 @@ void irq_ctx_init(int cpu)
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.preempt_count     = SOFTIRQ_OFFSET;
+       irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 
        softirq_ctx[cpu] = irqctx;
@@ -239,13 +247,38 @@ asmlinkage void do_softirq(void)
                        "mov    r9, r15         \n"
                        : /* no outputs */
                        : "r" (__do_softirq), "r" (isp)
-                       /* XXX: A somewhat excessive clobber list? -PFM */
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
                );
+
+               /*
+                * Shouldn't happen, we returned above if in_interrupt():
+                */
+               WARN_ON_ONCE(softirq_count());
        }
 
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(do_softirq);
 #endif
+
+void __init init_IRQ(void)
+{
+#ifdef CONFIG_CPU_HAS_PINT_IRQ
+       init_IRQ_pint();
+#endif
+
+#ifdef CONFIG_CPU_HAS_INTC2_IRQ
+       init_IRQ_intc2();
+#endif
+
+#ifdef CONFIG_CPU_HAS_IPR_IRQ
+       init_IRQ_ipr();
+#endif
+
+       /* Perform the machine specific initialisation */
+       if (sh_mv.mv_init_irq)
+               sh_mv.mv_init_irq();
+
+       irq_ctx_init(smp_processor_id());
+}
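
The new preempt_count copy in do_IRQ() exists so that softirq state checked from the separate hardirq stack still reflects the interrupted context: only the SOFTIRQ_MASK bits are carried over, everything else keeps the IRQ stack's own counts. A sketch of the merge; the mask value shown is the conventional bits 8-15 layout and is included only for illustration:

/* Sketch: carry the interrupted context's softirq bits into the irq stack. */
#define SOFTIRQ_MASK_EXAMPLE	0x0000ff00UL

static unsigned long merge_preempt_count(unsigned long irqctx_count,
					 unsigned long curctx_count)
{
	return (irqctx_count & ~SOFTIRQ_MASK_EXAMPLE) |
	       (curctx_count &  SOFTIRQ_MASK_EXAMPLE);
}
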
index a52b13ac6b7f80a32e81a5daeef04a115de406da..f3e2631be14456033c845eb28f0760ef8a875cea 100644 (file)
@@ -385,10 +385,11 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
 
 asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
                        unsigned long r6, unsigned long r7,
-                       struct pt_regs regs)
+                       struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 #ifdef CONFIG_MMU
-       return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
+       return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
 #else
        /* fork almost works, enough to trick you into looking elsewhere :-( */
        return -EINVAL;
@@ -398,11 +399,12 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
                         unsigned long parent_tidptr,
                         unsigned long child_tidptr,
-                        struct pt_regs regs)
+                        struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        if (!newsp)
-               newsp = regs.regs[15];
-       return do_fork(clone_flags, newsp, &regs, 0,
+               newsp = regs->regs[15];
+       return do_fork(clone_flags, newsp, regs, 0,
                        (int __user *)parent_tidptr, (int __user *)child_tidptr);
 }
 
@@ -418,9 +420,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
  */
 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
                         unsigned long r6, unsigned long r7,
-                        struct pt_regs regs)
+                        struct pt_regs __regs)
 {
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
                       0, NULL, NULL);
 }
 
@@ -429,8 +432,9 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
  */
 asmlinkage int sys_execve(char *ufilename, char **uargv,
                          char **uenvp, unsigned long r7,
-                         struct pt_regs regs)
+                         struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        int error;
        char *filename;
 
@@ -442,7 +446,7 @@ asmlinkage int sys_execve(char *ufilename, char **uargv,
        error = do_execve(filename,
                          (char __user * __user *)uargv,
                          (char __user * __user *)uenvp,
-                         &regs);
+                         regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
@@ -472,9 +476,7 @@ unsigned long get_wchan(struct task_struct *p)
        return pc;
 }
 
-asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
-                                unsigned long r6, unsigned long r7,
-                                struct pt_regs regs)
+asmlinkage void break_point_trap(void)
 {
        /* Clear tracing.  */
 #if defined(CONFIG_CPU_SH4A)
@@ -492,8 +494,10 @@ asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
 
 asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
                                          unsigned long r6, unsigned long r7,
-                                         struct pt_regs regs)
+                                         struct pt_regs __regs)
 {
-       regs.pc -= 2;
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+       regs->pc -= 2;
        force_sig(SIGTRAP, current);
 }
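
The by-value "struct pt_regs regs" to "__regs" plus RELOC_HIDE() conversion above repeats across the SH syscall and trap handlers; as a minimal, purely illustrative sketch of the idiom (sys_frobnicate() is made up for this example):

asmlinkage int sys_frobnicate(unsigned long r4, unsigned long r5,
                              unsigned long r6, unsigned long r7,
                              struct pt_regs __regs)
{
        /*
         * The register block really lives in the exception frame on the
         * kernel stack; RELOC_HIDE() keeps GCC from assuming it may copy
         * or cache the by-value argument, so the saved context can be
         * modified in place.
         */
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

        return regs->regs[0];
}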
index 8221b37c97733872529afec598aea9046940df1d..c66cb3209db50ea6d43a38346fdcefe2e1e7dc0c 100644 (file)
@@ -7,11 +7,9 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
-
 #include <linux/linkage.h>
-
-#define PAGE_SIZE      4096 /* must be same value as in <asm/page.h> */
-
+#include <asm/addrspace.h>
+#include <asm/page.h>
 
                .globl relocate_new_kernel
 relocate_new_kernel:
@@ -20,8 +18,8 @@ relocate_new_kernel:
        /* r6 = start_address      */
        /* r7 = vbr_reg            */
 
-       mov.l   10f,r8    /* 4096 */
-       mov.l   11f,r9    /* 0xa0000000 */
+       mov.l   10f,r8    /* PAGE_SIZE */
+       mov.l   11f,r9    /* P2SEG */
 
        /*  stack setting */
        add     r8,r5
@@ -32,7 +30,7 @@ relocate_new_kernel:
 0:
        mov.l   @r4+,r0   /* cmd = *ind++ */
 
-1:     /* addr = (cmd | 0xa0000000) & 0xfffffff0 */
+1:     /* addr = (cmd | P2SEG) & 0xfffffff0 */
        mov     r0,r2
        or      r9,r2
        mov     #-16,r1
@@ -92,7 +90,7 @@ relocate_new_kernel:
 10:
        .long   PAGE_SIZE
 11:
-       .long   0xa0000000
+       .long   P2SEG
 
 relocate_new_kernel_end:
 
index 36d86f9ac38a78b2834c6ba6551c1a6053ac76ef..f8dd6b7bfab05439bc5623bb34ce22230669c06b 100644 (file)
@@ -332,8 +332,7 @@ void __init setup_arch(char **cmdline_p)
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_node(NODE_DATA(0), INITRD_START+__MEMORY_START, INITRD_SIZE);
-                       initrd_start =
-                               INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0;
+                       initrd_start = INITRD_START + PAGE_OFFSET + __MEMORY_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
@@ -392,6 +391,7 @@ static int __init topology_init(void)
 subsys_initcall(topology_init);
 
 static const char *cpu_name[] = {
+       [CPU_SH7206]    = "SH7206",     [CPU_SH7619]    = "SH7619",
        [CPU_SH7604]    = "SH7604",     [CPU_SH7300]    = "SH7300",
        [CPU_SH7705]    = "SH7705",     [CPU_SH7706]    = "SH7706",
        [CPU_SH7707]    = "SH7707",     [CPU_SH7708]    = "SH7708",
@@ -404,6 +404,7 @@ static const char *cpu_name[] = {
        [CPU_SH4_202]   = "SH4-202",    [CPU_SH4_501]   = "SH4-501",
        [CPU_SH7770]    = "SH7770",     [CPU_SH7780]    = "SH7780",
        [CPU_SH7781]    = "SH7781",     [CPU_SH7343]    = "SH7343",
+       [CPU_SH7785]    = "SH7785",
        [CPU_SH_NONE]   = "Unknown"
 };
 
index 8a2fd19dc9ebf7b429954cee3837bbf834266997..ceee7914340121bb7ba33664b528869233bb123b 100644 (file)
@@ -73,8 +73,6 @@ DECLARE_EXPORT(__lshrdi3);
 DECLARE_EXPORT(__movstr);
 DECLARE_EXPORT(__movstrSI16);
 
-EXPORT_SYMBOL(strcpy);
-
 #ifdef CONFIG_CPU_SH4
 DECLARE_EXPORT(__movstr_i4_even);
 DECLARE_EXPORT(__movstr_i4_odd);
@@ -101,10 +99,6 @@ EXPORT_SYMBOL(__down_trylock);
 EXPORT_SYMBOL(synchronize_irq);
 #endif
 
-#ifdef CONFIG_PM
-EXPORT_SYMBOL(pm_suspend);
-#endif
-
 EXPORT_SYMBOL(csum_partial);
 #ifdef CONFIG_IPV6
 EXPORT_SYMBOL(csum_ipv6_magic);
index 5213f5bc6ce0832c395f5b69f4eeacf407a41479..bb1c480a59c74f2713a39482e5d89fba8314e886 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/elf.h>
 #include <linux/personality.h>
 #include <linux/binfmts.h>
+#include <linux/freezer.h>
 
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
@@ -37,7 +38,7 @@
 asmlinkage int
 sys_sigsuspend(old_sigset_t mask,
               unsigned long r5, unsigned long r6, unsigned long r7,
-              struct pt_regs regs)
+              struct pt_regs __regs)
 {
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
@@ -52,7 +53,7 @@ sys_sigsuspend(old_sigset_t mask,
        return -ERESTARTNOHAND;
 }
 
-asmlinkage int 
+asmlinkage int
 sys_sigaction(int sig, const struct old_sigaction __user *act,
              struct old_sigaction __user *oact)
 {
@@ -87,9 +88,11 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
                unsigned long r6, unsigned long r7,
-               struct pt_regs regs)
+               struct pt_regs __regs)
 {
-       return do_sigaltstack(uss, uoss, regs.regs[15]);
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+
+       return do_sigaltstack(uss, uoss, regs->regs[15]);
 }
 
 
@@ -98,7 +101,11 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
  */
 
 #define MOVW(n)         (0x9300|((n)-2))       /* Move mem word at PC+n to R3 */
-#define TRAP16  0xc310                 /* Syscall w/no args (NR in R3) */
+#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
+#define TRAP_NOARG 0xc320              /* Syscall w/no args (NR in R3) */
+#else
+#define TRAP_NOARG 0xc310              /* Syscall w/no args (NR in R3) */
+#endif
 #define OR_R0_R0 0x200b                        /* or r0,r0 (insert to avoid hardware bug) */
 
 struct sigframe
@@ -194,9 +201,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
 
 asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
                             unsigned long r6, unsigned long r7,
-                            struct pt_regs regs)
+                            struct pt_regs __regs)
 {
-       struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15];
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
        sigset_t set;
        int r0;
 
@@ -216,7 +224,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       if (restore_sigcontext(&regs, &frame->sc, &r0))
+       if (restore_sigcontext(regs, &frame->sc, &r0))
                goto badframe;
        return r0;
 
@@ -227,9 +235,10 @@ badframe:
 
 asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
-                               struct pt_regs regs)
+                               struct pt_regs __regs)
 {
-       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15];
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
        sigset_t set;
        stack_t st;
        int r0;
@@ -246,14 +255,14 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
+       if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
                goto badframe;
 
        if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
                goto badframe;
        /* It is more difficult to avoid calling this function than to
           call it and ignore errors.  */
-       do_sigaltstack(&st, NULL, regs.regs[15]);
+       do_sigaltstack(&st, NULL, regs->regs[15]);
 
        return r0;
 
@@ -350,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        } else {
                /* Generate return code (system call to sigreturn) */
                err |= __put_user(MOVW(7), &frame->retcode[0]);
-               err |= __put_user(TRAP16, &frame->retcode[1]);
+               err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
                err |= __put_user(OR_R0_R0, &frame->retcode[2]);
                err |= __put_user(OR_R0_R0, &frame->retcode[3]);
                err |= __put_user(OR_R0_R0, &frame->retcode[4]);
@@ -430,7 +439,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        } else {
                /* Generate return code (system call to rt_sigreturn) */
                err |= __put_user(MOVW(7), &frame->retcode[0]);
-               err |= __put_user(TRAP16, &frame->retcode[1]);
+               err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
                err |= __put_user(OR_R0_R0, &frame->retcode[2]);
                err |= __put_user(OR_R0_R0, &frame->retcode[3]);
                err |= __put_user(OR_R0_R0, &frame->retcode[4]);
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
new file mode 100644 (file)
index 0000000..0d5268a
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * arch/sh/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ *  Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <asm/ptrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+{
+       unsigned long *sp;
+
+       if (!task)
+               task = current;
+       if (task == current)
+               sp = (unsigned long *)current_stack_pointer;
+       else
+               sp = (unsigned long *)task->thread.sp;
+
+       while (!kstack_end(sp)) {
+               unsigned long addr = *sp++;
+
+               if (__kernel_text_address(addr)) {
+                       if (trace->skip > 0)
+                               trace->skip--;
+                       else
+                               trace->entries[trace->nr_entries++] = addr;
+                       if (trace->nr_entries >= trace->max_entries)
+                               break;
+               }
+       }
+}
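
A hedged usage sketch for the new helper; the buffer size and the wrapper function are invented purely for illustration:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

static unsigned long backtrace_buf[16];

static void dump_current_backtrace(void)
{
        struct stack_trace trace = {
                .nr_entries     = 0,
                .max_entries    = ARRAY_SIZE(backtrace_buf),
                .entries        = backtrace_buf,
                .skip           = 0,
        };

        /* NULL or current both walk the running task's stack. */
        save_stack_trace(&trace, current);

        /* backtrace_buf[0 .. trace.nr_entries-1] now holds kernel text addresses. */
}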
index 8fde95001c346873744057040b85c6f3cdbb094d..5083b6ed4b39a5b1fe07bb32a9a1e9bc42d47383 100644 (file)
  */
 asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
        unsigned long r6, unsigned long r7,
-       struct pt_regs regs)
+       struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        int fd[2];
        int error;
 
        error = do_pipe(fd);
        if (!error) {
-               regs.regs[1] = fd[1];
+               regs->regs[1] = fd[1];
                return fd[0];
        }
        return error;
@@ -50,6 +51,7 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
 
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
@@ -135,6 +137,7 @@ full_search:
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
 }
+#endif /* CONFIG_MMU */
 
 static inline long
 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 
index 57e708d7b52df705cdf6ebcf48d1608485dc2abd..c206c9504c4bb40da950a374ead73943f9ab36a4 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/profile.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
 #include <asm/clock.h>
 #include <asm/rtc.h>
 #include <asm/timer.h>
@@ -50,15 +52,20 @@ unsigned long long __attribute__ ((weak)) sched_clock(void)
 #ifndef CONFIG_GENERIC_TIME
 void do_gettimeofday(struct timeval *tv)
 {
+       unsigned long flags;
        unsigned long seq;
        unsigned long usec, sec;
 
        do {
-               seq = read_seqbegin(&xtime_lock);
+               /*
+                * Turn off IRQs when grabbing xtime_lock, so that
+                * the sys_timer get_offset code doesn't have to handle it.
+                */
+               seq = read_seqbegin_irqsave(&xtime_lock, flags);
                usec = get_timer_offset();
                sec = xtime.tv_sec;
-               usec += xtime.tv_nsec / 1000;
-       } while (read_seqretry(&xtime_lock, seq));
+               usec += xtime.tv_nsec / NSEC_PER_USEC;
+       } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
        while (usec >= 1000000) {
                usec -= 1000000;
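
The read_seqbegin_irqsave()/read_seqretry_irqrestore() pair keeps IRQs off for the whole read section, so the sys_timer's get_offset() path can assume they are already disabled (which is also why the private tmu0_lock disappears further down). Roughly, the generic reader pattern looks like this, sketched with a hypothetical seqlock-protected pair of values:

#include <linux/seqlock.h>

static seqlock_t sample_lock = SEQLOCK_UNLOCKED;
static struct { unsigned long a, b; } sample;

static void read_sample(unsigned long *a, unsigned long *b)
{
        unsigned long seq, flags;

        do {
                /* local IRQs stay disabled across the read section */
                seq = read_seqbegin_irqsave(&sample_lock, flags);
                *a = sample.a;
                *b = sample.b;
        } while (read_seqretry_irqrestore(&sample_lock, seq, flags));
}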
@@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv)
         * wall time.  Discover what correction gettimeofday() would have
         * made, and then undo it!
         */
-       nsec -= 1000 * get_timer_offset();
+       nsec -= get_timer_offset() * NSEC_PER_USEC;
 
        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -169,6 +176,108 @@ static struct sysdev_class timer_sysclass = {
        .resume  = timer_resume,
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int timer_dyn_tick_enable(void)
+{
+       struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+       unsigned long flags;
+       int ret = -ENODEV;
+
+       if (dyn_tick) {
+               spin_lock_irqsave(&dyn_tick->lock, flags);
+               ret = 0;
+               if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
+                       ret = dyn_tick->enable();
+
+                       if (ret == 0)
+                               dyn_tick->state |= DYN_TICK_ENABLED;
+               }
+               spin_unlock_irqrestore(&dyn_tick->lock, flags);
+       }
+
+       return ret;
+}
+
+static int timer_dyn_tick_disable(void)
+{
+       struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+       unsigned long flags;
+       int ret = -ENODEV;
+
+       if (dyn_tick) {
+               spin_lock_irqsave(&dyn_tick->lock, flags);
+               ret = 0;
+               if (dyn_tick->state & DYN_TICK_ENABLED) {
+                       ret = dyn_tick->disable();
+
+                       if (ret == 0)
+                               dyn_tick->state &= ~DYN_TICK_ENABLED;
+               }
+               spin_unlock_irqrestore(&dyn_tick->lock, flags);
+       }
+
+       return ret;
+}
+
+/*
+ * Reprogram the system timer for at least the calculated time interval.
+ * This function should be called from the idle thread with IRQs disabled,
+ * immediately before sleeping.
+ */
+void timer_dyn_reprogram(void)
+{
+       struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
+       unsigned long next, seq, flags;
+
+       if (!dyn_tick)
+               return;
+
+       spin_lock_irqsave(&dyn_tick->lock, flags);
+       if (dyn_tick->state & DYN_TICK_ENABLED) {
+               next = next_timer_interrupt();
+               do {
+                       seq = read_seqbegin(&xtime_lock);
+                       dyn_tick->reprogram(next - jiffies);
+               } while (read_seqretry(&xtime_lock, seq));
+       }
+       spin_unlock_irqrestore(&dyn_tick->lock, flags);
+}
+
+static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+{
+       return sprintf(buf, "%i\n",
+                      (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
+}
+
+static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
+                                 size_t count)
+{
+       unsigned int enable = simple_strtoul(buf, NULL, 2);
+
+       if (enable)
+               timer_dyn_tick_enable();
+       else
+               timer_dyn_tick_disable();
+
+       return count;
+}
+static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
+
+/*
+ * dyntick=enable|disable
+ */
+static char dyntick_str[4] __initdata = "";
+
+static int __init dyntick_setup(char *str)
+{
+       if (str)
+               strlcpy(dyntick_str, str, sizeof(dyntick_str));
+       return 1;
+}
+
+__setup("dyntick=", dyntick_setup);
+#endif
+
 static int __init timer_init_sysfs(void)
 {
        int ret = sysdev_class_register(&timer_sysclass);
@@ -176,7 +285,22 @@ static int __init timer_init_sysfs(void)
                return ret;
 
        sys_timer->dev.cls = &timer_sysclass;
-       return sysdev_register(&sys_timer->dev);
+       ret = sysdev_register(&sys_timer->dev);
+
+#ifdef CONFIG_NO_IDLE_HZ
+       if (ret == 0 && sys_timer->dyn_tick) {
+               ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
+
+               /*
+                * Turn on dynamic tick after calibrate delay
+                * for correct bogomips
+                */
+               if (ret == 0 && dyntick_str[0] == 'e')
+                       ret = timer_dyn_tick_enable();
+       }
+#endif
+
+       return ret;
 }
 device_initcall(timer_init_sysfs);
 
@@ -200,6 +324,11 @@ void __init time_init(void)
        sys_timer = get_sys_timer();
        printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 
+#ifdef CONFIG_NO_IDLE_HZ
+       if (sys_timer->dyn_tick)
+               spin_lock_init(&sys_timer->dyn_tick->lock);
+#endif
+
 #if defined(CONFIG_SH_KGDB)
        /*
         * Set up kgdb as requested. We do it here because the serial
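
The CONFIG_NO_IDLE_HZ code added above only drives hooks supplied by the registered system timer. A rough, hypothetical sketch of what a timer driver would have to provide (the names and bodies are invented; the field names follow the calls in timer_dyn_tick_enable() and timer_dyn_reprogram()):

static int my_tick_enable(void)
{
        /* switch the hardware timer from periodic to one-shot operation */
        return 0;
}

static int my_tick_disable(void)
{
        /* return to a periodic tick */
        return 0;
}

static void my_tick_reprogram(unsigned long delta_jiffies)
{
        /* program the next compare match 'delta_jiffies' ticks ahead */
}

static struct dyn_tick_timer my_dyn_tick = {
        .enable         = my_tick_enable,
        .disable        = my_tick_disable,
        .reprogram      = my_tick_reprogram,
};

Once hooked up and enabled (via the dyn_tick sysfs attribute or dyntick=enable on the command line), timer_dyn_reprogram() is meant to be called from the idle loop with IRQs disabled, as the comment above it states.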
index 151a6a304ceca4ca484223d1707d799c184be300..bcf244ff6a128a90698257804490f9d6c3d4be97 100644 (file)
@@ -5,4 +5,6 @@
 obj-y  := timer.o
 
 obj-$(CONFIG_SH_TMU)           += timer-tmu.o
+obj-$(CONFIG_SH_MTU2)          += timer-mtu2.o
+obj-$(CONFIG_SH_CMT)           += timer-cmt.o
 
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c
new file mode 100644 (file)
index 0000000..a574b93
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support
+ *
+ *  Copyright (C) 2005  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/rtc.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CMT_CMSTR      0xf84a0070
+#define CMT_CMCSR_0    0xf84a0072
+#define CMT_CMCNT_0    0xf84a0074
+#define CMT_CMCOR_0    0xf84a0076
+#define CMT_CMCSR_1    0xf84a0078
+#define CMT_CMCNT_1    0xf84a007a
+#define CMT_CMCOR_1    0xf84a007c
+
+#define STBCR3         0xf80a0000
+#define cmt_clock_enable() do {        ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0)
+#define CMT_CMCSR_INIT 0x0040
+#define CMT_CMCSR_CALIB        0x0000
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define CMT_CMSTR      0xfffec000
+#define CMT_CMCSR_0    0xfffec002
+#define CMT_CMCNT_0    0xfffec004
+#define CMT_CMCOR_0    0xfffec006
+
+#define STBCR4         0xfffe040c
+#define cmt_clock_enable() do {        ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0)
+#define CMT_CMCSR_INIT 0x0040
+#define CMT_CMCSR_CALIB        0x0000
+#else
+#error "Unknown CPU SUBTYPE"
+#endif
+
+static unsigned long cmt_timer_get_offset(void)
+{
+       int count;
+       static unsigned short count_p = 0xffff;    /* for the first call after boot */
+       static unsigned long jiffies_p = 0;
+
+       /*
+        * cache volatile jiffies temporarily; we have IRQs turned off.
+        */
+       unsigned long jiffies_t;
+
+       /* timer count may underflow right here */
+       count =  ctrl_inw(CMT_CMCOR_0);
+       count -= ctrl_inw(CMT_CMCNT_0);
+
+       jiffies_t = jiffies;
+
+       /*
+        * avoiding timer inconsistencies (they are rare, but they happen)...
+        * there is one kind of problem that must be avoided here:
+        *  1. the timer counter underflows
+        */
+
+       if (jiffies_t == jiffies_p) {
+               if (count > count_p) {
+                       /* the nutcase */
+                       if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */
+                               count -= LATCH;
+                       } else {
+                               printk("%s (): hardware timer problem?\n",
+                                      __FUNCTION__);
+                       }
+               }
+       } else
+               jiffies_p = jiffies_t;
+
+       count_p = count;
+
+       count = ((LATCH-1) - count) * TICK_SIZE;
+       count = (count + LATCH/2) / LATCH;
+
+       return count;
+}
+
+static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
+{
+       unsigned long timer_status;
+
+       /* Clear CMF bit */
+       timer_status = ctrl_inw(CMT_CMCSR_0);
+       timer_status &= ~0x80;
+       ctrl_outw(timer_status, CMT_CMCSR_0);
+
+       /*
+        * Here we are in the timer irq handler. We just have irqs locally
+        * disabled but we don't know if the timer_bh is running on the other
+        * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+        * the irq version of write_lock because as just said we have irq
+        * locally disabled. -arca
+        */
+       write_seqlock(&xtime_lock);
+       handle_timer_tick();
+       write_sequnlock(&xtime_lock);
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction cmt_irq = {
+       .name           = "timer",
+       .handler        = cmt_timer_interrupt,
+       .flags          = IRQF_DISABLED | IRQF_TIMER,
+       .mask           = CPU_MASK_NONE,
+};
+
+static void cmt_clk_init(struct clk *clk)
+{
+       u8 divisor = CMT_CMCSR_INIT & 0x3;
+       ctrl_inw(CMT_CMCSR_0);
+       ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0);
+       clk->parent = clk_get(NULL, "module_clk");
+       clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static void cmt_clk_recalc(struct clk *clk)
+{
+       u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3;
+       clk->rate = clk->parent->rate / (8 << (divisor << 1));
+}
+
+static struct clk_ops cmt_clk_ops = {
+       .init           = cmt_clk_init,
+       .recalc         = cmt_clk_recalc,
+};
+
+static struct clk cmt0_clk = {
+       .name           = "cmt0_clk",
+       .ops            = &cmt_clk_ops,
+};
+
+static int cmt_timer_start(void)
+{
+       ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR);
+       return 0;
+}
+
+static int cmt_timer_stop(void)
+{
+       ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR);
+       return 0;
+}
+
+static int cmt_timer_init(void)
+{
+       unsigned long interval;
+
+       cmt_clock_enable();
+
+       setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq);
+
+       cmt0_clk.parent = clk_get(NULL, "module_clk");
+
+       cmt_timer_stop();
+
+       interval = cmt0_clk.parent->rate / 8 / HZ;
+       printk(KERN_INFO "Interval = %ld\n", interval);
+
+       ctrl_outw(interval, CMT_CMCOR_0);
+
+       clk_register(&cmt0_clk);
+       clk_enable(&cmt0_clk);
+
+       cmt_timer_start();
+
+       return 0;
+}
+
+struct sys_timer_ops cmt_timer_ops = {
+       .init           = cmt_timer_init,
+       .start          = cmt_timer_start,
+       .stop           = cmt_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+       .get_offset     = cmt_timer_get_offset,
+#endif
+};
+
+struct sys_timer cmt_timer = {
+       .name   = "cmt",
+       .ops    = &cmt_timer_ops,
+};
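
The last two lines of cmt_timer_get_offset() above convert the remaining CMT count into elapsed counts and then into microseconds; factored out, the scaling step is roughly the following (a hedged sketch, reading TICK_SIZE as microseconds per jiffy and LATCH as counts per jiffy):

static unsigned long counts_to_usecs(unsigned long elapsed, unsigned long latch,
                                     unsigned long usecs_per_jiffy)
{
        /*
         * A full period of 'latch' counts maps to one jiffy of microseconds;
         * add latch/2 so the division rounds to nearest, as in the code above.
         */
        return (elapsed * usecs_per_jiffy + latch / 2) / latch;
}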
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c
new file mode 100644 (file)
index 0000000..fffcd1c
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support
+ *
+ *  Copyright (C) 2005  Paul Mundt
+ *
+ * Based off of arch/sh/kernel/timers/timer-tmu.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/seqlock.h>
+#include <asm/timer.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/clock.h>
+
+/*
+ * We use channel 1 for our lowly system timer. Channel 2 would be the other
+ * likely candidate, but we leave it alone as it has higher divisors that
+ * would be of more use to other more interesting applications.
+ *
+ * TODO: Presently we only implement a 16-bit single-channel system timer.
+ * However, we can implement channel cascade if we go the overflow route and
+ * get away with using 2 MTU2 channels as a 32-bit timer.
+ */
+#define MTU2_TSTR      0xfffe4280
+#define MTU2_TCR_1     0xfffe4380
+#define MTU2_TMDR_1    0xfffe4381
+#define MTU2_TIOR_1    0xfffe4382
+#define MTU2_TIER_1    0xfffe4384
+#define MTU2_TSR_1     0xfffe4385
+#define MTU2_TCNT_1    0xfffe4386      /* 16-bit counter */
+#define MTU2_TGRA_1    0xfffe438a
+
+#define STBCR3         0xfffe0408
+
+#define MTU2_TSTR_CST1 (1 << 1)        /* Counter Start 1 */
+
+#define MTU2_TSR_TGFA  (1 << 0)        /* GRA compare match */
+
+#define MTU2_TIER_TGIEA        (1 << 0)        /* GRA compare match  interrupt enable */
+
+#define MTU2_TCR_INIT  0x22
+
+#define MTU2_TCR_CALIB  0x00
+
+static unsigned long mtu2_timer_get_offset(void)
+{
+       int count;
+       static int count_p = 0x7fff;    /* for the first call after boot */
+       static unsigned long jiffies_p = 0;
+
+       /*
+        * cache volatile jiffies temporarily; we have IRQs turned off.
+        */
+       unsigned long jiffies_t;
+
+       /* timer count may underflow right here */
+       count = ctrl_inw(MTU2_TCNT_1);  /* read the latched count */
+
+       jiffies_t = jiffies;
+
+       /*
+        * avoiding timer inconsistencies (they are rare, but they happen)...
+        * there is one kind of problem that must be avoided here:
+        *  1. the timer counter underflows
+        */
+
+       if (jiffies_t == jiffies_p) {
+               if (count > count_p) {
+                       if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) {
+                               count -= LATCH;
+                       } else {
+                               printk("%s (): hardware timer problem?\n",
+                                      __FUNCTION__);
+                       }
+               }
+       } else
+               jiffies_p = jiffies_t;
+
+       count_p = count;
+
+       count = ((LATCH-1) - count) * TICK_SIZE;
+       count = (count + LATCH/2) / LATCH;
+
+       return count;
+}
+
+static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
+{
+       unsigned long timer_status;
+
+       /* Clear TGFA bit */
+       timer_status = ctrl_inb(MTU2_TSR_1);
+       timer_status &= ~MTU2_TSR_TGFA;
+       ctrl_outb(timer_status, MTU2_TSR_1);
+
+       /* Do timer tick */
+       write_seqlock(&xtime_lock);
+       handle_timer_tick();
+       write_sequnlock(&xtime_lock);
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction mtu2_irq = {
+       .name           = "timer",
+       .handler        = mtu2_timer_interrupt,
+       .flags          = IRQF_DISABLED | IRQF_TIMER,
+       .mask           = CPU_MASK_NONE,
+};
+
+static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 };
+
+static void mtu2_clk_init(struct clk *clk)
+{
+       u8 idx = MTU2_TCR_INIT & 0x7;
+
+       clk->rate = clk->parent->rate / divisors[idx];
+       /* Start TCNT counting */
+       ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+
+}
+
+static void mtu2_clk_recalc(struct clk *clk)
+{
+       u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7;
+       clk->rate = clk->parent->rate / divisors[idx];
+}
+
+static struct clk_ops mtu2_clk_ops = {
+       .init           = mtu2_clk_init,
+       .recalc         = mtu2_clk_recalc,
+};
+
+static struct clk mtu2_clk1 = {
+       .name           = "mtu2_clk1",
+       .ops            = &mtu2_clk_ops,
+};
+
+static int mtu2_timer_start(void)
+{
+       ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR);
+       return 0;
+}
+
+static int mtu2_timer_stop(void)
+{
+       ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR);
+       return 0;
+}
+
+static int mtu2_timer_init(void)
+{
+       u8 tmp;
+       unsigned long interval;
+
+       setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq);
+
+       mtu2_clk1.parent = clk_get(NULL, "module_clk");
+
+       ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3);
+
+       /* Normal operation */
+       ctrl_outb(0, MTU2_TMDR_1);
+       ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1);
+       ctrl_outb(0x01, MTU2_TIOR_1);
+
+       /* Enable underflow interrupt */
+       ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1);
+
+       interval = CONFIG_SH_PCLK_FREQ / 16 / HZ;
+       printk(KERN_INFO "Interval = %ld\n", interval);
+
+       ctrl_outw(interval, MTU2_TGRA_1);
+       ctrl_outw(0, MTU2_TCNT_1);
+
+       clk_register(&mtu2_clk1);
+       clk_enable(&mtu2_clk1);
+
+       return 0;
+}
+
+struct sys_timer_ops mtu2_timer_ops = {
+       .init           = mtu2_timer_init,
+       .start          = mtu2_timer_start,
+       .stop           = mtu2_timer_stop,
+#ifndef CONFIG_GENERIC_TIME
+       .get_offset     = mtu2_timer_get_offset,
+#endif
+};
+
+struct sys_timer mtu2_timer = {
+       .name   = "mtu2",
+       .ops    = &mtu2_timer_ops,
+};
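
mtu2_timer_init() above derives its compare-match interval straight from the peripheral clock; a hedged worked example with an assumed clock rate:

static unsigned long mtu2_interval_example(void)
{
        unsigned long pclk = 33333333;  /* assumed CONFIG_SH_PCLK_FREQ, in Hz */
        unsigned long hz   = 100;       /* assumed HZ */

        /*
         * MTU2_TCR_INIT (0x22) selects divisors[2], i.e. the /16 prescaler,
         * so the counter ticks at pclk/16 and one compare match per jiffy
         * needs pclk / 16 / hz counts (20833 with the numbers above).
         */
        return pclk / 16 / hz;
}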
index 24927015dc31fc901153935e91eca80e32332c67..e060e71d0785f222af07ecc4e653265284aa7278 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
 
 #define TMU0_TCR_CALIB 0x0000
 
-static DEFINE_SPINLOCK(tmu0_lock);
-
 static unsigned long tmu_timer_get_offset(void)
 {
        int count;
-       unsigned long flags;
-
        static int count_p = 0x7fffffff;    /* for the first call after boot */
        static unsigned long jiffies_p = 0;
 
@@ -46,7 +41,6 @@ static unsigned long tmu_timer_get_offset(void)
         */
        unsigned long jiffies_t;
 
-       spin_lock_irqsave(&tmu0_lock, flags);
        /* timer count may underflow right here */
        count = ctrl_inl(TMU0_TCNT);    /* read the latched count */
 
@@ -72,7 +66,6 @@ static unsigned long tmu_timer_get_offset(void)
                jiffies_p = jiffies_t;
 
        count_p = count;
-       spin_unlock_irqrestore(&tmu0_lock, flags);
 
        count = ((LATCH-1) - count) * TICK_SIZE;
        count = (count + LATCH/2) / LATCH;
@@ -106,7 +99,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 static struct irqaction tmu_irq = {
        .name           = "timer",
        .handler        = tmu_timer_interrupt,
-       .flags          = IRQF_DISABLED,
+       .flags          = IRQF_DISABLED | IRQF_TIMER,
        .mask           = CPU_MASK_NONE,
 };
 
@@ -149,9 +142,9 @@ static int tmu_timer_init(void)
 {
        unsigned long interval;
 
-       setup_irq(TIMER_IRQ, &tmu_irq);
+       setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
 
-       tmu0_clk.parent = clk_get("module_clk");
+       tmu0_clk.parent = clk_get(NULL, "module_clk");
 
        /* Start TMU0 */
        tmu_timer_stop();
index dc1f631053a8175744e03e63979a9cf67618d77e..a6bcc913d25e8687f96ad783220fbdfaa64f111b 100644 (file)
 static struct sys_timer *sys_timers[] __initdata = {
 #ifdef CONFIG_SH_TMU
        &tmu_timer,
+#endif
+#ifdef CONFIG_SH_MTU2
+       &mtu2_timer,
+#endif
+#ifdef CONFIG_SH_CMT
+       &cmt_timer,
 #endif
        NULL,
 };
index 53dfa55f3156b40327e977aef77d7fef08d5351e..3762d9dc20466a026720b1e2821e3c7a4aef8db8 100644 (file)
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/io.h>
+#include <linux/debug_locks.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
 #ifdef CONFIG_SH_KGDB
 #include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs)                         \
-{                                              \
+#define CHK_REMOTE_DEBUG(regs)                 \
+{                                              \
        if (kgdb_debug_hook && !user_mode(regs))\
                (*kgdb_debug_hook)(regs);       \
 }
 #endif
 
 #ifdef CONFIG_CPU_SH2
-#define TRAP_RESERVED_INST     4
-#define TRAP_ILLEGAL_SLOT_INST 6
+# define TRAP_RESERVED_INST    4
+# define TRAP_ILLEGAL_SLOT_INST        6
+# define TRAP_ADDRESS_ERROR    9
+# ifdef CONFIG_CPU_SH2A
+#  define TRAP_DIVZERO_ERROR   17
+#  define TRAP_DIVOVF_ERROR    18
+# endif
 #else
 #define TRAP_RESERVED_INST     12
 #define TRAP_ILLEGAL_SLOT_INST 13
@@ -88,7 +94,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 
        if (!user_mode(regs) || in_interrupt())
                dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
-                        (unsigned long)task_stack_page(current));
+                        (unsigned long)task_stack_page(current));
 
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
@@ -102,8 +108,6 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs,
                die(str, regs, err);
 }
 
-static int handle_unaligned_notify_count = 10;
-
 /*
  * try and fix up kernelspace address errors
  * - userspace errors just cause EFAULT to be returned, resulting in SEGV
@@ -198,7 +202,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
                if (copy_to_user(dst,src,4))
                        goto fetch_fault;
                ret = 0;
-               break;
+               break;
 
        case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
                if (instruction & 4)
@@ -222,7 +226,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
                if (copy_from_user(dst,src,4))
                        goto fetch_fault;
                ret = 0;
-               break;
+               break;
 
        case 6: /* mov.[bwl] from memory, possibly with post-increment */
                src = (unsigned char*) *rm;
@@ -230,7 +234,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
                        *rm += count;
                dst = (unsigned char*) rn;
                *(unsigned long*)dst = 0;
-               
+
 #ifdef __LITTLE_ENDIAN__
                if (copy_from_user(dst, src, count))
                        goto fetch_fault;
@@ -241,7 +245,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
                }
 #else
                dst += 4-count;
-               
+
                if (copy_from_user(dst, src, count))
                        goto fetch_fault;
 
@@ -320,7 +324,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
                        return -EFAULT;
 
                /* kernel */
-               die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
+               die("delay-slot-insn faulting in handle_unaligned_delayslot",
+                   regs, 0);
        }
 
        return handle_unaligned_ins(instruction,regs);
@@ -342,6 +347,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs)
 #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
 #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
 
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
 static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
 {
        u_int rm;
@@ -354,7 +366,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
        if (user_mode(regs) && handle_unaligned_notify_count>0) {
                handle_unaligned_notify_count--;
 
-               printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+               printk(KERN_NOTICE "Fixing up unaligned userspace access "
+                      "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
                       current->comm,current->pid,(u16*)regs->pc,instruction);
        }
 
@@ -478,32 +491,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
                regs->pc += 2;
        return ret;
 }
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x)     \
+       __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x)     \
+       __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
 
 /*
- * Handle various address error exceptions
+ * Handle various address error exceptions:
+ *  - instruction address error:
+ *       misaligned PC
+ *       PC >= 0x80000000 in user mode
+ *  - data address error (read and write)
+ *       misaligned data access
+ *       access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address error
+ * and data address errors caused by read accesses.
  */
-asmlinkage void do_address_error(struct pt_regs *regs, 
+asmlinkage void do_address_error(struct pt_regs *regs,
                                 unsigned long writeaccess,
                                 unsigned long address)
 {
-       unsigned long error_code;
+       unsigned long error_code = 0;
        mm_segment_t oldfs;
+       siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
        u16 instruction;
        int tmp;
+#endif
 
-       asm volatile("stc       r2_bank,%0": "=r" (error_code));
+       /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+       lookup_exception_vector(error_code);
+#endif
 
        oldfs = get_fs();
 
        if (user_mode(regs)) {
+               int si_code = BUS_ADRERR;
+
                local_irq_enable();
-               current->thread.error_code = error_code;
-               current->thread.trap_no = (writeaccess) ? 8 : 7;
 
                /* bad PC is not something we can fix */
-               if (regs->pc & 1)
+               if (regs->pc & 1) {
+                       si_code = BUS_ADRALN;
                        goto uspace_segv;
+               }
 
+#ifndef CONFIG_CPU_SH2A
                set_fs(USER_DS);
                if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
                        /* Argh. Fault on the instruction itself.
@@ -518,14 +557,23 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
                if (tmp==0)
                        return; /* sorted */
+#endif
 
-       uspace_segv:
-               printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
-               force_sig(SIGSEGV, current);
+uspace_segv:
+               printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+                      "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+                      regs->pr);
+
+               info.si_signo = SIGBUS;
+               info.si_errno = 0;
+               info.si_code = si_code;
+               info.si_addr = (void *) address;
+               force_sig_info(SIGBUS, &info, current);
        } else {
                if (regs->pc & 1)
                        die("unaligned program counter", regs, error_code);
 
+#ifndef CONFIG_CPU_SH2A
                set_fs(KERNEL_DS);
                if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
                        /* Argh. Fault on the instruction itself.
@@ -537,6 +585,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
 
                handle_unaligned_access(instruction, regs);
                set_fs(oldfs);
+#else
+               printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+                      "access\n", current->comm);
+
+               force_sig(SIGSEGV, current);
+#endif
        }
 }
 
@@ -548,7 +602,7 @@ int is_dsp_inst(struct pt_regs *regs)
 {
        unsigned short inst;
 
-       /* 
+       /*
         * Safe guard if DSP mode is already enabled or we're lacking
         * the DSP altogether.
         */
@@ -569,27 +623,49 @@ int is_dsp_inst(struct pt_regs *regs)
 #define is_dsp_inst(regs)      (0)
 #endif /* CONFIG_SH_DSP */
 
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+                               unsigned long r6, unsigned long r7,
+                               struct pt_regs __regs)
+{
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       siginfo_t info;
+
+       switch (r4) {
+       case TRAP_DIVZERO_ERROR:
+               info.si_code = FPE_INTDIV;
+               break;
+       case TRAP_DIVOVF_ERROR:
+               info.si_code = FPE_INTOVF;
+               break;
+       }
+
+       force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
 /* arch/sh/kernel/cpu/sh4/fpu.c */
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
-               unsigned long r6, unsigned long r7, struct pt_regs regs);
+               unsigned long r6, unsigned long r7, struct pt_regs __regs);
 
 asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
-                               struct pt_regs regs)
+                               struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        unsigned long error_code;
        struct task_struct *tsk = current;
 
 #ifdef CONFIG_SH_FPU_EMU
-       unsigned short inst;
+       unsigned short inst = 0;
        int err;
 
-       get_user(inst, (unsigned short*)regs.pc);
+       get_user(inst, (unsigned short*)regs->pc);
 
-       err = do_fpu_inst(inst, &regs);
+       err = do_fpu_inst(inst, regs);
        if (!err) {
-               regs.pc += 2;
+               regs->pc += 2;
                return;
        }
        /* not a FPU inst. */
@@ -597,20 +673,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
 
 #ifdef CONFIG_SH_DSP
        /* Check if it's a DSP instruction */
-       if (is_dsp_inst(&regs)) {
+       if (is_dsp_inst(regs)) {
                /* Enable DSP mode, and restart instruction. */
-               regs.sr |= SR_DSP;
+               regs->sr |= SR_DSP;
                return;
        }
 #endif
 
-       asm volatile("stc       r2_bank, %0": "=r" (error_code));
+       lookup_exception_vector(error_code);
+
        local_irq_enable();
-       tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = TRAP_RESERVED_INST;
-       CHK_REMOTE_DEBUG(&regs);
+       CHK_REMOTE_DEBUG(regs);
        force_sig(SIGILL, tsk);
-       die_if_no_fixup("reserved instruction", &regs, error_code);
+       die_if_no_fixup("reserved instruction", regs, error_code);
 }
 
 #ifdef CONFIG_SH_FPU_EMU
@@ -658,39 +733,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs)
 
 asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
-                               struct pt_regs regs)
+                               struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        unsigned long error_code;
        struct task_struct *tsk = current;
 #ifdef CONFIG_SH_FPU_EMU
-       unsigned short inst;
+       unsigned short inst = 0;
 
-       get_user(inst, (unsigned short *)regs.pc + 1);
-       if (!do_fpu_inst(inst, &regs)) {
-               get_user(inst, (unsigned short *)regs.pc);
-               if (!emulate_branch(inst, &regs))
+       get_user(inst, (unsigned short *)regs->pc + 1);
+       if (!do_fpu_inst(inst, regs)) {
+               get_user(inst, (unsigned short *)regs->pc);
+               if (!emulate_branch(inst, regs))
                        return;
                /* fault in branch.*/
        }
        /* not a FPU inst. */
 #endif
 
-       asm volatile("stc       r2_bank, %0": "=r" (error_code));
+       lookup_exception_vector(error_code);
+
        local_irq_enable();
-       tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = TRAP_RESERVED_INST;
-       CHK_REMOTE_DEBUG(&regs);
+       CHK_REMOTE_DEBUG(regs);
        force_sig(SIGILL, tsk);
-       die_if_no_fixup("illegal slot instruction", &regs, error_code);
+       die_if_no_fixup("illegal slot instruction", regs, error_code);
 }
 
 asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
                                   unsigned long r6, unsigned long r7,
-                                  struct pt_regs regs)
+                                  struct pt_regs __regs)
 {
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        long ex;
-       asm volatile("stc       r2_bank, %0" : "=r" (ex));
-       die_if_kernel("exception", &regs, ex);
+
+       lookup_exception_vector(ex);
+       die_if_kernel("exception", regs, ex);
 }
 
 #if defined(CONFIG_SH_STANDARD_BIOS)
@@ -735,12 +812,16 @@ void *set_exception_table_vec(unsigned int vec, void *handler)
 {
        extern void *exception_handling_table[];
        void *old_handler;
-       
+
        old_handler = exception_handling_table[vec];
        exception_handling_table[vec] = handler;
        return old_handler;
 }
 
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+                                            unsigned long r6, unsigned long r7,
+                                            struct pt_regs __regs);
+
 void __init trap_init(void)
 {
        set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
@@ -759,7 +840,15 @@ void __init trap_init(void)
        set_exception_table_evt(0x800, do_fpu_state_restore);
        set_exception_table_evt(0x820, do_fpu_state_restore);
 #endif
-               
+
+#ifdef CONFIG_CPU_SH2
+       set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+       set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+       set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
        /* Setup VBR for boot cpu */
        per_cpu_trap_init();
 }
@@ -784,6 +873,11 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
        }
 
        printk("\n");
+
+       if (!tsk)
+               tsk = current;
+
+       debug_show_held_locks(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
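
Because set_exception_table_vec() above hands back the previous handler, a caller can install a vector and later restore the original; a hedged usage sketch (the vector number and handler are made up):

#define MY_TRAP_VEC     11      /* hypothetical vector number */

static void *saved_handler;

static void my_trap_handler(void)
{
        /* handle the trap */
}

static void install_my_trap(void)
{
        saved_handler = set_exception_table_vec(MY_TRAP_VEC, my_trap_handler);
}

static void restore_my_trap(void)
{
        set_exception_table_vec(MY_TRAP_VEC, saved_handler);
}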
index 075d6cc1a2d79453c5aea27073cc1c8c79e64094..deb46941f315c6aa711e72b4f8b828ee73b011a6 100644 (file)
@@ -97,7 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
                goto up_fail;
        }
 
-       vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                ret = -ENOMEM;
                goto up_fail;
index 9dd606464d234347d4e0f94bec69b34f129e2a42..4e0362f5038468666c546efa9af3b69dd5a79555 100644 (file)
@@ -4,8 +4,12 @@ menu "Processor selection"
 # Processor families
 #
 config CPU_SH2
+       select SH_WRITETHROUGH if !CPU_SH2A
        bool
-       select SH_WRITETHROUGH
+
+config CPU_SH2A
+       bool
+       select CPU_SH2
 
 config CPU_SH3
        bool
@@ -16,6 +20,7 @@ config CPU_SH4
        bool
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
+       select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
        bool
@@ -40,6 +45,16 @@ config CPU_SUBTYPE_SH7604
        bool "Support SH7604 processor"
        select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+       bool "Support SH7619 processor"
+       select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+       bool "Support SH7206 processor"
+       select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -89,6 +104,7 @@ comment "SH-4 Processor Support"
 config CPU_SUBTYPE_SH7750
        bool "Support SH7750 processor"
        select CPU_SH4
+       select CPU_HAS_IPR_IRQ
        help
          Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU.
 
@@ -104,15 +120,18 @@ config CPU_SUBTYPE_SH7750R
        bool "Support SH7750R processor"
        select CPU_SH4
        select CPU_SUBTYPE_SH7750
+       select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
        bool "Support SH7750S processor"
        select CPU_SH4
        select CPU_SUBTYPE_SH7750
+       select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
        bool "Support SH7751 processor"
        select CPU_SH4
+       select CPU_HAS_IPR_IRQ
        help
          Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU,
          or if you have a HD6417751R CPU.
@@ -121,6 +140,7 @@ config CPU_SUBTYPE_SH7751R
        bool "Support SH7751R processor"
        select CPU_SH4
        select CPU_SUBTYPE_SH7751
+       select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
        bool "Support SH7760 processor"
@@ -157,6 +177,11 @@ config CPU_SUBTYPE_SH7780
        select CPU_SH4A
        select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+       bool "Support SH7785 processor"
+       select CPU_SH4A
+       select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
@@ -216,13 +241,22 @@ config MEMORY_SIZE
 
 config 32BIT
        bool "Support 32-bit physical addressing through PMB"
-       depends on CPU_SH4A && MMU
+       depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
        default y
        help
          If you say Y here, physical addressing will be extended to
          32-bits through the SH-4A PMB. If this is not set, legacy
          29-bit physical addressing will be used.
 
+config X2TLB
+       bool "Enable extended TLB mode"
+       depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL
+       help
+         Selecting this option will enable the extended mode of the SH-X2
+         TLB. For legacy SH-X behaviour and interoperability, say N. For
+         all of the fun new features and a willingness to submit bug reports,
+         say Y.
+
 config VSYSCALL
        bool "Support vsyscall page"
        depends on MMU
@@ -236,17 +270,53 @@ config VSYSCALL
          For systems with an MMU that can afford to give up a page,
          (the default value) say Y.
 
+choice
+       prompt "Kernel page size"
+       default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+       bool "4kB"
+       help
+         This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+       bool "8kB"
+       depends on EXPERIMENTAL && X2TLB
+       help
+         This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+       bool "64kB"
+       depends on EXPERIMENTAL && CPU_SH4
+       help
+         This enables support for 64kB pages, possible on all SH-4
+         CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
 choice
        prompt "HugeTLB page size"
        depends on HUGETLB_PAGE && CPU_SH4 && MMU
        default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-       bool "64K"
+       bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+       bool "256kB"
+       depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
        bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+       bool "4MB"
+       depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+       bool "64MB"
+       depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
@@ -274,7 +344,6 @@ config SH_DIRECT_MAPPED
 
 config SH_WRITETHROUGH
        bool "Use write-through caching"
-       default y if CPU_SH2
        help
          Selecting this option will configure the caches in write-through
          mode, as opposed to the default write-back configuration.
index 2689cb24ea2b996aafe7a794af64e84380cf9323..6614033f6be93133a419f262d9091536cc05492e 100644 (file)
@@ -5,6 +5,7 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
+
 #include <linux/init.h>
 #include <linux/mm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-       unsigned long ccr;
-
-       /*
-        * On SH-2 the way bit isn't tracked in the address field
-        * if we're doing address array access .. instead, we need
-        * to manually switch out the way in the CCR.
-        */
-       ccr = ctrl_inl(CCR);
-       ccr &= ~0x00c0;
-       ccr |= way << cpu_data->dcache.way_shift;
-
-       /*
-        * Despite the number of sets being halved, we end up losing
-        * the first 2 ways to OCRAM instead of the last 2 (if we're
-        * 4-way). As a result, forcibly setting the W1 bit handily
-        * bumps us up 2 ways.
-        */
-       if (ccr & CCR_CACHE_ORA)
-               ccr |= 1 << (cpu_data->dcache.way_shift + 1);
-
-       ctrl_outl(ccr, CCR);
-
-       return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+       unsigned long v;
+       unsigned long begin, end;
+
+       begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+       end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+               & ~(L1_CACHE_BYTES-1);
+       for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+               /* FIXME cache purge */
+               ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+       }
+}
+
+void __flush_purge_region(void *start, int size)
+{
+       unsigned long v;
+       unsigned long begin, end;
+
+       begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+       end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+               & ~(L1_CACHE_BYTES-1);
+       for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+               ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+       }
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+       unsigned long v;
+       unsigned long begin, end;
+
+       begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+       end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+               & ~(L1_CACHE_BYTES-1);
+       for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+               ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+       }
 }
 
index e48cc22724d9e8d7d215544b4904f977c2551418..ae531affccbd75b9a088acbd0623fa2076aebb64 100644 (file)
  */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void)
  */
 
 /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
-#define MAX_P3_SEMAPHORES 16
+#define MAX_P3_MUTEXES 16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -115,7 +111,7 @@ void __init p3_cache_init(void)
                panic("%s failed.", __FUNCTION__);
 
        for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-               sema_init(&p3map_sem[i], 1);
+               mutex_init(&p3map_mutex[i]);
 }
 
 /*
@@ -229,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
         */
        if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
-               exec_offset = 0x20000000;
+               exec_offset = 0x20000000;
 
        local_irq_save(flags);
        __flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@ void flush_dcache_page(struct page *page)
 
                /* Loop all the D-cache */
                n = cpu_data->dcache.n_aliases;
-               for (i = 0; i < n; i++, addr += PAGE_SIZE)
+               for (i = 0; i < n; i++, addr += 4096)
                        flush_cache_4096(addr, phys);
        }
 
index 7b96425ae270ec932b324465001f0d42eb15ff25..8a706131e521d9c03485538bc5718130a53c57a1 100644 (file)
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * __clear_user_page, __clear_user, clear_page implementation of SuperH
  *
  * Copyright (C) 2001  Kaz Kojima
  * Copyright (C) 2001, 2002  Niibe Yutaka
- *
+ * Copyright (C) 2006  Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * clear_page_slow
 /*
  * r0 --- scratch
  * r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
  */
 ENTRY(clear_page_slow)
        mov     r4,r5
-       mov.w   .Llimit,r0
+       mov.l   .Llimit,r0
        add     r0,r5
        mov     #0,r0
        !
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow)
        !
        rts
         nop
-.Llimit:       .word   (4096-28)
+.Llimit:       .long   (PAGE_SIZE-28)
 
 ENTRY(__clear_user)
        !
@@ -164,10 +164,10 @@ ENTRY(__clear_user)
  * r0 --- scratch 
  * r4 --- to
  * r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
  */
 ENTRY(__clear_user_page)
-       mov.w   .L4096,r0
+       mov.l   .Lpsz,r0
        mov     r4,r6
        add     r0,r6
        mov     #0,r0
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page)
        !
        rts
         nop
-.L4096:        .word   4096
+.Lpsz: .long   PAGE_SIZE
 
 #endif
 
index 1addffe117c384725b1aca311f1e0d34d025f591..397c94c97315d9909e8788feadd3767b2b7b8584 100644 (file)
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
  * copy_page, __copy_user_page, __copy_user implementation of SuperH
  *
  * Copyright (C) 2001  Niibe Yutaka & Kaz Kojima
  * Copyright (C) 2002  Toshinobu Sugioka
- *
+ * Copyright (C) 2006  Paul Mundt
  */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
  * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- not used
  * r10 --- to
  * r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
        mov     r4,r10
        mov     r5,r11
        mov     r5,r8
-       mov.w   .L4096,r0
+       mov.l   .Lpsz,r0
        add     r0,r8
        !
 1:     mov.l   @r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
 
 /*
  * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
  * r9 --- orig_to
  * r10 --- to
  * r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
        mov     r5,r11
        mov     r6,r9
        mov     r5,r8
-       mov.w   .L4096,r0
+       mov.l   .Lpsz,r0
        add     r0,r8
        !
 1:     ocbi    @r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
        rts
         nop
 #endif
-.L4096:        .word   4096
+.Lpsz: .long   PAGE_SIZE
 /*
  * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  * Return the number of bytes NOT copied
index 68663b8f99aec28619ea7cb92d16978c5a08be25..716ebf568af20d2e2e7e303161d11304886ff794 100644 (file)
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long);
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-                             unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+                                       unsigned long writeaccess,
+                                       unsigned long address)
 {
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long page;
+       int si_code;
+       siginfo_t info;
+
+       trace_hardirqs_on();
+       local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
        tsk = current;
        mm = tsk->mm;
+       si_code = SEGV_MAPERR;
+
+       if (unlikely(address >= TASK_SIZE)) {
+               /*
+                * Synchronize this task's top level page-table
+                * with the 'reference' page table.
+                *
+                * Do _not_ use "tsk" here. We might be inside
+                * an interrupt in the middle of a task switch..
+                */
+               int offset = pgd_index(address);
+               pgd_t *pgd, *pgd_k;
+               pud_t *pud, *pud_k;
+               pmd_t *pmd, *pmd_k;
+
+               pgd = get_TTB() + offset;
+               pgd_k = swapper_pg_dir + offset;
+
+               /* This will never happen with the folded page table. */
+               if (!pgd_present(*pgd)) {
+                       if (!pgd_present(*pgd_k))
+                               goto bad_area_nosemaphore;
+                       set_pgd(pgd, *pgd_k);
+                       return;
+               }
+
+               pud = pud_offset(pgd, address);
+               pud_k = pud_offset(pgd_k, address);
+               if (pud_present(*pud) || !pud_present(*pud_k))
+                       goto bad_area_nosemaphore;
+               set_pud(pud, *pud_k);
+
+               pmd = pmd_offset(pud, address);
+               pmd_k = pmd_offset(pud_k, address);
+               if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+                       goto bad_area_nosemaphore;
+               set_pmd(pmd, *pmd_k);
+
+               return;
+       }
 
        /*
         * If we're in an interrupt or have no user
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
  * we can handle it..
  */
 good_area:
+       si_code = SEGV_ACCERR;
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
@@ -104,10 +151,13 @@ survive:
 bad_area:
        up_read(&mm->mmap_sem);
 
+bad_area_nosemaphore:
        if (user_mode(regs)) {
-               tsk->thread.address = address;
-               tsk->thread.error_code = writeaccess;
-               force_sig(SIGSEGV, tsk);
+               info.si_signo = SIGSEGV;
+               info.si_errno = 0;
+               info.si_code = si_code;
+               info.si_addr = (void *) address;
+               force_sig_info(SIGSEGV, &info, tsk);
                return;
        }
 
@@ -127,11 +177,9 @@ no_context:
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-       asm volatile("mov.l     %1, %0"
-                    : "=r" (page)
-                    : "m" (__m(MMU_TTB)));
+       page = (unsigned long)get_TTB();
        if (page) {
-               page = ((unsigned long *) page)[address >> 22];
+               page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
                printk(KERN_ALERT "*pde = %08lx\n", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
@@ -166,98 +214,13 @@ do_sigbus:
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
-       tsk->thread.address = address;
-       tsk->thread.error_code = writeaccess;
-       tsk->thread.trap_no = 14;
-       force_sig(SIGBUS, tsk);
+       info.si_signo = SIGBUS;
+       info.si_errno = 0;
+       info.si_code = BUS_ADRERR;
+       info.si_addr = (void *)address;
+       force_sig_info(SIGBUS, &info, tsk);
 
        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX            (P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX            P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-                                        unsigned long writeaccess,
-                                        unsigned long address)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t entry;
-       struct mm_struct *mm = current->mm;
-       spinlock_t *ptl;
-       int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-       if (kgdb_nofault && kgdb_bus_err_hook)
-               kgdb_bus_err_hook();
-#endif
-
-       /*
-        * We don't take page faults for P1, P2, and parts of P4, these
-        * are always mapped, whether it be due to legacy behaviour in
-        * 29-bit mode, or due to PMB configuration in 32-bit mode.
-        */
-       if (address >= P3SEG && address < P3_ADDR_MAX) {
-               pgd = pgd_offset_k(address);
-               mm = NULL;
-       } else {
-               if (unlikely(address >= TASK_SIZE || !mm))
-                       return 1;
-
-               pgd = pgd_offset(mm, address);
-       }
-
-       pud = pud_offset(pgd, address);
-       if (pud_none_or_clear_bad(pud))
-               return 1;
-       pmd = pmd_offset(pud, address);
-       if (pmd_none_or_clear_bad(pmd))
-               return 1;
-
-       if (mm)
-               pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-       else
-               pte = pte_offset_kernel(pmd, address);
-
-       entry = *pte;
-       if (unlikely(pte_none(entry) || pte_not_present(entry)))
-               goto unlock;
-       if (unlikely(writeaccess && !pte_write(entry)))
-               goto unlock;
-
-       if (writeaccess)
-               entry = pte_mkdirty(entry);
-       entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-       /*
-        * ITLB is not affected by "ldtlb" instruction.
-        * So, we need to flush the entry by ourselves.
-        */
-       __flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-       set_pte(pte, entry);
-       update_mmu_cache(NULL, address, entry);
-       ret = 0;
-unlock:
-       if (mm)
-               pte_unmap_unlock(pte, ptl);
-       return ret;
-}
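
The hunk above teaches do_page_fault() to resolve faults on kernel addresses (>= TASK_SIZE) by lazily copying the missing upper-level entries from swapper_pg_dir into the current page table, instead of going through the mmap_sem path. Below is a deliberately simplified, self-contained model of that copy-on-fault synchronization; the table size and entry values are invented and do not reflect the real SH layout:

#include <stdio.h>
#include <stdbool.h>

#define PTRS_PER_PGD 16			/* toy size, not the real SH value */

/* 0 means "not present"; any non-zero value stands in for a populated entry. */
static unsigned long swapper_pgd[PTRS_PER_PGD];	/* reference table */
static unsigned long task_pgd[PTRS_PER_PGD];	/* per-context table */

/* Returns true if the fault was resolved by copying the reference entry. */
static bool vmalloc_sync_one(unsigned long index)
{
	if (task_pgd[index])		/* already present: a real fault */
		return false;
	if (!swapper_pgd[index])	/* nothing to copy: bad area */
		return false;
	task_pgd[index] = swapper_pgd[index];
	return true;
}

int main(void)
{
	swapper_pgd[3] = 0xabc;		/* kernel mapping exists globally */
	printf("synced: %d, entry now %#lx\n",
	       vmalloc_sync_one(3), task_pgd[3]);
	return 0;
}
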
index 329059d6b54a79b2bdfb2eacdb2c4477f5936386..cf2c2ee35a376dab1ef632f27fd1be51bf5e38cd 100644 (file)
@@ -63,6 +63,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
 struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
 {
index 7154d1ce97859098b2c7647eee495c9fc13ef5d7..59f4cc18235b289c2849f4c3b48b72224f85b5fc 100644 (file)
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
        pmd_t *pmd;
        pte_t *pte;
 
-       pgd = swapper_pg_dir + pgd_index(addr);
+       pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return;
        }
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud)) {
-               pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-               if (pmd != pmd_offset(pud, 0)) {
-                       pud_ERROR(*pud);
-                       return;
-               }
+       pud = pud_alloc(NULL, pgd, addr);
+       if (unlikely(!pud)) {
+               pud_ERROR(*pud);
+               return;
        }
 
-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd)) {
-               pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-               set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-               if (pte != pte_offset_kernel(pmd, 0)) {
-                       pmd_ERROR(*pmd);
-                       return;
-               }
+       pmd = pmd_alloc(NULL, pud, addr);
+       if (unlikely(!pmd)) {
+               pmd_ERROR(*pmd);
+               return;
        }
 
        pte = pte_offset_kernel(pmd, addr);
@@ -155,9 +147,6 @@ extern char __init_begin, __init_end;
 
 /*
  * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
  */
 void __init paging_init(void)
 {
@@ -180,14 +169,11 @@ void __init paging_init(void)
         */
        {
                unsigned long max_dma, low, start_pfn;
-               pgd_t *pg_dir;
-               int i;
-
-               /* We don't need kernel mapping as hardware support that. */
-               pg_dir = swapper_pg_dir;
 
-               for (i = 0; i < PTRS_PER_PGD; i++)
-                       pgd_val(pg_dir[i]) = 0;
+               /* We don't need to map the kernel through the TLB, as
+                * it is permanently mapped using P1. So clear the
+                * entire pgd. */
+               memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
                /* Turn on the MMU */
                enable_mmu();
@@ -206,6 +192,10 @@ void __init paging_init(void)
                }
        }
 
+       /* Set an initial value for the MMU.TTB so we don't have to
+        * check for a null value. */
+       set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
        /*
         * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-       extern unsigned long empty_zero_page[1024];
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        extern unsigned long memory_start;
index a9fe80cfc2338feaf2bbf2797b9988ac3982633a..11d54c14982155341968b3ad76eab08499f1f9c7 100644 (file)
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
 {
        unsigned long end;
        unsigned long pfn;
-       pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-                                  _PAGE_DIRTY | _PAGE_ACCESSED |
-                                  _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
+       pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 
        address &= ~PMD_MASK;
        end = address + size;
index 1406d2e348ca9b802014d3ad3f37cd7d86661a0d..bb23679369d67d7c6d44ac2a74adf9c37061674d 100644 (file)
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
 
 static void clear_page_dma(void *to)
 {
-       extern unsigned long empty_zero_page[1024];
-
        /*
         * We get invoked quite early on, if the DMAC hasn't been initialized
         * yet, fall back on the slow manual implementation.
index 07371ed7a31328e49fcf34377abc3b7b247cffb5..3f98d2a4f936355507e4386855e442b7fa6860ac 100644 (file)
@@ -6,22 +6,12 @@
  *
  * Released under the terms of the GNU GPL v2.0.
  */
-#include <linux/init.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -37,10 +27,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
                clear_page(to);
        else {
-               pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-                                          _PAGE_RW | _PAGE_CACHABLE |
-                                          _PAGE_DIRTY | _PAGE_ACCESSED |
-                                          _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
                unsigned long phys_addr = PHYSADDR(to);
                unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
                pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
                pte_t entry;
                unsigned long flags;
 
-               entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-               down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+               entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+               mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
                set_pte(pte, entry);
                local_irq_save(flags);
                __flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
                update_mmu_cache(NULL, p3_addr, entry);
                __clear_user_page((void *)p3_addr, to);
                pte_clear(&init_mm, p3_addr, pte);
-               up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+               mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
        }
 }
 
@@ -77,10 +63,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
                copy_page(to, from);
        else {
-               pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-                                          _PAGE_RW | _PAGE_CACHABLE |
-                                          _PAGE_DIRTY | _PAGE_ACCESSED |
-                                          _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
                unsigned long phys_addr = PHYSADDR(to);
                unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
                pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@ void copy_user_page(void *to, void *from, unsigned long address,
                pte_t entry;
                unsigned long flags;
 
-               entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-               down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+               entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+               mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
                set_pte(pte, entry);
                local_irq_save(flags);
                __flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
                update_mmu_cache(NULL, p3_addr, entry);
                __copy_user_page((void *)p3_addr, from, to);
                pte_clear(&init_mm, p3_addr, pte);
-               up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+               mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
        }
 }
 
@@ -122,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
        }
        return pte;
 }
-
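
In clear_user_page()/copy_user_page() above, each temporary P3 mapping is serialized per cache colour: the mutex index is taken from the alias bits of the user address, (address & CACHE_ALIAS) >> 12. A small stand-alone sketch of that index computation, assuming the worst-case 64 KB direct-mapped d-cache (alias mask 0x3000) mentioned earlier in the file:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/* 64 KB direct-mapped d-cache => 4 possible synonyms => alias mask 0x3000. */
#define CACHE_ALIAS	0x3000UL

static unsigned int p3map_index(unsigned long address)
{
	return (address & CACHE_ALIAS) >> PAGE_SHIFT;	/* 0..3 */
}

int main(void)
{
	unsigned long addr;

	for (addr = 0x4000; addr <= 0x7000; addr += PAGE_SIZE)
		printf("address %#lx -> mutex %u\n", addr, p3map_index(addr));
	return 0;
}
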
index 92e745341e4d67335a23d7c09b934c48c8a02cca..b60ad83a7635ceceb19bc8896b5774f7f0c5c001 100644 (file)
@@ -30,7 +30,7 @@
 
 #define NR_PMB_ENTRIES 16
 
-static kmem_cache_t *pmb_cache;
+static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 
 static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr)
        } while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
        struct pmb_entry *pmbe = pmb;
 
@@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
        spin_unlock_irq(&pmb_list_lock);
 }
 
-static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
        spin_lock_irq(&pmb_list_lock);
        pmb_list_del(pmb);
index ac57638977ee639f332b541a9b2624a233a81d6d..0571755e9a84e8e6b2ae55e266043923f03423e8 100644 (file)
@@ -30,3 +30,5 @@ R7780MP                       SH_R7780MP
 TITAN                  SH_TITAN
 SHMIN                  SH_SHMIN
 7710VOIPGW             SH_7710VOIPGW
+7206SE                 SH_7206_SOLUTION_ENGINE
+7619SE                 SH_7619_SOLUTION_ENGINE
index ffb310e33cefbe505ba25521e7b50a2e5bb5d2d4..b9e7d54d7b8553d89d2c9988c561f4227d38d19b 100644 (file)
@@ -243,9 +243,7 @@ void __init setup_arch(char **cmdline_p)
                if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) {
                        reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE);
 
-                       initrd_start =
-                         (long) INITRD_START ? INITRD_START + PAGE_OFFSET +  __MEMORY_START : 0;
-
+                       initrd_start = (long) INITRD_START + PAGE_OFFSET + __MEMORY_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
index 9e2ffc45c0e0ffe480c614da00edee056903fec3..1666d3efb52e2fccbb56cca8808d40ab17512097 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/personality.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/stddef.h>
index 8e2f6c28b7390fc712a8d07f8e8e567fbd7b6853..4f72ab33bb2b0a7cb9caae2b09a135f96c8a6ed9 100644 (file)
@@ -154,7 +154,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_interrupt() || !mm)
+       if (in_atomic() || !mm)
                goto no_context;
 
        /* TLB misses upon some cache flushes get done under cli() */
index 187cf01750b80fe43ce2f8f32375b360a16a819f..4b455f61114670282914c7b6bf7354974d63864e 100644 (file)
@@ -53,6 +53,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
 {
index 5cc5ff7f8824d04f12665e12423b970fbf273553..b73e6b9067eddf5ba48ea26d503c607f67d2ea60 100644 (file)
@@ -11,6 +11,7 @@ SECTIONS
   . = 0x10000 + SIZEOF_HEADERS;
   .text 0xf0004000 :
   {
+    _text = .;
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
index 4d8ed9c651823ae82b3d30c6a5d947eb422a18b9..01fc6c25429255c27be0e6a94e36179e2693b86d 100644 (file)
@@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
        unsigned long vaddr;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       inc_preempt_count();
+       pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
@@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
        unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
 
        if (vaddr < FIXADDR_START) { // FIXME
-               dec_preempt_count();
-               preempt_check_resched();
+               pagefault_enable();
                return;
        }
 
@@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 #endif
 
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
 }
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
index a98f3ae175a343e48c88977e83fc2c3f96e04865..9ad84ff10a177320a2bfaf3d30f553380c9d042e 100644 (file)
@@ -141,7 +141,6 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
        value->tv_sec = jiffies / HZ;
 }
 
-#define elf_addr_t     u32
 #undef start_thread
 #define start_thread start_thread32
 #define init_elf_binfmt init_elf32_binfmt
index bd9de8c2a2aa1c29158134d0bdaf04754efc8703..4a6063f33e7a4155e084cd50ba58a2617f3fa3c3 100644 (file)
@@ -13,6 +13,7 @@ SECTIONS
   . = 0x4000;
   .text 0x0000000000404000 :
   {
+    _text = .;
     *(.text)
     SCHED_TEXT
     LOCK_TEXT
index 53b9b1f528e54ae75d9caeb90e3fa7bd90b67c8a..33fd0b265e707afdee93ec63322e9bff01220882 100644 (file)
@@ -235,6 +235,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
 {
index 09cb7fccc03a758ca2e0f8351734e8a550830c02..a8e8802eed4d2679d1c70ac563051379c7d3c4e1 100644 (file)
@@ -176,9 +176,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
 
 int bigkernel = 0;
 
-kmem_cache_t *pgtable_cache __read_mostly;
+struct kmem_cache *pgtable_cache __read_mostly;
 
-static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
 {
        clear_page(addr);
 }
index beaa02810f0e9e27ee6b65b8016984610bbdda0a..236d02f41a01ae576a0d0ab60670ecba29656668 100644 (file)
@@ -239,7 +239,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
        }
 }
 
-static kmem_cache_t *tsb_caches[8] __read_mostly;
+static struct kmem_cache *tsb_caches[8] __read_mostly;
 
 static const char *tsb_cache_names[8] = {
        "tsb_8KB",
index 3576b3cc505e65432a3068dbce33f6aecc1f666b..7d4190e5565498c6eea6dce02fc3cc092a65b384 100644 (file)
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
        return -1;
 }
 
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
                    struct tty_struct *tty, int irq)
 {
        struct list_head *ele, *next;
index 824386974f88902469f2d95713ae9e4bf6a2928f..9c2e7a758f211804d8618f7bae98c82e42f57fc5 100644 (file)
@@ -98,4 +98,4 @@ static int register_daemon(void)
        return 0;
 }
 
-__initcall(register_daemon);
+late_initcall(register_daemon);
index 426633e5d6e387cc43604557751a6ca641dcbdc1..aa3090d05a8f7f39fd38eabd332e0c01e4271a41 100644 (file)
@@ -31,9 +31,9 @@ static irqreturn_t line_interrupt(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void line_timer_cb(void *arg)
+static void line_timer_cb(struct work_struct *work)
 {
-       struct line *line = arg;
+       struct line *line = container_of(work, struct line, task.work);
 
        if(!line->throttled)
                chan_interrupt(&line->chan_list, &line->task, line->tty,
@@ -443,7 +443,7 @@ int line_open(struct line *lines, struct tty_struct *tty)
                 * is registered.
                 */
                enable_chan(line);
-               INIT_WORK(&line->task, line_timer_cb, line);
+               INIT_DELAYED_WORK(&line->task, line_timer_cb);
 
                if(!line->sigio){
                        chan_enable_winch(&line->chan_list, tty);
index c090fbd464e7c9c7713d0eb6456aa859d69eb831..52ccb7b53cd24d946d7d1d92260832cea552ea4d 100644 (file)
@@ -127,4 +127,4 @@ static int register_mcast(void)
        return 0;
 }
 
-__initcall(register_mcast);
+late_initcall(register_mcast);
index 7b172160fe0415375d3e5bc1bbbcb00c6ed71105..96f0189327af58260bcbfe14e304faf67cfe513c 100644 (file)
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {
 
 static LIST_HEAD(mc_requests);
 
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
        struct mconsole_entry *req;
        unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
        }
 }
 
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
 
 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
index ec9eb8bd9432dd31a89827aaca985fd88d082668..286bc0b3207fd7b13b861088473959933f8aa99b 100644 (file)
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
                 * same device, since it tests for (dev->flags & IFF_UP). So
                 * there's no harm in delaying the device shutdown. */
                schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
                goto out;
        }
        reactivate_fd(lp->fd, UM_ETH_IRQ);
index 6e1ef8558283254faf6c28f462cf71b90cb6626f..e67362acf0e7b554089d7206230812c6e970405a 100644 (file)
@@ -109,4 +109,4 @@ static int register_pcap(void)
        return 0;
 }
 
-__initcall(register_pcap);
+late_initcall(register_pcap);
index ce9f3733f73ee1a5a3f5b5c5518a06d5af685218..6dfe632f1c14d9a64ebc9a279a65025eefba2ac7 100644 (file)
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);
 
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
        struct port_list *port;
        struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
        local_irq_restore(flags);
 }
 
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
 
 static irqreturn_t port_interrupt(int irq, void *data)
 {
index 788da5439a2defd3b18837e74828b3ec75457ef6..25634bd1f585e13c9563695f2ec2bc13c8318718 100644 (file)
@@ -95,4 +95,4 @@ static int register_slip(void)
        return 0;
 }
 
-__initcall(register_slip);
+late_initcall(register_slip);
index ae322e1c8a8753e3794c18cafa20d5d3af6d9b2d..b3ed8fb874ab2ae0b3e8d54c5aa76d3fcd9a5d81 100644 (file)
@@ -119,4 +119,4 @@ static int register_slirp(void)
        return 0;
 }
 
-__initcall(register_slirp);
+late_initcall(register_slirp);
index 572d286ed2c666e21faa970fd2a88f5658056c9a..9003a343e1484691880fb6ebf20cf0963609f7d6 100644 (file)
@@ -27,7 +27,7 @@ struct chan {
        void *data;
 };
 
-extern void chan_interrupt(struct list_head *chans, struct work_struct *task,
+extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
                           struct tty_struct *tty, int irq);
 extern int parse_chan_pair(char *str, struct line *line, int device,
                           const struct chan_opts *opts);
index 7be24811bb3094b127667941f06c7297c085f2e9..214ee76c40df51ac4d7fe03142d7700fe1bae2bd 100644 (file)
@@ -51,7 +51,7 @@ struct line {
        char *tail;
 
        int sigio;
-       struct work_struct task;
+       struct delayed_work task;
        const struct line_driver *driver;
        int have_irq;
 };
index 6670cc992ecb55de9c98e9ca19131c6c1a556592..52b398bcafcf73ad738cbff7d6e0bd754eb7ddae 100644 (file)
@@ -75,7 +75,7 @@ union uml_pt_regs {
 #endif
 #ifdef UML_CONFIG_MODE_SKAS
        struct skas_regs {
-               unsigned long regs[HOST_FRAME_SIZE];
+               unsigned long regs[MAX_REG_NR];
                unsigned long fp[HOST_FP_SIZE];
                unsigned long xfp[HOST_XFP_SIZE];
                 struct faultinfo faultinfo;
index b492b12b4a1054c6d4c69ceb51cabdbc41c713ce..4fffae75ba531d1083f2f0c41b4a27ec3b40bd38 100644 (file)
@@ -9,6 +9,7 @@
 #include <sys/mman.h>
 #include <asm/ptrace.h>
 #include <asm/unistd.h>
+#include <asm/page.h>
 #include "stub-data.h"
 #include "kern_constants.h"
 #include "uml-config.h"
index 617bb9efc93424ef59fb413cf0394f7266ad12d1..66cb400c2c92bb18e103036ff299e76b52512b96 100644 (file)
@@ -108,7 +108,7 @@ union uml_pt_regs {
                 * file size, while i386 uses FRAME_SIZE.  Therefore, we need
                 * to use UM_FRAME_SIZE here instead of HOST_FRAME_SIZE.
                 */
-               unsigned long regs[UM_FRAME_SIZE];
+               unsigned long regs[MAX_REG_NR];
                unsigned long fp[HOST_FP_SIZE];
                 struct faultinfo faultinfo;
                long syscall;
index 16385e2ada854ebd1c965364a6d065f709294c3f..70541821775f3167c4c6871f8dd2a65917df18c4 100644 (file)
@@ -105,4 +105,4 @@ static int register_ethertap(void)
        return 0;
 }
 
-__initcall(register_ethertap);
+late_initcall(register_ethertap);
index 0edbac63c52708009ac792442fee6eb75dc2fbb5..76570a2c25c33bae1a799a61fb54078301304b29 100644 (file)
@@ -90,4 +90,4 @@ static int register_tuntap(void)
        return 0;
 }
 
-__initcall(register_tuntap);
+late_initcall(register_tuntap);
index e299ee5a753d7b1adddad393e926a4609509cfdb..49057d8bc668b88a5ac4a45a3559171253abfaed 100644 (file)
@@ -3,7 +3,6 @@
  * Licensed under the GPL
  */
 
-#include "linux/stddef.h"
 #include "linux/sched.h"
 #include "linux/slab.h"
 #include "linux/types.h"
index 5f3cc66858209f67c6caf9ebc9f46a808e907498..01212c88fcc4ffc490926f35d7af7861d2006e11 100644 (file)
@@ -4,9 +4,9 @@
  */
 
 #include <stdio.h>
+#include <stddef.h>
 #include <errno.h>
 #include <unistd.h>
-#include <linux/stddef.h>
 #include "ptrace_user.h"
 /* Grr, asm/user.h includes asm/ptrace.h, so has to follow ptrace_user.h */
 #include <asm/user.h>
index 6f4ef2b7fa4a04d8f3ddec2fd8a860534155d7bc..447306b20aea66245d3e5430a7acd11b733dd973 100644 (file)
@@ -2,7 +2,7 @@
 #include <signal.h>
 #include <asm/ptrace.h>
 #include <asm/user.h>
-#include <linux/stddef.h>
+#include <stddef.h>
 #include <sys/poll.h>
 
 #define DEFINE(sym, val) \
index 88d087f527c9ff20b3e7bc591685e6c0cf309367..3a5fd07fe064cef52b7e0a7a0ec42bfb04192168 100644 (file)
@@ -90,6 +90,7 @@
 
 /* Kernel text segment, and some constant data areas.  */
 #define TEXT_CONTENTS                                                        \
+               _text = .;                                                    \
                __stext = . ;                                                 \
                *(.text)                                                      \
                SCHED_TEXT                                                    \
index 010d2265f1cfb05086017020c3533cb737cca614..bfbb9bcae1238d95c0b1468fead6c37f60e1a51e 100644 (file)
@@ -122,7 +122,7 @@ endchoice
 
 choice
        prompt "Processor family"
-       default MK8
+       default GENERIC_CPU
 
 config MK8
        bool "AMD-Opteron/Athlon64"
@@ -130,16 +130,31 @@ config MK8
          Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
 
 config MPSC
-       bool "Intel EM64T"
+       bool "Intel P4 / older Netburst based Xeon"
        help
-         Optimize for Intel Pentium 4 and Xeon CPUs with Intel
-         Extended Memory 64 Technology(EM64T). For details see
+         Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs
+         with Intel Extended Memory 64 Technology(EM64T). For details see
          <http://www.intel.com/technology/64bitextensions/>.
+         Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
+         Netburst core and shouldn't use this option. You can distinguish them
+         using the cpu family field in /proc/cpuinfo. Family 15 is an older
+         Xeon, Family 6 a newer one (this rule only applies to systems that
+         support EM64T).
+
+config MCORE2
+       bool "Intel Core2 / newer Xeon"
+       help
+         Optimize for Intel Core2 and newer Xeons (51xx).
+         You can distinguish the newer Xeons from the older ones using
+         the cpu family field in /proc/cpuinfo. 15 is an older Xeon
+         (use CONFIG_MPSC then), 6 is a newer one. This rule only
+         applies to CPUs that support EM64T.
 
 config GENERIC_CPU
        bool "Generic-x86-64"
        help
          Generic x86-64 CPU.
+         Runs equally well on all x86-64 CPUs.
 
 endchoice
 
@@ -149,12 +164,12 @@ endchoice
 config X86_L1_CACHE_BYTES
        int
        default "128" if GENERIC_CPU || MPSC
-       default "64" if MK8
+       default "64" if MK8 || MCORE2
 
 config X86_L1_CACHE_SHIFT
        int
        default "7" if GENERIC_CPU || MPSC
-       default "6" if MK8
+       default "6" if MK8 || MCORE2
 
 config X86_INTERNODE_CACHE_BYTES
        int
@@ -344,11 +359,6 @@ config ARCH_DISCONTIGMEM_ENABLE
        depends on NUMA
        default y
 
-
-config ARCH_DISCONTIGMEM_ENABLE
-       def_bool y
-       depends on NUMA
-
 config ARCH_DISCONTIGMEM_DEFAULT
        def_bool y
        depends on NUMA
@@ -455,6 +465,17 @@ config CALGARY_IOMMU
          Normally the kernel will make the right choice by itself.
          If unsure, say Y.
 
+config CALGARY_IOMMU_ENABLED_BY_DEFAULT
+       bool "Should Calgary be enabled by default?"
+       default y
+       depends on CALGARY_IOMMU
+       help
+         Should Calgary be enabled by default? If you choose 'y', Calgary
+         will be used (if it exists). If you choose 'n', Calgary will not be
+         used even if it exists. If you choose 'n' and would like to use
+         Calgary anyway, pass 'iommu=calgary' on the kernel command line.
+         If unsure, say Y.
+
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
        bool
index 6e38d4daeed7f6786c7e3067bd5329f0da2cb5ef..b471b8550d0362c2bfe15b71f1e0903c034e3399 100644 (file)
@@ -30,6 +30,10 @@ cflags-y     :=
 cflags-kernel-y        :=
 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
 cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+# gcc doesn't support -march=core2 yet as of gcc 4.3, but I hope it
+# will eventually. Use -mtune=generic as a fallback.
+cflags-$(CONFIG_MCORE2) += \
+       $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
 cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
 
 cflags-y += -m64
index 0f5d44e86be56d8b3f61a2f204d24a720b6ef92d..96f226cfb3395b4e5a83f749bc33e46694ca1b00 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.19-rc2-git4
-# Sat Oct 21 03:38:52 2006
+# Linux kernel version: 2.6.19-git7
+# Wed Dec  6 23:50:47 2006
 #
 CONFIG_X86_64=y
 CONFIG_64BIT=y
@@ -47,13 +47,14 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 # CONFIG_CPUSETS is not set
+CONFIG_SYSFS_DEPRECATED=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_UID16=y
-# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -87,9 +88,7 @@ CONFIG_STOP_MACHINE=y
 # Block layer
 #
 CONFIG_BLOCK=y
-CONFIG_LBD=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
 
 #
 # IO Schedulers
@@ -111,10 +110,11 @@ CONFIG_X86_PC=y
 # CONFIG_X86_VSMP is not set
 # CONFIG_MK8 is not set
 # CONFIG_MPSC is not set
-CONFIG_GENERIC_CPU=y
-CONFIG_X86_L1_CACHE_BYTES=128
-CONFIG_X86_L1_CACHE_SHIFT=7
-CONFIG_X86_INTERNODE_CACHE_BYTES=128
+CONFIG_MCORE2=y
+# CONFIG_GENERIC_CPU is not set
+CONFIG_X86_L1_CACHE_BYTES=64
+CONFIG_X86_L1_CACHE_SHIFT=6
+CONFIG_X86_INTERNODE_CACHE_BYTES=64
 CONFIG_X86_TSC=y
 CONFIG_X86_GOOD_APIC=y
 # CONFIG_MICROCODE is not set
@@ -322,6 +322,7 @@ CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
 CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
 CONFIG_IPV6=y
 # CONFIG_IPV6_PRIVACY is not set
 # CONFIG_IPV6_ROUTER_PREF is not set
@@ -624,6 +625,7 @@ CONFIG_SATA_INTEL_COMBINED=y
 # CONFIG_PATA_IT821X is not set
 # CONFIG_PATA_JMICRON is not set
 # CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
 # CONFIG_PATA_MPIIX is not set
 # CONFIG_PATA_OLDPIIX is not set
 # CONFIG_PATA_NETCELL is not set
@@ -795,6 +797,7 @@ CONFIG_BNX2=y
 CONFIG_S2IO=m
 # CONFIG_S2IO_NAPI is not set
 # CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
 
 #
 # Token Ring devices
@@ -927,10 +930,6 @@ CONFIG_RTC=y
 # CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
-
-#
-# Ftape, the floppy tape device driver
-#
 CONFIG_AGP=y
 CONFIG_AGP_AMD64=y
 CONFIG_AGP_INTEL=y
@@ -1135,6 +1134,7 @@ CONFIG_USB_DEVICEFS=y
 # CONFIG_USB_BANDWIDTH is not set
 # CONFIG_USB_DYNAMIC_MINORS is not set
 # CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_MULTITHREAD_PROBE is not set
 # CONFIG_USB_OTG is not set
 
 #
@@ -1212,6 +1212,7 @@ CONFIG_USB_HIDINPUT=y
 # CONFIG_USB_KAWETH is not set
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
 CONFIG_USB_MON=y
 
index 82ef182de6aee197be2c796e799d24d37dff0ae3..543ef4f405e9b9589062e97d2ace859e27fbef83 100644 (file)
@@ -305,8 +305,6 @@ MODULE_AUTHOR("Eric Youngdale, Andi Kleen");
 #undef MODULE_DESCRIPTION
 #undef MODULE_AUTHOR
 
-#define elf_addr_t __u32
-
 static void elf32_init(struct pt_regs *);
 
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
@@ -351,7 +349,7 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
                bprm->loader += stack_base;
        bprm->exec += stack_base;
 
-       mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!mpnt) 
                return -ENOMEM; 
 
index 0e0a266d976fb1ee5c1b446c119b2fa2c6e34afb..ff499ef2a1ba10674cba34782c546cbeac43a51a 100644 (file)
@@ -584,6 +584,11 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->rdx = (unsigned long) &frame->info;
        regs->rcx = (unsigned long) &frame->uc;
 
+       /* Make -mregparm=3 work */
+       regs->rax = sig;
+       regs->rdx = (unsigned long) &frame->info;
+       regs->rcx = (unsigned long) &frame->uc;
+
        asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); 
        asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); 
        
index 3a01329473ab42642210804fc586122d433007f8..3e5ed20cba457c7f5e0ff149cab3ec124d20ef52 100644 (file)
@@ -49,7 +49,7 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
        struct mm_struct *mm = current->mm;
        int ret;
 
-       vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                return -ENOMEM;
 
index 4d9d5ed942b293f3383c42beb3418373143050ea..124b2d27b4acd2e912d0d7db6f9dbe1a85bb5019 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/sysdev.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
 
 #include <asm/atomic.h>
 #include <asm/smp.h>
@@ -45,6 +46,12 @@ int apic_calibrate_pmtmr __initdata;
 
 int disable_apic_timer __initdata;
 
+static struct resource *ioapic_resources;
+static struct resource lapic_resource = {
+       .name = "Local APIC",
+       .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+};
+
 /*
  * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
  * IPIs in place of local APIC timers
@@ -133,7 +140,6 @@ void clear_local_APIC(void)
                apic_write(APIC_LVTERR, APIC_LVT_MASKED);
        if (maxlvt >= 4)
                apic_write(APIC_LVTPC, APIC_LVT_MASKED);
-       v = GET_APIC_VERSION(apic_read(APIC_LVR));
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
 }
@@ -452,23 +458,30 @@ static struct {
 static int lapic_suspend(struct sys_device *dev, pm_message_t state)
 {
        unsigned long flags;
+       int maxlvt;
 
        if (!apic_pm_state.active)
                return 0;
 
+       maxlvt = get_maxlvt();
+
        apic_pm_state.apic_id = apic_read(APIC_ID);
        apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
        apic_pm_state.apic_ldr = apic_read(APIC_LDR);
        apic_pm_state.apic_dfr = apic_read(APIC_DFR);
        apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
        apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
-       apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+       if (maxlvt >= 4)
+               apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
        apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
        apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
        apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
        apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
        apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-       apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#ifdef CONFIG_X86_MCE_INTEL
+       if (maxlvt >= 5)
+               apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
        local_irq_save(flags);
        disable_local_APIC();
        local_irq_restore(flags);
@@ -479,10 +492,13 @@ static int lapic_resume(struct sys_device *dev)
 {
        unsigned int l, h;
        unsigned long flags;
+       int maxlvt;
 
        if (!apic_pm_state.active)
                return 0;
 
+       maxlvt = get_maxlvt();
+
        local_irq_save(flags);
        rdmsr(MSR_IA32_APICBASE, l, h);
        l &= ~MSR_IA32_APICBASE_BASE;
@@ -496,8 +512,12 @@ static int lapic_resume(struct sys_device *dev)
        apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
        apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
        apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-       apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
-       apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+#ifdef CONFIG_X86_MCE_INTEL
+       if (maxlvt >= 5)
+               apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+       if (maxlvt >= 4)
+               apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
        apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
        apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
        apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
@@ -585,6 +605,64 @@ static int __init detect_init_APIC (void)
        return 0;
 }
 
+#ifdef CONFIG_X86_IO_APIC
+static struct resource * __init ioapic_setup_resources(void)
+{
+#define IOAPIC_RESOURCE_NAME_SIZE 11
+       unsigned long n;
+       struct resource *res;
+       char *mem;
+       int i;
+
+       if (nr_ioapics <= 0)
+               return NULL;
+
+       n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
+       n *= nr_ioapics;
+
+       mem = alloc_bootmem(n);
+       res = (void *)mem;
+
+       if (mem != NULL) {
+               memset(mem, 0, n);
+               mem += sizeof(struct resource) * nr_ioapics;
+
+               for (i = 0; i < nr_ioapics; i++) {
+                       res[i].name = mem;
+                       res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+                       sprintf(mem,  "IOAPIC %u", i);
+                       mem += IOAPIC_RESOURCE_NAME_SIZE;
+               }
+       }
+
+       ioapic_resources = res;
+
+       return res;
+}
+
+static int __init ioapic_insert_resources(void)
+{
+       int i;
+       struct resource *r = ioapic_resources;
+
+       if (!r) {
+               printk("IO APIC resources could not be allocated.\n");
+               return -1;
+       }
+
+       for (i = 0; i < nr_ioapics; i++) {
+               insert_resource(&iomem_resource, r);
+               r++;
+       }
+
+       return 0;
+}
+
+/* Insert the IO APIC resources after PCI initialization has occurred to handle
+ * IO APICs that are mapped in on a BAR in PCI space. */
+late_initcall(ioapic_insert_resources);
+#endif
+
 void __init init_apic_mappings(void)
 {
        unsigned long apic_phys;
@@ -604,6 +682,11 @@ void __init init_apic_mappings(void)
        apic_mapped = 1;
        apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
 
+       /* Put local APIC into the resource map. */
+       lapic_resource.start = apic_phys;
+       lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
+       insert_resource(&iomem_resource, &lapic_resource);
+
        /*
         * Fetch the APIC ID of the BSP in case we have a
         * default configuration (or the MP table is broken).
@@ -613,7 +696,9 @@ void __init init_apic_mappings(void)
        {
                unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
                int i;
+               struct resource *ioapic_res;
 
+               ioapic_res = ioapic_setup_resources();
                for (i = 0; i < nr_ioapics; i++) {
                        if (smp_found_config) {
                                ioapic_phys = mp_ioapics[i].mpc_apicaddr;
@@ -625,6 +710,12 @@ void __init init_apic_mappings(void)
                        apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
                                        __fix_to_virt(idx), ioapic_phys);
                        idx++;
+
+                       if (ioapic_res != NULL) {
+                               ioapic_res->start = ioapic_phys;
+                               ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+                               ioapic_res++;
+                       }
                }
        }
 }
@@ -644,10 +735,9 @@ void __init init_apic_mappings(void)
 
 static void __setup_APIC_LVTT(unsigned int clocks)
 {
-       unsigned int lvtt_value, tmp_value, ver;
+       unsigned int lvtt_value, tmp_value;
        int cpu = smp_processor_id();
 
-       ver = GET_APIC_VERSION(apic_read(APIC_LVR));
        lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
 
        if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
index 3525f884af82772592adf860d926de115b3b2d8a..95a7a2c13131d0f9c99818f8859fb792eabb57c6 100644 (file)
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
-                                               void *data, size_t data_len)
-{
-       struct elf_note note;
-
-       note.n_namesz = strlen(name) + 1;
-       note.n_descsz = data_len;
-       note.n_type   = type;
-       memcpy(buf, &note, sizeof(note));
-       buf += (sizeof(note) +3)/4;
-       memcpy(buf, name, note.n_namesz);
-       buf += (note.n_namesz + 3)/4;
-       memcpy(buf, data, note.n_descsz);
-       buf += (note.n_descsz + 3)/4;
-
-       return buf;
-}
-
-static void final_note(u32 *buf)
-{
-       struct elf_note note;
-
-       note.n_namesz = 0;
-       note.n_descsz = 0;
-       note.n_type   = 0;
-       memcpy(buf, &note, sizeof(note));
-}
-
-static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
-{
-       struct elf_prstatus prstatus;
-       u32 *buf;
-
-       if ((cpu < 0) || (cpu >= NR_CPUS))
-               return;
-
-       /* Using ELF notes here is opportunistic.
-        * I need a well defined structure format
-        * for the data I pass, and I need tags
-        * on the data to indicate what information I have
-        * squirrelled away.  ELF notes happen to provide
-        * all of that, no need to invent something new.
-        */
-
-       buf = (u32*)per_cpu_ptr(crash_notes, cpu);
-
-       if (!buf)
-               return;
-
-       memset(&prstatus, 0, sizeof(prstatus));
-       prstatus.pr_pid = current->pid;
-       elf_core_copy_regs(&prstatus.pr_reg, regs);
-       buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
-                                       sizeof(prstatus));
-       final_note(buf);
-}
-
-static void crash_save_self(struct pt_regs *regs)
-{
-       int cpu;
-
-       cpu = smp_processor_id();
-       crash_save_this_cpu(regs, cpu);
-}
-
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -117,7 +52,7 @@ static int crash_nmi_callback(struct notifier_block *self,
                return NOTIFY_STOP;
        local_irq_disable();
 
-       crash_save_this_cpu(regs, cpu);
+       crash_save_cpu(regs, cpu);
        disable_local_APIC();
        atomic_dec(&waiting_for_crash_ipi);
        /* Assume hlt works */
@@ -196,5 +131,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
 
        disable_IO_APIC();
 
-       crash_save_self(regs);
+       crash_save_cpu(regs, smp_processor_id());
 }
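
The append_elf_note()/final_note()/crash_save_this_cpu() helpers removed above are superseded by the generic crash_save_cpu(); the note layout itself is unchanged: a header, a NUL-terminated name and the payload, each advanced in 4-byte units. A self-contained user-space sketch of that packing (the NT_PRSTATUS value and buffer size here are placeholders):

#include <stdio.h>
#include <string.h>

typedef unsigned int u32;

struct elf_note {		/* mirrors Elf32_Nhdr / Elf64_Nhdr */
	u32 n_namesz;
	u32 n_descsz;
	u32 n_type;
};

static u32 *append_elf_note(u32 *buf, const char *name, unsigned type,
			    const void *data, size_t data_len)
{
	struct elf_note note = {
		.n_namesz = strlen(name) + 1,
		.n_descsz = data_len,
		.n_type   = type,
	};

	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3) / 4;		/* header, 4-byte aligned */
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3) / 4;		/* name padded to 4 bytes */
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3) / 4;		/* payload padded to 4 bytes */
	return buf;
}

int main(void)
{
	u32 buf[64] = { 0 };
	int payload = 42;
	u32 *end = append_elf_note(buf, "CORE", 1 /* placeholder type */,
				   &payload, sizeof(payload));

	printf("note occupies %zu words\n", (size_t)(end - buf));
	return 0;
}
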
index 68273bff58ccf251f1f83c5da27e4a71b8f97f5b..829698f6d0490962c387f884f550678b90f8db19 100644 (file)
@@ -69,11 +69,18 @@ static void nvidia_bugs(void)
 
 static void ati_bugs(void)
 {
-       if (timer_over_8254 == 1) {
-               timer_over_8254 = 0;
-               printk(KERN_INFO
-               "ATI board detected. Disabling timer routing over 8254.\n");
-       }
+}
+
+static void intel_bugs(void)
+{
+       u16 device = read_pci_config_16(0, 0, 0, PCI_DEVICE_ID);
+
+#ifdef CONFIG_SMP
+       if (device == PCI_DEVICE_ID_INTEL_E7320_MCH ||
+           device == PCI_DEVICE_ID_INTEL_E7520_MCH ||
+           device == PCI_DEVICE_ID_INTEL_E7525_MCH)
+               quirk_intel_irqbalance();
+#endif
 }
 
 struct chipset {
@@ -85,6 +92,7 @@ static struct chipset early_qrk[] = {
        { PCI_VENDOR_ID_NVIDIA, nvidia_bugs },
        { PCI_VENDOR_ID_VIA, via_bugs },
        { PCI_VENDOR_ID_ATI, ati_bugs },
+       { PCI_VENDOR_ID_INTEL, intel_bugs},
        {}
 };
 
index 7d401b00d8227dbd6182524259a402822a903db6..601d332c4b79edc11252d6a835bbe77e6a2812e9 100644 (file)
@@ -230,7 +230,6 @@ ENTRY(system_call)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
-       CFI_REMEMBER_STATE
        jnz tracesys
        cmpq $__NR_syscall_max,%rax
        ja badsys
@@ -241,7 +240,6 @@ ENTRY(system_call)
  * Syscall return path ending with SYSRET (fast path)
  * Has incomplete stack frame and undefined top of stack. 
  */            
-       .globl ret_from_sys_call
 ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
@@ -251,8 +249,8 @@ sysret_check:
        TRACE_IRQS_OFF
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
-       CFI_REMEMBER_STATE
        jnz  sysret_careful 
+       CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
         */
@@ -265,10 +263,10 @@ sysret_check:
        swapgs
        sysretq
 
+       CFI_RESTORE_STATE
        /* Handle reschedules */
        /* edx: work, edi: workmask */  
 sysret_careful:
-       CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
@@ -306,7 +304,6 @@ badsys:
 
        /* Do syscall tracing */
 tracesys:                       
-       CFI_RESTORE_STATE
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp)
        FIXUP_TOP_OF_STACK %rdi
@@ -322,32 +319,13 @@ tracesys:
        call *sys_call_table(,%rax,8)
 1:     movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */
-       jmp int_ret_from_sys_call
-       CFI_ENDPROC
-END(system_call)
                
 /* 
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
- */    
-ENTRY(int_ret_from_sys_call)
-       CFI_STARTPROC   simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,SS+8-ARGOFFSET
-       /*CFI_REL_OFFSET        ss,SS-ARGOFFSET*/
-       CFI_REL_OFFSET  rsp,RSP-ARGOFFSET
-       /*CFI_REL_OFFSET        rflags,EFLAGS-ARGOFFSET*/
-       /*CFI_REL_OFFSET        cs,CS-ARGOFFSET*/
-       CFI_REL_OFFSET  rip,RIP-ARGOFFSET
-       CFI_REL_OFFSET  rdx,RDX-ARGOFFSET
-       CFI_REL_OFFSET  rcx,RCX-ARGOFFSET
-       CFI_REL_OFFSET  rax,RAX-ARGOFFSET
-       CFI_REL_OFFSET  rdi,RDI-ARGOFFSET
-       CFI_REL_OFFSET  rsi,RSI-ARGOFFSET
-       CFI_REL_OFFSET  r8,R8-ARGOFFSET
-       CFI_REL_OFFSET  r9,R9-ARGOFFSET
-       CFI_REL_OFFSET  r10,R10-ARGOFFSET
-       CFI_REL_OFFSET  r11,R11-ARGOFFSET
+ */
+       .globl int_ret_from_sys_call
+int_ret_from_sys_call:
        cli
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
@@ -394,8 +372,6 @@ int_very_careful:
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
-       cli
-       TRACE_IRQS_OFF
        jmp int_restore_rest
        
 int_signal:
@@ -411,7 +387,7 @@ int_restore_rest:
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
-END(int_ret_from_sys_call)
+END(system_call)
                
 /* 
  * Certain special system calls that need to save a complete full stack frame.
index 8e78a75d1866ffc433cb2eb4d28dc7047a0e9a64..b007433f96bbc540b2d71ef9b50f81d8349a2174 100644 (file)
@@ -33,7 +33,7 @@ extern struct genapic apic_flat;
 extern struct genapic apic_physflat;
 
 struct genapic *genapic = &apic_flat;
-
+struct genapic *genapic_force;
 
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
@@ -46,6 +46,13 @@ void __init clustered_apic_check(void)
        u8 cluster_cnt[NUM_APIC_CLUSTERS];
        int max_apic = 0;
 
+       /* genapic selection can be forced because of certain quirks.
+        */
+       if (genapic_force) {
+               genapic = genapic_force;
+               goto print;
+       }
+
 #if defined(CONFIG_ACPI)
        /*
         * Some x86_64 machines use physical APIC mode regardless of how many
index 9561eb3c5b5c5e78eed93277096eaf9d68752a64..cc230b93cd1c7081ea4483ac2aa8ec8192eb972a 100644 (file)
@@ -57,10 +57,12 @@ void __init x86_64_start_kernel(char * real_mode_data)
 {
        int i;
 
-       for (i = 0; i < 256; i++)
+       /* clear bss before set_intr_gate with early_idt_handler */
+       clear_bss();
+
+       for (i = 0; i < IDT_ENTRIES; i++)
                set_intr_gate(i, early_idt_handler);
        asm volatile("lidt %0" :: "m" (idt_descr));
-       clear_bss();
 
        early_printk("Kernel alive\n");
 
index 3aa1e9bb781df686854f34816b0fff661ccb9847..1d58c13bc6bc0d16d0b859846e3549c7131c5d61 100644 (file)
@@ -82,11 +82,8 @@ int save_i387(struct _fpstate __user *buf)
        struct task_struct *tsk = current;
        int err = 0;
 
-       { 
-               extern void bad_user_i387_struct(void); 
-               if (sizeof(struct user_i387_struct) != sizeof(tsk->thread.i387.fxsave))
-                       bad_user_i387_struct();
-       } 
+       BUILD_BUG_ON(sizeof(struct user_i387_struct) !=
+                       sizeof(tsk->thread.i387.fxsave));
 
        if ((unsigned long)buf % 16) 
                printk("save_i387: bad fpstate %p\n",buf); 
index c4ef801b765b8004033c83d2f6b1ac94b822fb9f..d73c79e821f172ee3efe48f490e97b947bb8c153 100644 (file)
@@ -76,7 +76,8 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
        IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
        IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
 
-void (*interrupt[NR_IRQS])(void) = {
+/* for the irq vectors */
+static void (*interrupt[NR_VECTORS - FIRST_EXTERNAL_VECTOR])(void) = {
                                          IRQLIST_16(0x2), IRQLIST_16(0x3),
        IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
        IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
index c80081a6ba415584553eea8c78377663914e4347..2a1dcd5f69c2599e3b3e4b41b355a10f672f79d7 100644 (file)
@@ -55,10 +55,6 @@ int sis_apic_bug; /* not actually supported, dummy for compile */
 
 static int no_timer_check;
 
-static int disable_timer_pin_1 __initdata;
-
-int timer_over_8254 __initdata = 1;
-
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -178,14 +174,20 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
  * the interrupt, and we need to make sure the entry is fully populated
  * before that happens.
  */
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
-       unsigned long flags;
        union entry_union eu;
        eu.entry = e;
-       spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
+static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&ioapic_lock, flags);
+       __ioapic_write_entry(apic, pin, e);
        spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -348,29 +350,6 @@ static int __init disable_ioapic_setup(char *str)
 }
 early_param("noapic", disable_ioapic_setup);
 
-/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-static int __init disable_timer_pin_setup(char *arg)
-{
-       disable_timer_pin_1 = 1;
-       return 1;
-}
-__setup("disable_timer_pin_1", disable_timer_pin_setup);
-
-static int __init setup_disable_8254_timer(char *s)
-{
-       timer_over_8254 = -1;
-       return 1;
-}
-static int __init setup_enable_8254_timer(char *s)
-{
-       timer_over_8254 = 2;
-       return 1;
-}
-
-__setup("disable_8254_timer", setup_disable_8254_timer);
-__setup("enable_8254_timer", setup_enable_8254_timer);
-
-
 /*
  * Find the IRQ entry number of a certain pin.
  */
@@ -750,6 +729,22 @@ static int assign_irq_vector(int irq, cpumask_t mask, cpumask_t *result)
        return vector;
 }
 
+static void __clear_irq_vector(int irq)
+{
+       cpumask_t mask;
+       int cpu, vector;
+
+       BUG_ON(!irq_vector[irq]);
+
+       vector = irq_vector[irq];
+       cpus_and(mask, irq_domain[irq], cpu_online_map);
+       for_each_cpu_mask(cpu, mask)
+               per_cpu(vector_irq, cpu)[vector] = -1;
+
+       irq_vector[irq] = 0;
+       irq_domain[irq] = CPU_MASK_NONE;
+}
+
 void __setup_vector_irq(int cpu)
 {
        /* Initialize vector_irq on a new cpu */
@@ -794,27 +789,65 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
                                              handle_edge_irq, "edge");
        }
 }
-
-static void __init setup_IO_APIC_irqs(void)
+static void __init setup_IO_APIC_irq(int apic, int pin, int idx, int irq)
 {
        struct IO_APIC_route_entry entry;
-       int apic, pin, idx, irq, first_notcon = 1, vector;
+       int vector;
        unsigned long flags;
 
-       apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
-       for (apic = 0; apic < nr_ioapics; apic++) {
-       for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+       /*
+        * add it to the IO-APIC irq-routing table:
+        */
+       memset(&entry,0,sizeof(entry));
 
-               /*
-                * add it to the IO-APIC irq-routing table:
-                */
-               memset(&entry,0,sizeof(entry));
+       entry.delivery_mode = INT_DELIVERY_MODE;
+       entry.dest_mode = INT_DEST_MODE;
+       entry.mask = 0;                         /* enable IRQ */
+       entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+
+       entry.trigger = irq_trigger(idx);
+       entry.polarity = irq_polarity(idx);
 
-               entry.delivery_mode = INT_DELIVERY_MODE;
-               entry.dest_mode = INT_DEST_MODE;
-               entry.mask = 0;                         /* enable IRQ */
+       if (irq_trigger(idx)) {
+               entry.trigger = 1;
+               entry.mask = 1;
                entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
+       }
+
+       if (!apic && !IO_APIC_IRQ(irq))
+               return;
+
+       if (IO_APIC_IRQ(irq)) {
+               cpumask_t mask;
+               vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
+               if (vector < 0)
+                       return;
+
+               entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
+               entry.vector = vector;
+
+               ioapic_register_intr(irq, vector, IOAPIC_AUTO);
+               if (!apic && (irq < 16))
+                       disable_8259A_irq(irq);
+       }
+
+       ioapic_write_entry(apic, pin, entry);
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       set_native_irq_info(irq, TARGET_CPUS);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+}
+
+static void __init setup_IO_APIC_irqs(void)
+{
+       int apic, pin, idx, irq, first_notcon = 1;
+
+       apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+
+       for (apic = 0; apic < nr_ioapics; apic++) {
+       for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
 
                idx = find_irq_entry(apic,pin,mp_INT);
                if (idx == -1) {
@@ -826,39 +859,11 @@ static void __init setup_IO_APIC_irqs(void)
                        continue;
                }
 
-               entry.trigger = irq_trigger(idx);
-               entry.polarity = irq_polarity(idx);
-
-               if (irq_trigger(idx)) {
-                       entry.trigger = 1;
-                       entry.mask = 1;
-                       entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
-               }
-
                irq = pin_2_irq(idx, apic, pin);
                add_pin_to_irq(irq, apic, pin);
 
-               if (!apic && !IO_APIC_IRQ(irq))
-                       continue;
-
-               if (IO_APIC_IRQ(irq)) {
-                       cpumask_t mask;
-                       vector = assign_irq_vector(irq, TARGET_CPUS, &mask);
-                       if (vector < 0)
-                               continue;
-
-                       entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
-                       entry.vector = vector;
-
-                       ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-                       if (!apic && (irq < 16))
-                               disable_8259A_irq(irq);
-               }
-               ioapic_write_entry(apic, pin, entry);
+               setup_IO_APIC_irq(apic, pin, idx, irq);
 
-               spin_lock_irqsave(&ioapic_lock, flags);
-               set_native_irq_info(irq, TARGET_CPUS);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
        }
        }
 
@@ -1563,10 +1568,33 @@ static inline void unlock_ExtINT_logic(void)
  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
  * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
  * fanatically on his truly buggy board.
- *
- * FIXME: really need to revamp this for modern platforms only.
  */
-static inline void check_timer(void)
+
+static int try_apic_pin(int apic, int pin, char *msg)
+{
+       apic_printk(APIC_VERBOSE, KERN_INFO
+                   "..TIMER: trying IO-APIC=%d PIN=%d %s",
+                   apic, pin, msg);
+
+       /*
+        * Ok, does IRQ0 through the IOAPIC work?
+        */
+       if (!no_timer_check && timer_irq_works()) {
+               nmi_watchdog_default();
+               if (nmi_watchdog == NMI_IO_APIC) {
+                       disable_8259A_irq(0);
+                       setup_nmi();
+                       enable_8259A_irq(0);
+               }
+               return 1;
+       }
+       clear_IO_APIC_pin(apic, pin);
+       apic_printk(APIC_QUIET, KERN_ERR " .. failed\n");
+       return 0;
+}
+
+/* The function from hell */
+static void check_timer(void)
 {
        int apic1, pin1, apic2, pin2;
        int vector;
@@ -1587,61 +1615,43 @@ static inline void check_timer(void)
         */
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);
-       if (timer_over_8254 > 0)
-               enable_8259A_irq(0);
 
        pin1  = find_isa_irq_pin(0, mp_INT);
        apic1 = find_isa_irq_apic(0, mp_INT);
        pin2  = ioapic_i8259.pin;
        apic2 = ioapic_i8259.apic;
 
-       apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
-               vector, apic1, pin1, apic2, pin2);
+       /* Do this first, otherwise we get double interrupts on ATI boards */
+       if ((pin1 != -1) && try_apic_pin(apic1, pin1,"with 8259 IRQ0 disabled"))
+               return;
 
-       if (pin1 != -1) {
-               /*
-                * Ok, does IRQ0 through the IOAPIC work?
-                */
-               unmask_IO_APIC_irq(0);
-               if (!no_timer_check && timer_irq_works()) {
-                       nmi_watchdog_default();
-                       if (nmi_watchdog == NMI_IO_APIC) {
-                               disable_8259A_irq(0);
-                               setup_nmi();
-                               enable_8259A_irq(0);
-                       }
-                       if (disable_timer_pin_1 > 0)
-                               clear_IO_APIC_pin(0, pin1);
-                       return;
-               }
-               clear_IO_APIC_pin(apic1, pin1);
-               apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
-                               "connected to IO-APIC\n");
-       }
+       /* Now try again with the 8259A IRQ0 enabled.
+          Assumes the timer is on IO-APIC 0 ?!? */
+       enable_8259A_irq(0);
+       unmask_IO_APIC_irq(0);
+       if (try_apic_pin(apic1, pin1, "with 8259 IRQ0 enabled"))
+               return;
+       disable_8259A_irq(0);
+
+       /* Always try pin0 and pin2 on APIC 0 to handle buggy timer overrides
+          on Nvidia boards */
+       if (!(apic1 == 0 && pin1 == 0) &&
+           try_apic_pin(0, 0, "fallback with 8259 IRQ0 disabled"))
+               return;
+       if (!(apic1 == 0 && pin1 == 2) &&
+           try_apic_pin(0, 2, "fallback with 8259 IRQ0 disabled"))
+               return;
 
-       apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
-                               "through the 8259A ... ");
+       /* Then try pure 8259A routing as reported by the BIOS */
+       enable_8259A_irq(0);
        if (pin2 != -1) {
-               apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
-                       apic2, pin2);
-               /*
-                * legacy devices should be connected to IO APIC #0
-                */
                setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
-               if (timer_irq_works()) {
-                       apic_printk(APIC_VERBOSE," works.\n");
-                       nmi_watchdog_default();
-                       if (nmi_watchdog == NMI_IO_APIC) {
-                               setup_nmi();
-                       }
+               if (try_apic_pin(apic2,pin2,"8259A broadcast ExtINT from BIOS"))
                        return;
-               }
-               /*
-                * Cleanup, just in case ...
-                */
-               clear_IO_APIC_pin(apic2, pin2);
        }
-       apic_printk(APIC_VERBOSE," failed.\n");
+
+       /* Tried all possibilities to go through the IO-APIC. Now come the
+          really cheesy fallbacks. */
 
        if (nmi_watchdog == NMI_IO_APIC) {
                printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
@@ -1837,7 +1847,7 @@ void destroy_irq(unsigned int irq)
        dynamic_irq_cleanup(irq);
 
        spin_lock_irqsave(&vector_lock, flags);
-       irq_vector[irq] = 0;
+       __clear_irq_vector(irq);
        spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -2139,7 +2149,15 @@ void __init setup_ioapic_dest(void)
                        if (irq_entry == -1)
                                continue;
                        irq = pin_2_irq(irq_entry, ioapic, pin);
-                       set_ioapic_affinity_irq(irq, TARGET_CPUS);
+
+                       /* setup_IO_APIC_irqs() could fail to get a vector for some
+                        * devices when there are too many of them, because at that
+                        * time only the boot cpu is online.
+                        */
+                       if (!irq_vector[irq])
+                               setup_IO_APIC_irq(ioapic, pin, irq_entry, irq);
+                       else
+                               set_ioapic_affinity_irq(irq, TARGET_CPUS);
                }
 
        }
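Several of the io_apic.c changes above follow the usual kernel split between a double-underscore helper that assumes the lock is already held (__ioapic_write_entry, __clear_irq_vector) and a thin wrapper that takes it (ioapic_write_entry). A generic sketch of that split with hypothetical names, not taken from the patch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(table_lock);
    static unsigned int table[16];

    /* Caller must already hold table_lock. */
    static void __table_write(int idx, unsigned int val)
    {
            table[idx] = val;
    }

    /* Wrapper for callers that do not hold the lock. */
    static void table_write(int idx, unsigned int val)
    {
            unsigned long flags;

            spin_lock_irqsave(&table_lock, flags);
            __table_write(idx, val);
            spin_unlock_irqrestore(&table_lock, flags);
    }

The unlocked variant lets a caller that already holds the lock, such as setup code writing several entries, avoid taking it twice.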
index e46c55856d40ae6fb85daf785ca438dccba1b042..0c06af6c13bcb6c4d4e8331da76279442ce378a3 100644 (file)
@@ -120,7 +120,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 
        if (likely(irq < NR_IRQS))
                generic_handle_irq(irq);
-       else
+       else if (printk_ratelimit())
                printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
                        __func__, smp_processor_id(), vector);
 
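The one-line change above gates the "No irq handler" message behind printk_ratelimit(), so a storm of stray vectors cannot flood the console. A hedged sketch of the same guard in a hypothetical handler:

    #include <linux/kernel.h>

    static void report_stray_vector(int cpu, int vector)
    {
            /* printk_ratelimit() returns 0 once the limit is exceeded,
               silencing repeated occurrences of the same complaint. */
            if (printk_ratelimit())
                    printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
                           __func__, cpu, vector);
    }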
index ac241567e682eb45c4a7918b805082cddd52f5cb..209c8c0bec717c226f9338faced9ac243b7904c9 100644 (file)
@@ -224,7 +224,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
        mutex_lock(&kprobe_mutex);
-       free_insn_slot(p->ainsn.insn);
+       free_insn_slot(p->ainsn.insn, 0);
        mutex_unlock(&kprobe_mutex);
 }
 
index bbea88801d883cc8928c6a9c91fe5bf5efbf4c9f..ac085038af2924aab65b5eb82c77fdc26d5430d6 100644 (file)
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
                do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
        on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
        schedule_delayed_work(&mcheck_work, check_interval * HZ);
@@ -641,7 +641,6 @@ static __cpuinit int mce_create_device(unsigned int cpu)
        return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void mce_remove_device(unsigned int cpu)
 {
        int i;
@@ -652,6 +651,7 @@ static void mce_remove_device(unsigned int cpu)
        sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
        sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
        sysdev_unregister(&per_cpu(device_mce,cpu));
+       memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@@ -674,7 +674,6 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 static struct notifier_block mce_cpu_notifier = {
        .notifier_call = mce_cpu_callback,
 };
-#endif
 
 static __init int mce_init_device(void)
 {
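The mce.c hunks above move to the reworked workqueue API: the callback now takes a struct work_struct * and the periodic poller is a delayed work item that re-arms itself. A minimal self-rearming poller in the same style; the name and interval below are illustrative only:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void poll_fn(struct work_struct *work);
    static DECLARE_DELAYED_WORK(poll_work, poll_fn);

    static void poll_fn(struct work_struct *work)
    {
            /* ... periodic check would go here ... */

            /* Re-arm for the next interval, as mcheck_timer() does above. */
            schedule_delayed_work(&poll_work, 5 * 60 * HZ);
    }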
index 883fe747f64c3346e21f9d8c7ceed22244287b47..fa09debad4b7c18b34966ebb3bd13bcca0937b9e 100644 (file)
@@ -551,7 +551,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * let's be hotplug friendly.
  * in case of multiple core processors, the first core always takes ownership
@@ -594,12 +593,14 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 
        sprintf(name, "threshold_bank%i", bank);
 
+#ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;
                return;
        }
+#endif
 
        /* remove all sibling symlinks before unregistering */
        for_each_cpu_mask(i, b->cpus) {
@@ -656,7 +657,6 @@ static int threshold_cpu_callback(struct notifier_block *nfb,
 static struct notifier_block threshold_cpu_notifier = {
        .notifier_call = threshold_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static __init int threshold_init_device(void)
 {
index b147ab19fbd4d65cef63c494ed8f2ea80dddd90e..08072568847d997ce0cc50d05ff3831ef33a07d9 100644 (file)
@@ -35,8 +35,6 @@
 int smp_found_config;
 unsigned int __initdata maxcpus = NR_CPUS;
 
-int acpi_found_madt;
-
 /*
  * Various Linux-internal data structures created from the
  * MP-table.
index 7af9cb3e2d99a8e71e54d89c170245580b2d470e..27e95e7922c16e6f2d637f9b677927cc049b989e 100644 (file)
  *  Mikael Pettersson  : PM converted to driver model. Disable/enable API.
  */
 
+#include <linux/nmi.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/sysdev.h>
-#include <linux/nmi.h>
 #include <linux/sysctl.h>
 #include <linux/kprobes.h>
+#include <linux/cpumask.h>
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
@@ -41,6 +42,8 @@ int panic_on_unrecovered_nmi;
 static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
 static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);
 
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
 /* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
  */
@@ -782,6 +785,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
        int sum;
        int touched = 0;
+       int cpu = smp_processor_id();
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        u64 dummy;
        int rc=0;
@@ -799,6 +803,16 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
                touched = 1;
        }
 
+       if (cpu_isset(cpu, backtrace_mask)) {
+               static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
+
+               spin_lock(&lock);
+               printk("NMI backtrace for cpu %d\n", cpu);
+               dump_stack();
+               spin_unlock(&lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+
 #ifdef CONFIG_X86_MCE
        /* Could check oops_in_progress here too, but it's safer
           not to */
@@ -931,6 +945,19 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 
 #endif
 
+void __trigger_all_cpu_backtrace(void)
+{
+       int i;
+
+       backtrace_mask = cpu_online_map;
+       /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpus_empty(backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+}
+
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
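The nmi.c additions implement a simple rendezvous: the trigger copies cpu_online_map into backtrace_mask, each CPU that takes its next NMI tick dumps its stack and clears its own bit, and the trigger polls until the mask empties or ten seconds pass. A stripped-down sketch of that handshake with a hypothetical per-CPU hook; NMI delivery itself is omitted, and the spinlock the patch uses to serialise the printks is dropped for brevity:

    #include <linux/kernel.h>
    #include <linux/cpumask.h>
    #include <linux/delay.h>
    #include <linux/smp.h>

    static cpumask_t backtrace_mask = CPU_MASK_NONE;

    /* Called on each CPU from its periodic NMI path (hypothetical hook). */
    static void maybe_dump_this_cpu(void)
    {
            int cpu = smp_processor_id();

            if (cpu_isset(cpu, backtrace_mask)) {
                    dump_stack();
                    cpu_clear(cpu, backtrace_mask);
            }
    }

    /* Ask every online CPU for a backtrace and wait up to 10 seconds. */
    static void trigger_all_backtraces(void)
    {
            int i;

            backtrace_mask = cpu_online_map;
            for (i = 0; i < 10 * 1000; i++) {
                    if (cpus_empty(backtrace_mask))
                            break;
                    mdelay(1);
            }
    }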
index 37a770859e7180ae38e06c22d6064fbe1a4ebeab..3215675ab128b35ce7c793cd234b62f5a8ac795a 100644 (file)
 #include <asm/pci-direct.h>
 #include <asm/system.h>
 #include <asm/dma.h>
+#include <asm/rio.h>
+
+#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
+int use_calgary __read_mostly = 1;
+#else
+int use_calgary __read_mostly = 0;
+#endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */
 
 #define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
 #define PCI_VENDOR_DEVICE_ID_CALGARY \
@@ -115,14 +122,35 @@ static const unsigned long phb_offsets[] = {
        0xB000 /* PHB3 */
 };
 
+/* PHB debug registers */
+
+static const unsigned long phb_debug_offsets[] = {
+       0x4000  /* PHB 0 DEBUG */,
+       0x5000  /* PHB 1 DEBUG */,
+       0x6000  /* PHB 2 DEBUG */,
+       0x7000  /* PHB 3 DEBUG */
+};
+
+/*
+ * STUFF register for each debug PHB,
+ * byte 1 = start bus number, byte 2 = end bus number
+ */
+
+#define PHB_DEBUG_STUFF_OFFSET 0x0020
+
 unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
 static int translate_empty_slots __read_mostly = 0;
 static int calgary_detected __read_mostly = 0;
 
+static struct rio_table_hdr    *rio_table_hdr __initdata;
+static struct scal_detail      *scal_devs[MAX_NUMNODES] __initdata;
+static struct rio_detail       *rio_devs[MAX_NUMNODES * 4] __initdata;
+
 struct calgary_bus_info {
        void *tce_space;
        unsigned char translation_disabled;
        signed char phbid;
+       void __iomem *bbar;
 };
 
 static struct calgary_bus_info bus_info[MAX_PHB_BUS_NUM] = { { NULL, 0, 0 }, };
@@ -475,6 +503,11 @@ static struct dma_mapping_ops calgary_dma_ops = {
        .unmap_sg = calgary_unmap_sg,
 };
 
+static inline void __iomem * busno_to_bbar(unsigned char num)
+{
+       return bus_info[num].bbar;
+}
+
 static inline int busno_to_phbid(unsigned char num)
 {
        return bus_info[num].phbid;
@@ -620,14 +653,9 @@ static void __init calgary_reserve_peripheral_mem_2(struct pci_dev *dev)
 static void __init calgary_reserve_regions(struct pci_dev *dev)
 {
        unsigned int npages;
-       void __iomem *bbar;
-       unsigned char busnum;
        u64 start;
        struct iommu_table *tbl = dev->sysdata;
 
-       bbar = tbl->bbar;
-       busnum = dev->bus->number;
-
        /* reserve bad_dma_address in case it's a legal address */
        iommu_range_reserve(tbl, bad_dma_address, 1);
 
@@ -740,7 +768,7 @@ static void __init calgary_increase_split_completion_timeout(void __iomem *bbar,
 {
        u64 val64;
        void __iomem *target;
-       unsigned long phb_shift = -1;
+       unsigned int phb_shift = ~0; /* silence gcc */
        u64 mask;
 
        switch (busno_to_phbid(busnum)) {
@@ -828,33 +856,6 @@ static void __init calgary_disable_translation(struct pci_dev *dev)
        del_timer_sync(&tbl->watchdog_timer);
 }
 
-static inline unsigned int __init locate_register_space(struct pci_dev *dev)
-{
-       int rionodeid;
-       u32 address;
-
-       /*
-        * Each Calgary has four busses. The first four busses (first Calgary)
-        * have RIO node ID 2, then the next four (second Calgary) have RIO
-        * node ID 3, the next four (third Calgary) have node ID 2 again, etc.
-        * We use a gross hack - relying on the dev->bus->number ordering,
-        * modulo 14 - to decide which Calgary a given bus is on. Busses 0, 1,
-        * 2 and 4 are on the first Calgary (id 2), 6, 8, a and c are on the
-        * second (id 3), and then it repeats modulo 14.
-        */
-       rionodeid = (dev->bus->number % 14 > 4) ? 3 : 2;
-       /*
-        * register space address calculation as follows:
-        * FE0MB-8MB*OneBasedChassisNumber+1MB*(RioNodeId-ChassisBase)
-        * ChassisBase is always zero for x366/x260/x460
-        * RioNodeId is 2 for first Calgary, 3 for second Calgary
-        */
-       address = START_ADDRESS -
-               (0x800000 * (ONE_BASED_CHASSIS_NUM + dev->bus->number / 14)) +
-               (0x100000) * (rionodeid - CHASSIS_BASE);
-       return address;
-}
-
 static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
 {
        pci_dev_get(dev);
@@ -864,23 +865,15 @@ static void __init calgary_init_one_nontraslated(struct pci_dev *dev)
 
 static int __init calgary_init_one(struct pci_dev *dev)
 {
-       u32 address;
        void __iomem *bbar;
        int ret;
 
        BUG_ON(dev->bus->number >= MAX_PHB_BUS_NUM);
 
-       address = locate_register_space(dev);
-       /* map entire 1MB of Calgary config space */
-       bbar = ioremap_nocache(address, 1024 * 1024);
-       if (!bbar) {
-               ret = -ENODATA;
-               goto done;
-       }
-
+       bbar = busno_to_bbar(dev->bus->number);
        ret = calgary_setup_tar(dev, bbar);
        if (ret)
-               goto iounmap;
+               goto done;
 
        pci_dev_get(dev);
        dev->bus->self = dev;
@@ -888,17 +881,66 @@ static int __init calgary_init_one(struct pci_dev *dev)
 
        return 0;
 
-iounmap:
-       iounmap(bbar);
 done:
        return ret;
 }
 
+static int __init calgary_locate_bbars(void)
+{
+       int ret;
+       int rioidx, phb, bus;
+       void __iomem *bbar;
+       void __iomem *target;
+       unsigned long offset;
+       u8 start_bus, end_bus;
+       u32 val;
+
+       ret = -ENODATA;
+       for (rioidx = 0; rioidx < rio_table_hdr->num_rio_dev; rioidx++) {
+               struct rio_detail *rio = rio_devs[rioidx];
+
+               if ((rio->type != COMPAT_CALGARY) && (rio->type != ALT_CALGARY))
+                       continue;
+
+               /* map entire 1MB of Calgary config space */
+               bbar = ioremap_nocache(rio->BBAR, 1024 * 1024);
+               if (!bbar)
+                       goto error;
+
+               for (phb = 0; phb < PHBS_PER_CALGARY; phb++) {
+                       offset = phb_debug_offsets[phb] | PHB_DEBUG_STUFF_OFFSET;
+                       target = calgary_reg(bbar, offset);
+
+                       val = be32_to_cpu(readl(target));
+                       start_bus = (u8)((val & 0x00FF0000) >> 16);
+                       end_bus = (u8)((val & 0x0000FF00) >> 8);
+                       for (bus = start_bus; bus <= end_bus; bus++) {
+                               bus_info[bus].bbar = bbar;
+                               bus_info[bus].phbid = phb;
+                       }
+               }
+       }
+
+       return 0;
+
+error:
+       /* scan bus_info and iounmap any bbars we previously ioremap'd */
+       for (bus = 0; bus < ARRAY_SIZE(bus_info); bus++)
+               if (bus_info[bus].bbar)
+                       iounmap(bus_info[bus].bbar);
+
+       return ret;
+}
+
 static int __init calgary_init(void)
 {
-       int ret = -ENODEV;
+       int ret;
        struct pci_dev *dev = NULL;
 
+       ret = calgary_locate_bbars();
+       if (ret)
+               return ret;
+
        do {
                dev = pci_get_device(PCI_VENDOR_ID_IBM,
                                     PCI_DEVICE_ID_IBM_CALGARY,
@@ -921,7 +963,7 @@ static int __init calgary_init(void)
 
 error:
        do {
-               dev = pci_find_device_reverse(PCI_VENDOR_ID_IBM,
+               dev = pci_get_device_reverse(PCI_VENDOR_ID_IBM,
                                              PCI_DEVICE_ID_IBM_CALGARY,
                                              dev);
                if (!dev)
@@ -962,13 +1004,56 @@ static inline int __init determine_tce_table_size(u64 ram)
        return ret;
 }
 
+static int __init build_detail_arrays(void)
+{
+       unsigned long ptr;
+       int i, scal_detail_size, rio_detail_size;
+
+       if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
+               printk(KERN_WARNING
+                       "Calgary: MAX_NUMNODES too low! Defined as %d, "
+                       "but system has %d nodes.\n",
+                       MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+               return -ENODEV;
+       }
+
+       switch (rio_table_hdr->version){
+       case 2:
+               scal_detail_size = 11;
+               rio_detail_size = 13;
+               break;
+       case 3:
+               scal_detail_size = 12;
+               rio_detail_size = 15;
+               break;
+       default:
+               printk(KERN_WARNING
+                      "Calgary: Invalid Rio Grande Table Version: %d\n",
+                      rio_table_hdr->version);
+               return -EPROTO;
+       }
+
+       ptr = ((unsigned long)rio_table_hdr) + 3;
+       for (i = 0; i < rio_table_hdr->num_scal_dev;
+                   i++, ptr += scal_detail_size)
+               scal_devs[i] = (struct scal_detail *)ptr;
+
+       for (i = 0; i < rio_table_hdr->num_rio_dev;
+                   i++, ptr += rio_detail_size)
+               rio_devs[i] = (struct rio_detail *)ptr;
+
+       return 0;
+}
+
 void __init detect_calgary(void)
 {
        u32 val;
        int bus;
        void *tbl;
        int calgary_found = 0;
-       int phb = -1;
+       unsigned long ptr;
+       int offset;
+       int ret;
 
        /*
         * if the user specified iommu=off or iommu=soft or we found
@@ -977,25 +1062,47 @@ void __init detect_calgary(void)
        if (swiotlb || no_iommu || iommu_detected)
                return;
 
+       if (!use_calgary)
+               return;
+
        if (!early_pci_allowed())
                return;
 
+       ptr = (unsigned long)phys_to_virt(get_bios_ebda());
+
+       rio_table_hdr = NULL;
+       offset = 0x180;
+       while (offset) {
+               /* The block id is stored in the 2nd word */
+               if (*((unsigned short *)(ptr + offset + 2)) == 0x4752){
+                       /* set the pointer past the offset & block id */
+                       rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
+                       break;
+               }
+               /* The next offset is stored in the 1st word. 0 means no more */
+               offset = *((unsigned short *)(ptr + offset));
+       }
+       if (!rio_table_hdr) {
+               printk(KERN_ERR "Calgary: Unable to locate "
+                               "Rio Grande Table in EBDA - bailing!\n");
+               return;
+       }
+
+       ret = build_detail_arrays();
+       if (ret) {
+               printk(KERN_ERR "Calgary: build_detail_arrays ret %d\n", ret);
+               return;
+       }
+
        specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
 
        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
                int dev;
                struct calgary_bus_info *info = &bus_info[bus];
-               info->phbid = -1;
 
                if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
                        continue;
 
-               /*
-                * There are 4 PHBs per Calgary chip.  Set phb to which phb (0-3)
-                * it is connected to releative to the clagary chip.
-                */
-               phb = (phb + 1) % PHBS_PER_CALGARY;
-
                if (info->translation_disabled)
                        continue;
 
@@ -1010,7 +1117,6 @@ void __init detect_calgary(void)
                                if (!tbl)
                                        goto cleanup;
                                info->tce_space = tbl;
-                               info->phbid = phb;
                                calgary_found = 1;
                                break;
                        }
index f8d857453f8afaf5fc839060cf1ad07dc69d13dc..683b7a5c1ab30170d716a99fe90c633187f4a827 100644 (file)
@@ -296,6 +296,11 @@ __init int iommu_setup(char *p)
                gart_parse_options(p);
 #endif
 
+#ifdef CONFIG_CALGARY_IOMMU
+               if (!strncmp(p, "calgary", 7))
+                       use_calgary = 1;
+#endif /* CONFIG_CALGARY_IOMMU */
+
                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
index 16261a8a3303ed044fba4b31eb612a9f9c3454aa..fc1960f1f243982f8ecd6746ce0bddb8828ab633 100644 (file)
@@ -601,10 +601,9 @@ void __init gart_iommu_init(void)
            (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
            !iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
-               printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
                if (end_pfn > MAX_DMA32_PFN) {
                        printk(KERN_ERR "WARNING more than 4GB of memory "
-                                       "but IOMMU not available.\n"
+                                       "but GART IOMMU not available.\n"
                               KERN_ERR "WARNING 32bit PCI may malfunction.\n");
                }
                return;
index 7451a4c43c1681639c0aa5fbb2640d509ccac603..a418ee4c8c62a0d14102b22939dfa8c5b16d0f79 100644 (file)
@@ -108,17 +108,15 @@ void exit_idle(void)
  */
 static void default_idle(void)
 {
-       local_irq_enable();
-
        current_thread_info()->status &= ~TS_POLLING;
        smp_mb__after_clear_bit();
-       while (!need_resched()) {
-               local_irq_disable();
-               if (!need_resched())
-                       safe_halt();
-               else
-                       local_irq_enable();
-       }
+       local_irq_disable();
+       if (!need_resched()) {
+               /* Enables interrupts one instruction before HLT.
+                  x86 special cases this so there is no race. */
+               safe_halt();
+       } else
+               local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
 }
 
@@ -130,15 +128,7 @@ static void default_idle(void)
 static void poll_idle (void)
 {
        local_irq_enable();
-
-       asm volatile(
-               "2:"
-               "testl %0,%1;"
-               "rep; nop;"
-               "je 2b;"
-               : :
-               "i" (_TIF_NEED_RESCHED),
-               "m" (current_thread_info()->flags));
+       cpu_relax();
 }
 
 void cpu_idle_wait(void)
@@ -219,6 +209,12 @@ void cpu_idle (void)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
+                       /*
+                        * Idle routines should keep interrupts disabled
+                        * from here on, until they go to idle.
+                        * Otherwise, idle callbacks can misfire.
+                        */
+                       local_irq_disable();
                        enter_idle();
                        idle();
                        /* In many cases the interrupt that ended idle
@@ -256,9 +252,16 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
 static void mwait_idle(void)
 {
-       local_irq_enable();
-       while (!need_resched())
-               mwait_idle_with_hints(0,0);
+       if (!need_resched()) {
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+               smp_mb();
+               if (!need_resched())
+                       __sti_mwait(0, 0);
+               else
+                       local_irq_enable();
+       } else {
+               local_irq_enable();
+       }
 }
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
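The default_idle() rewrite above closes a wakeup race: interrupts are disabled before the need_resched() check, and safe_halt() ("sti; hlt") only re-enables them in the one-instruction shadow before hlt, so no interrupt can slip in between the check and the halt. A bare sketch of the pattern as used in the patch; the exact headers providing safe_halt() on this kernel are an assumption:

    #include <linux/sched.h>
    #include <asm/system.h>

    static void idle_once(void)
    {
            local_irq_disable();
            if (!need_resched()) {
                    /* "sti; hlt": interrupts come back on only for the
                       instruction before hlt, so a wakeup cannot be lost
                       between the check and the halt. */
                    safe_halt();
            } else {
                    local_irq_enable();
            }
    }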
index fc944b5e8f4a6050612a0c9220c713e2f47d3a51..af425a8049fb5620596a0bc61bf005d399df3e32 100644 (file)
@@ -471,8 +471,7 @@ void __init setup_arch(char **cmdline_p)
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
-                       initrd_start =
-                               INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+                       initrd_start = INITRD_START + PAGE_OFFSET;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
@@ -732,11 +731,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
        /* Fix cpuid4 emulation for more */
        num_cache_leaves = 3;
 
-       /* When there is only one core no need to synchronize RDTSC */
-       if (num_possible_cpus() == 1)
-               set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
-       else
-               clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+       /* RDTSC can be speculated around */
+       clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
 }
 
 static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -835,6 +831,15 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }
 
+       if (cpu_has_ds) {
+               unsigned int l1, l2;
+               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+               if (!(l1 & (1<<11)))
+                       set_bit(X86_FEATURE_BTS, c->x86_capability);
+               if (!(l1 & (1<<12)))
+                       set_bit(X86_FEATURE_PEBS, c->x86_capability);
+       }
+
        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
@@ -854,7 +859,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
-       set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+       if (c->x86 == 15)
+               set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
+       else
+               clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);
 
        srat_detect_node();
index 9f74c883568c419909b6542449161d5b109f6b51..af1ec4d23cf89131f16e5063f31c81a6810c63ba 100644 (file)
@@ -379,12 +379,17 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
                put_cpu();
                return 0;
        }
+
+       /* Can deadlock when called with interrupts disabled */
+       WARN_ON(irqs_disabled());
+
        spin_lock_bh(&call_lock);
        __smp_call_function_single(cpu, func, info, nonatomic, wait);
        spin_unlock_bh(&call_lock);
        put_cpu();
        return 0;
 }
+EXPORT_SYMBOL(smp_call_function_single);
 
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
index 62c2e747af58006a917d279150254a280aa9f364..daf19332f0dd02ed7ab4257e5c887989dfeb0ef8 100644 (file)
@@ -60,6 +60,7 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 #include <asm/numa.h>
+#include <asm/genapic.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -753,14 +754,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
 }
 
 struct create_idle {
+       struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-       struct create_idle *c_idle = _c_idle;
+       struct create_idle *c_idle =
+               container_of(work, struct create_idle, work);
 
        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
@@ -775,10 +778,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
        int timeout;
        unsigned long start_rip;
        struct create_idle c_idle = {
+               .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                .cpu = cpu,
                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
-       DECLARE_WORK(work, do_fork_idle, &c_idle);
 
        /* allocate memory for gdts of secondary cpus. Hotplug is considered */
        if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +828,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
         * thread.
         */
        if (!keventd_up() || current_is_keventd())
-               work.func(work.data);
+               c_idle.work.func(&c_idle.work);
        else {
-               schedule_work(&work);
+               schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }
 
@@ -1167,6 +1170,13 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
        while (!cpu_isset(cpu, cpu_online_map))
                cpu_relax();
+
+       if (num_online_cpus() > 8 && genapic == &apic_flat) {
+               printk(KERN_WARNING
+                      "flat APIC routing can't be used with > 8 cpus\n");
+               BUG();
+       }
+
        err = 0;
 
        return err;
index e3ef544d2cfb514f1894c232c0e8d38e456a49f7..9f05bc9b2dad7023573de34d6f33260eb0c9fe33 100644 (file)
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
        unsigned int cpu;
        for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
-       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
        if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                       CPUFREQ_TRANSITION_NOTIFIER))
                cpufreq_init = 1;
index 0d65b22f229ccfbf895ff2aa12e3dd2d9977cdd8..a1641ffdffcf7bfa28b4d34d9eb36689dd52527e 100644 (file)
@@ -30,9 +30,9 @@
 #include <linux/kprobes.h>
 #include <linux/kexec.h>
 #include <linux/unwind.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/atomic.h>
 #include <asm/debugreg.h>
@@ -108,7 +108,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
        preempt_enable_no_resched();
 }
 
-static int kstack_depth_to_print = 12;
+int kstack_depth_to_print = 12;
 #ifdef CONFIG_STACK_UNWIND
 static int call_trace = 1;
 #else
@@ -225,16 +225,25 @@ static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
 {
        struct ops_and_data *oad = (struct ops_and_data *)context;
        int n = 0;
+       unsigned long sp = UNW_SP(info);
 
+       if (arch_unw_user_mode(info))
+               return -1;
        while (unwind(info) == 0 && UNW_PC(info)) {
                n++;
                oad->ops->address(oad->data, UNW_PC(info));
                if (arch_unw_user_mode(info))
                        break;
+               if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
+                   && sp > UNW_SP(info))
+                       break;
+               sp = UNW_SP(info);
        }
        return n;
 }
 
+#define MSG(txt) ops->warning(data, txt)
+
 /*
  * x86-64 can have upto three kernel stacks: 
  * process stack
@@ -248,11 +257,12 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
         return p > t && p < t + THREAD_SIZE - 3;
 }
 
-void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack,
+void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
+               unsigned long *stack,
                struct stacktrace_ops *ops, void *data)
 {
-       const unsigned cpu = smp_processor_id();
-       unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+       const unsigned cpu = get_cpu();
+       unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
        unsigned used = 0;
        struct thread_info *tinfo;
 
@@ -268,28 +278,30 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
                        if (unwind_init_frame_info(&info, tsk, regs) == 0)
                                unw_ret = dump_trace_unwind(&info, &oad);
                } else if (tsk == current)
-                       unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
+                       unw_ret = unwind_init_running(&info, dump_trace_unwind,
+                                                     &oad);
                else {
                        if (unwind_init_blocked(&info, tsk) == 0)
                                unw_ret = dump_trace_unwind(&info, &oad);
                }
                if (unw_ret > 0) {
                        if (call_trace == 1 && !arch_unw_user_mode(&info)) {
-                               ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
+                               ops->warning_symbol(data,
+                                            "DWARF2 unwinder stuck at %s",
                                             UNW_PC(&info));
                                if ((long)UNW_SP(&info) < 0) {
-                                       ops->warning(data, "Leftover inexact backtrace:\n");
+                                       MSG("Leftover inexact backtrace:");
                                        stack = (unsigned long *)UNW_SP(&info);
                                        if (!stack)
-                                               return;
+                                               goto out;
                                } else
-                                       ops->warning(data, "Full inexact backtrace again:\n");
+                                       MSG("Full inexact backtrace again:");
                        } else if (call_trace >= 1)
-                               return;
+                               goto out;
                        else
-                               ops->warning(data, "Full inexact backtrace again:\n");
+                               MSG("Full inexact backtrace again:");
                } else
-                       ops->warning(data, "Inexact backtrace:\n");
+                       MSG("Inexact backtrace:");
        }
        if (!stack) {
                unsigned long dummy;
@@ -297,12 +309,6 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
                if (tsk && tsk != current)
                        stack = (unsigned long *)tsk->thread.rsp;
        }
-       /*
-        * Align the stack pointer on word boundary, later loops
-        * rely on that (and corruption / debug info bugs can cause
-        * unaligned values here):
-        */
-       stack = (unsigned long *)((unsigned long)stack & ~(sizeof(long)-1));
 
        /*
         * Print function call entries within a stack. 'cond' is the
@@ -312,9 +318,9 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
 #define HANDLE_STACK(cond) \
        do while (cond) { \
                unsigned long addr = *stack++; \
-               if (oops_in_progress ?          \
-                       __kernel_text_address(addr) : \
-                       kernel_text_address(addr)) { \
+               /* Use unlocked access here because, except for NMIs,   \
+                  we should already be protected against module unloads */ \
+               if (__kernel_text_address(addr)) { \
                        /* \
                         * If the address is either in the text segment of the \
                         * kernel, or in the region which contains vmalloc'ed \
@@ -380,6 +386,8 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
        tinfo = current_thread_info();
        HANDLE_STACK (valid_stack_ptr(tinfo, stack));
 #undef HANDLE_STACK
+out:
+       put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
 
@@ -786,8 +794,7 @@ mem_parity_error(unsigned char reason, struct pt_regs * regs)
 {
        printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
                reason);
-       printk(KERN_EMERG "You probably have a hardware problem with your "
-               "RAM chips\n");
+       printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
 
        if (panic_on_unrecovered_nmi)
                panic("NMI: Not continuing");
index d9534e750d4fa548e7f2a016b94fb022ee7689c0..6a1f8f491e5d97b9341c89a250045e7786a86034 100644 (file)
@@ -51,15 +51,6 @@ SECTIONS
 
   RODATA
 
-#ifdef CONFIG_STACK_UNWIND
-  . = ALIGN(8);
-  .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
-       __start_unwind = .;
-       *(.eh_frame)
-       __end_unwind = .;
-  }
-#endif
-
   . = ALIGN(PAGE_SIZE);        /* Align data segment to page size boundary */
                                /* Data */
   .data : AT(ADDR(.data) - LOAD_OFFSET) {
index 92546c1526f1fece35fb8ed07bfeefc7b3414c0d..4a673f5397a05fbbf5aec90b5067bbafcd8e1ad5 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/topology.h>
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __syscall_clobber "r11","rcx","memory"
 
 int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
 seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
@@ -274,7 +275,6 @@ static void __cpuinit cpu_vsyscall_init(void *arg)
        vsyscall_set_cpu(raw_smp_processor_id());
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __cpuinit
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
@@ -283,13 +283,13 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
                smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1);
        return NOTIFY_DONE;
 }
-#endif
 
 static void __init map_vsyscall(void)
 {
        extern char __vsyscall_0;
        unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
 
+       /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
        __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
 }
 
index 06ae630de82b1138c948f0131e7a93b99c50cddf..bc503f506903a486a9a2fb4c56afd144466fff18 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/module.h>
 #include <asm/checksum.h>
 
-#define __force_inline inline __attribute__((always_inline))
-
 static inline unsigned short from32to16(unsigned a) 
 {
        unsigned short b = a >> 16; 
@@ -33,7 +31,7 @@ static inline unsigned short from32to16(unsigned a)
  * Unrolling to an 128 bytes inner loop.
  * Using interleaving with more registers to break the carry chains.
  */
-static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
+static unsigned do_csum(const unsigned char *buff, unsigned len)
 {
        unsigned odd, count;
        unsigned long result = 0;
index 50be90975d04a40e08f9a3c72329fbe9b7ec7cdd..2dbebd308347f21be2ad931ce480a6ade16e72b2 100644 (file)
@@ -40,13 +40,13 @@ EXPORT_SYMBOL(__delay);
 
 inline void __const_udelay(unsigned long xloops)
 {
-       __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
+       __delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1);
 }
 EXPORT_SYMBOL(__const_udelay);
 
 void __udelay(unsigned long usecs)
 {
-       __const_udelay(usecs * 0x000010c6);  /* 2**32 / 1000000 */
+       __const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
 }
 EXPORT_SYMBOL(__udelay);
 
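The delay.c change trades truncation for rounding up, so udelay() can only ever wait a touch longer than requested, never shorter. The arithmetic behind the two constants:

    2^32 / 10^6 = 4294.967296
    0x10c6      = 4294   (rounded down: the delay could end early)
    0x10c7      = 4295   (rounded up: the delay is never shorter than asked)

The "+ 1" added to __const_udelay() covers the same truncation in the final ">> 32".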
index 3751b4788e288748112c05df0121a1186aac5fab..a65fc6f1dcaff5ec3dd876a609a4f56fd3d7abf5 100644 (file)
@@ -23,9 +23,9 @@
 #include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/smp.h>
 #include <asm/tlbflush.h>
@@ -96,7 +96,7 @@ void bust_spinlocks(int yes)
 static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                                unsigned long error_code)
 { 
-       unsigned char __user *instr;
+       unsigned char *instr;
        int scan_more = 1;
        int prefetch = 0; 
        unsigned char *max_instr;
@@ -116,7 +116,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                unsigned char instr_hi;
                unsigned char instr_lo;
 
-               if (__get_user(opcode, (char __user *)instr))
+               if (probe_kernel_address(instr, opcode))
                        break; 
 
                instr_hi = opcode & 0xf0; 
@@ -154,7 +154,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
-                       if (__get_user(opcode, (char __user *)instr))
+                       if (probe_kernel_address(instr, opcode))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
@@ -170,7 +170,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 static int bad_address(void *p) 
 { 
        unsigned long dummy;
-       return __get_user(dummy, (unsigned long __user *)p);
+       return probe_kernel_address((unsigned long *)p, dummy);
 } 
 
 void dump_pagetable(unsigned long address)
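The fault.c hunks swap __get_user() on kernel addresses for probe_kernel_address(), which reads through a possibly bad kernel pointer and reports a fault instead of oopsing. A small sketch of the call, assuming the <linux/uaccess.h> semantics relied on by the patch (zero on success, non-zero if the access would fault); the helper name is invented:

    #include <linux/uaccess.h>

    /* Fetch one opcode byte from a possibly-bogus kernel address. */
    static int read_opcode_safely(const unsigned char *instr)
    {
            unsigned char opcode;

            if (probe_kernel_address(instr, opcode))
                    return -1;      /* the read would have faulted */

            return opcode;
    }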
index 4c0c00ef3ca769c027591b4d5b6483e9b13b9228..2968b90ef8adfec3d0316a3ef7b8d8e49628284d 100644 (file)
@@ -730,14 +730,15 @@ static __init int x8664_sysctl_init(void)
 __initcall(x8664_sysctl_init);
 #endif
 
-/* A pseudo VMAs to allow ptrace access for the vsyscall page.   This only
+/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
    covers the 64bit vsyscall page now. 32bit has a real VMA now and does
    not need special handling anymore. */
 
 static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
-       .vm_end = VSYSCALL_END,
-       .vm_page_prot = PAGE_READONLY
+       .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+       .vm_page_prot = PAGE_READONLY_EXEC,
+       .vm_flags = VM_READ | VM_EXEC
 };
 
 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
index 3e231d762aaa8527d112687c711747f98f4bac83..ccb91dd996a957c1dc26f183e095c3a8edbd30f1 100644 (file)
@@ -61,34 +61,40 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
        return base;
 } 
 
-
-static void flush_kernel_map(void *address) 
+static void cache_flush_page(void *adr)
 {
-       if (0 && address && cpu_has_clflush) {
-               /* is this worth it? */ 
-               int i;
-               for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 
-                       asm volatile("clflush (%0)" :: "r" (address + i)); 
-       } else
-               asm volatile("wbinvd":::"memory"); 
-       if (address)
-               __flush_tlb_one(address);
-       else
-               __flush_tlb_all();
+       int i;
+       for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+               asm volatile("clflush (%0)" :: "r" (adr + i));
 }
 
+static void flush_kernel_map(void *arg)
+{
+       struct list_head *l = (struct list_head *)arg;
+       struct page *pg;
+
+       /* When clflush is available always use it because it is
+          much cheaper than WBINVD */
+       if (!cpu_has_clflush)
+               asm volatile("wbinvd" ::: "memory");
+       list_for_each_entry(pg, l, lru) {
+               void *adr = page_address(pg);
+               if (cpu_has_clflush)
+                       cache_flush_page(adr);
+               __flush_tlb_one(adr);
+       }
+}
 
-static inline void flush_map(unsigned long address)
+static inline void flush_map(struct list_head *l)
 {      
-       on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+       on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
 
 static inline void save_page(struct page *fpage)
 {
-       fpage->lru.next = (struct list_head *)deferred_pages;
-       deferred_pages = fpage;
+       list_add(&fpage->lru, &deferred_pages);
 }
 
 /* 
@@ -207,18 +213,18 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 
 void global_flush_tlb(void)
 { 
-       struct page *dpage;
+       struct page *pg, *next;
+       struct list_head l;
 
        down_read(&init_mm.mmap_sem);
-       dpage = xchg(&deferred_pages, NULL);
+       list_replace_init(&deferred_pages, &l);
        up_read(&init_mm.mmap_sem);
 
-       flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-       while (dpage) {
-               struct page *tmp = dpage;
-               dpage = (struct page *)dpage->lru.next;
-               ClearPagePrivate(tmp);
-               __free_page(tmp);
+       flush_map(&l);
+
+       list_for_each_entry_safe(pg, next, &l, lru) {
+               ClearPagePrivate(pg);
+               __free_page(pg);
        } 
 } 
 
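The pageattr.c rework above replaces a hand-rolled chain through page->lru.next with a real list_head and drains it via list_replace_init(), so the expensive flush walks a private list outside the semaphore. A generic sketch of that drain-then-process pattern with hypothetical item and lock names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct deferred_item {
            struct list_head lru;
            /* ... payload ... */
    };

    static LIST_HEAD(deferred);             /* protected by deferred_lock */
    static DEFINE_SPINLOCK(deferred_lock);

    static void queue_item(struct deferred_item *it)
    {
            spin_lock(&deferred_lock);
            list_add(&it->lru, &deferred);
            spin_unlock(&deferred_lock);
    }

    static void drain_items(void)
    {
            struct deferred_item *it, *next;
            struct list_head local;

            /* Move the whole list aside in one step, as global_flush_tlb() does. */
            spin_lock(&deferred_lock);
            list_replace_init(&deferred, &local);
            spin_unlock(&deferred_lock);

            list_for_each_entry_safe(it, next, &local, lru) {
                    /* process and free 'it' here */
            }
    }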
index 83766a6bdee264f3c3a8da74bfa95ada55097062..a50f481116477e9552c82db45f728bc067dbbbbd 100644 (file)
@@ -19,11 +19,9 @@ config BLOCK
 
 if BLOCK
 
-#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
-#for instance.
 config LBD
        bool "Support for Large Block Devices"
-       depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML
+       depends on !64BIT
        help
          Say Y here if you want to attach large (bigger than 2TB) discs to
          your machine, or if you want to have a raid or loopback device
@@ -44,7 +42,7 @@ config BLK_DEV_IO_TRACE
 
 config LSF
        bool "Support for Large Single Files"
-       depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
+       depends on !64BIT
        help
          Say Y here if you want to be able to handle very large files (bigger
          than 2TB), otherwise say N.
index 00242111a457e3b461d59e4542bdd86d8501db44..5934c4bfd52a3d9b71331785ac6fafc26ddef992 100644 (file)
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
  *
  * FIXME! dispatch queue is not a queue at all!
  */
-static void as_work_handler(void *data)
+static void as_work_handler(struct work_struct *work)
 {
-       struct request_queue *q = data;
+       struct as_data *ad = container_of(work, struct as_data, antic_work);
+       struct request_queue *q = ad->q;
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q)
        ad->antic_timer.function = as_antic_timeout;
        ad->antic_timer.data = (unsigned long)q;
        init_timer(&ad->antic_timer);
-       INIT_WORK(&ad->antic_work, as_work_handler, q);
+       INIT_WORK(&ad->antic_work, as_work_handler);
 
        INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
        INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
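
This hunk follows the workqueue API change that runs through the rest of the series: INIT_WORK() no longer takes a data pointer, so the handler receives the work_struct itself and recovers its containing object with container_of(). A standalone sketch of the pattern (struct and field names here are illustrative, not the real block-layer types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {            /* stand-in for the kernel type */
        int pending;
};

struct as_data_demo {
        int queue_id;
        struct work_struct antic_work;
};

/* New-style handler: gets the work item, derives the owning structure. */
static void work_handler(struct work_struct *work)
{
        struct as_data_demo *ad =
                container_of(work, struct as_data_demo, antic_work);

        printf("kicking queue %d\n", ad->queue_id);
}

int main(void)
{
        struct as_data_demo ad = { .queue_id = 1 };

        work_handler(&ad.antic_work);   /* what the workqueue core would do */
        return 0;
}
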
index 562ca7cbf858bf93777ef8029414768b170fe6af..d3679dd1d2201785e91194415d01736a16af54bc 100644 (file)
@@ -31,26 +31,24 @@ static unsigned int blktrace_seq __read_mostly = 1;
 /*
  * Send out a notify message.
  */
-static inline unsigned int trace_note(struct blk_trace *bt,
-               pid_t pid, int action,
-               const void *data, size_t len)
+static void trace_note(struct blk_trace *bt, pid_t pid, int action,
+                      const void *data, size_t len)
 {
        struct blk_io_trace *t;
-       int cpu = smp_processor_id();
 
        t = relay_reserve(bt->rchan, sizeof(*t) + len);
-       if (t == NULL)
-               return 0;
-
-       t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-       t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
-       t->device = bt->dev;
-       t->action = action;
-       t->pid = pid;
-       t->cpu = cpu;
-       t->pdu_len = len;
-       memcpy((void *) t + sizeof(*t), data, len);
-       return blktrace_seq;
+       if (t) {
+               const int cpu = smp_processor_id();
+
+               t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+               t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+               t->device = bt->dev;
+               t->action = action;
+               t->pid = pid;
+               t->cpu = cpu;
+               t->pdu_len = len;
+               memcpy((void *) t + sizeof(*t), data, len);
+       }
 }
 
 /*
@@ -59,9 +57,8 @@ static inline unsigned int trace_note(struct blk_trace *bt,
  */
 static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
 {
-       tsk->btrace_seq = trace_note(bt, tsk->pid,
-                       BLK_TN_PROCESS,
-                       tsk->comm, sizeof(tsk->comm));
+       tsk->btrace_seq = blktrace_seq;
+       trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -397,8 +394,7 @@ err:
        if (bt) {
                if (bt->dropped_file)
                        debugfs_remove(bt->dropped_file);
-               if (bt->sequence)
-                       free_percpu(bt->sequence);
+               free_percpu(bt->sequence);
                if (bt->rchan)
                        relay_close(bt->rchan);
                kfree(bt);
index e9019ed39b7352a5416aa38a9a15623be744293d..78c6b312bd3030d3c86a9c35e3facc842b73e3eb 100644 (file)
@@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125;
 #define RQ_CIC(rq)             ((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)            ((rq)->elevator_private2)
 
-static kmem_cache_t *cfq_pool;
-static kmem_cache_t *cfq_ioc_pool;
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
 
 static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
@@ -1840,9 +1840,11 @@ queue_fail:
        return 1;
 }
 
-static void cfq_kick_queue(void *data)
+static void cfq_kick_queue(struct work_struct *work)
 {
-       request_queue_t *q = data;
+       struct cfq_data *cfqd =
+               container_of(work, struct cfq_data, unplug_work);
+       request_queue_t *q = cfqd->queue;
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
@@ -1986,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q)
        cfqd->idle_class_timer.function = cfq_idle_class_timer;
        cfqd->idle_class_timer.data = (unsigned long) cfqd;
 
-       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
+       INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
index 0f82e12f7b678553b51189374bfb562c676de919..31512cd9f3ad8eedbf6b8aacb3da2840cca24072 100644 (file)
@@ -34,7 +34,7 @@
  */
 #include <scsi/scsi_cmnd.h>
 
-static void blk_unplug_work(void *data);
+static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
@@ -44,17 +44,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 /*
  * For the allocated request tables
  */
-static kmem_cache_t *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
  */
-static kmem_cache_t *requestq_cachep;
+static struct kmem_cache *requestq_cachep;
 
 /*
  * For io context allocations
  */
-static kmem_cache_t *iocontext_cachep;
+static struct kmem_cache *iocontext_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
        if (q->unplug_delay == 0)
                q->unplug_delay = 1;
 
-       INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+       INIT_WORK(&q->unplug_work, blk_unplug_work);
 
        q->unplug_timer.function = blk_unplug_timeout;
        q->unplug_timer.data = (unsigned long)q;
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
        }
 }
 
-static void blk_unplug_work(void *data)
+static void blk_unplug_work(struct work_struct *work)
 {
-       request_queue_t *q = data;
+       request_queue_t *q = container_of(work, request_queue_t, unplug_work);
 
        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
                                q->rq.count[READ] + q->rq.count[WRITE]);
@@ -3459,8 +3459,6 @@ static void blk_done_softirq(struct softirq_action *h)
        }
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
 {
@@ -3486,8 +3484,6 @@ static struct notifier_block __devinitdata blk_cpu_notifier = {
        .notifier_call  = blk_cpu_notify,
 };
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
 /**
  * blk_complete_request - end I/O on a request
  * @req:      the request being processed
index 5493c2fbbab177335814a24362b95763eb5dc820..b3e210723a71f637605852b7d8136ef594af95d2 100644 (file)
@@ -277,7 +277,7 @@ static int sg_io(struct file *file, request_queue_t *q,
        if (rq->bio)
                blk_queue_bounce(q, &rq->bio);
 
-       rq->timeout = (hdr->timeout * HZ) / 1000;
+       rq->timeout = msecs_to_jiffies(hdr->timeout);
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
        if (!rq->timeout)
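
The SG_IO timeout arrives from user space in milliseconds while rq->timeout is in jiffies, so the open-coded (timeout * HZ) / 1000 becomes msecs_to_jiffies(). A standalone sketch of the arithmetic, with HZ fixed at 250 purely for illustration:

#include <stdio.h>

#define HZ 250                  /* example tick rate; a kernel config choice */

static unsigned long msecs_to_jiffies_demo(unsigned int ms)
{
        return ((unsigned long)ms * HZ) / 1000;
}

int main(void)
{
        unsigned int hdr_timeout_ms = 60000;    /* a 60 second SG_IO timeout */

        printf("%u ms -> %lu jiffies at HZ=%d\n",
               hdr_timeout_ms, msecs_to_jiffies_demo(hdr_timeout_ms), HZ);
        return 0;
}
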
index cbae8392ce11b848e5573194eb8591a175815822..92ba249f3a5bf6bfc64d4aca0200ad9812bac999 100644 (file)
@@ -39,6 +39,17 @@ config CRYPTO_HMAC
          HMAC: Keyed-Hashing for Message Authentication (RFC2104).
          This is required for IPSec.
 
+config CRYPTO_XCBC
+       tristate "XCBC support"
+       depends on EXPERIMENTAL
+       select CRYPTO_HASH
+       select CRYPTO_MANAGER
+       help
+         XCBC: Keyed-Hashing with a block cipher
+               http://www.ietf.org/rfc/rfc3566.txt
+               http://csrc.nist.gov/encryption/modes/proposedmodes/
+                xcbc-mac/xcbc-mac-spec.pdf
+
 config CRYPTO_NULL
        tristate "Null algorithms"
        select CRYPTO_ALGAPI
@@ -128,6 +139,16 @@ config CRYPTO_TGR192
          See also:
          <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
 
+config CRYPTO_GF128MUL
+       tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       help
+         Efficient table-driven implementation of multiplications in the
+         field GF(2^128).  This is needed by some cipher modes. This
+         option will be selected automatically if you select such a
+         cipher mode.  Only select this option by hand if you expect to load
+         an external module that requires these functions.
+
 config CRYPTO_ECB
        tristate "ECB support"
        select CRYPTO_BLKCIPHER
@@ -147,6 +168,19 @@ config CRYPTO_CBC
          CBC: Cipher Block Chaining mode
          This block cipher algorithm is required for IPSec.
 
+config CRYPTO_LRW
+       tristate "LRW support (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       select CRYPTO_BLKCIPHER
+       select CRYPTO_MANAGER
+       select CRYPTO_GF128MUL
+       help
+         LRW: Liskov Rivest Wagner, a tweakable, non-malleable, non-movable
+         narrow-block cipher mode for dm-crypt.  Use it with the cipher
+         specification string aes-lrw-benbi; the key must be 256, 320 or
+         384 bits long.  The first 128, 192 or 256 bits in the key are
+         used for AES and the rest is used to tie each cipher block to
+         its logical position.
+
 config CRYPTO_DES
        tristate "DES and Triple DES EDE cipher algorithms"
        select CRYPTO_ALGAPI
index 72366208e291cbbbbbfc89f5c4ddbbb26ec6e5ff..60e3d24f61f56a28c9c2e91eb84a4cac05671366 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
+obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
@@ -23,8 +24,10 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
 obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_LRW) += lrw.o
 obj-$(CONFIG_CRYPTO_DES) += des.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
index 4fb7fa45cb0de649194262c5f6061669b3391a80..8c446871cd5b13998294850338bb4ce93930f069 100644 (file)
@@ -466,23 +466,8 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
        kfree(tfm);
 }
 
-int crypto_alg_available(const char *name, u32 flags)
-{
-       int ret = 0;
-       struct crypto_alg *alg = crypto_alg_mod_lookup(name, 0,
-                                                      CRYPTO_ALG_ASYNC);
-       
-       if (!IS_ERR(alg)) {
-               crypto_mod_put(alg);
-               ret = 1;
-       }
-       
-       return ret;
-}
-
 EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
 EXPORT_SYMBOL_GPL(crypto_free_tfm);
-EXPORT_SYMBOL_GPL(crypto_alg_available);
 
 int crypto_has_alg(const char *name, u32 type, u32 mask)
 {
index 9b5b1560106899a480750ff464af9f1921efdb99..2ebffb84f1d99552bc707162eeb26a9d08f4277f 100644 (file)
@@ -40,9 +40,10 @@ struct cryptomgr_param {
        char template[CRYPTO_MAX_ALG_NAME];
 };
 
-static void cryptomgr_probe(void *data)
+static void cryptomgr_probe(struct work_struct *work)
 {
-       struct cryptomgr_param *param = data;
+       struct cryptomgr_param *param =
+               container_of(work, struct cryptomgr_param, work);
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        int err;
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
        param->larval.type = larval->alg.cra_flags;
        param->larval.mask = larval->mask;
 
-       INIT_WORK(&param->work, cryptomgr_probe, param);
+       INIT_WORK(&param->work, cryptomgr_probe);
        schedule_work(&param->work);
 
        return NOTIFY_STOP;
index 0155a94e4b15432b636559124370436fa8edf5af..8f4593268ce0bf5c5ef2e9e47d8ab72789adb842 100644 (file)
 #include "internal.h"
 #include "scatterwalk.h"
 
-void crypto_digest_init(struct crypto_tfm *tfm)
-{
-       struct crypto_hash *hash = crypto_hash_cast(tfm);
-       struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-
-       crypto_hash_init(&desc);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_init);
-
-void crypto_digest_update(struct crypto_tfm *tfm,
-                         struct scatterlist *sg, unsigned int nsg)
-{
-       struct crypto_hash *hash = crypto_hash_cast(tfm);
-       struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-       unsigned int nbytes = 0;
-       unsigned int i;
-
-       for (i = 0; i < nsg; i++)
-               nbytes += sg[i].length;
-
-       crypto_hash_update(&desc, sg, nbytes);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_update);
-
-void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-{
-       struct crypto_hash *hash = crypto_hash_cast(tfm);
-       struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-
-       crypto_hash_final(&desc, out);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_final);
-
-void crypto_digest_digest(struct crypto_tfm *tfm,
-                         struct scatterlist *sg, unsigned int nsg, u8 *out)
-{
-       struct crypto_hash *hash = crypto_hash_cast(tfm);
-       struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
-       unsigned int nbytes = 0;
-       unsigned int i;
-
-       for (i = 0; i < nsg; i++)
-               nbytes += sg[i].length;
-
-       crypto_hash_digest(&desc, sg, nbytes, out);
-}
-EXPORT_SYMBOL_GPL(crypto_digest_digest);
-
 static int init(struct hash_desc *desc)
 {
        struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
new file mode 100644 (file)
index 0000000..0a2aadf
--- /dev/null
@@ -0,0 +1,466 @@
+/* gf128mul.c - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue 31/01/2006
+
+ This file provides fast multiplication in GF(128) as required by several
+ cryptographic authentication modes
+*/
+
+#include <crypto/gf128mul.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define gf128mul_dat(q) { \
+       q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
+       q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
+       q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
+       q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
+       q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
+       q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
+       q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
+       q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
+       q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
+       q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
+       q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
+       q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
+       q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
+       q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
+       q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
+       q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
+       q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
+       q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
+       q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
+       q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
+       q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
+       q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
+       q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
+       q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
+       q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
+       q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
+       q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
+       q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
+       q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
+       q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
+       q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
+       q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
+}
+
+/*     Given the value i in 0..255 as the byte overflow when a field element
+    in GHASH is multiplied by x^8, this function will return the values that
+    are generated in the lo 16-bit word of the field value by applying the
+    modular polynomial. The values lo_byte and hi_byte are returned via the
+    macro xp_fun(lo_byte, hi_byte) so that the values can be assembled into
+    memory as required by a suitable definition of this macro operating on
+    the table above
+*/
+
+#define xx(p, q)       0x##p##q
+
+#define xda_bbe(i) ( \
+       (i & 0x80 ? xx(43, 80) : 0) ^ (i & 0x40 ? xx(21, c0) : 0) ^ \
+       (i & 0x20 ? xx(10, e0) : 0) ^ (i & 0x10 ? xx(08, 70) : 0) ^ \
+       (i & 0x08 ? xx(04, 38) : 0) ^ (i & 0x04 ? xx(02, 1c) : 0) ^ \
+       (i & 0x02 ? xx(01, 0e) : 0) ^ (i & 0x01 ? xx(00, 87) : 0) \
+)
+
+#define xda_lle(i) ( \
+       (i & 0x80 ? xx(e1, 00) : 0) ^ (i & 0x40 ? xx(70, 80) : 0) ^ \
+       (i & 0x20 ? xx(38, 40) : 0) ^ (i & 0x10 ? xx(1c, 20) : 0) ^ \
+       (i & 0x08 ? xx(0e, 10) : 0) ^ (i & 0x04 ? xx(07, 08) : 0) ^ \
+       (i & 0x02 ? xx(03, 84) : 0) ^ (i & 0x01 ? xx(01, c2) : 0) \
+)
+
+static const u16 gf128mul_table_lle[256] = gf128mul_dat(xda_lle);
+static const u16 gf128mul_table_bbe[256] = gf128mul_dat(xda_bbe);
+
+/* These functions multiply a field element by x and by x^8 in the
+ * polynomial field representation.  They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ */
+
+static void gf128mul_x_lle(be128 *r, const be128 *x)
+{
+       u64 a = be64_to_cpu(x->a);
+       u64 b = be64_to_cpu(x->b);
+       u64 _tt = gf128mul_table_lle[(b << 7) & 0xff];
+
+       r->b = cpu_to_be64((b >> 1) | (a << 63));
+       r->a = cpu_to_be64((a >> 1) ^ (_tt << 48));
+}
+
+static void gf128mul_x_bbe(be128 *r, const be128 *x)
+{
+       u64 a = be64_to_cpu(x->a);
+       u64 b = be64_to_cpu(x->b);
+       u64 _tt = gf128mul_table_bbe[a >> 63];
+
+       r->a = cpu_to_be64((a << 1) | (b >> 63));
+       r->b = cpu_to_be64((b << 1) ^ _tt);
+}
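
gf128mul_x_bbe() above is a 128-bit left shift followed by a conditional reduction with the field polynomial x^128 + x^7 + x^2 + x + 1, whose low bits are the 0x87 entry of gf128mul_table_bbe. The same step on two host-order 64-bit halves, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Multiply a 128-bit field element (hi:lo, host order) by x in the
 * big-endian bit ordering, reducing with 0x87 when the top bit falls out. */
static void mul_x_bbe(uint64_t *hi, uint64_t *lo)
{
        uint64_t carry = *hi >> 63;

        *hi = (*hi << 1) | (*lo >> 63);
        *lo = (*lo << 1) ^ (carry ? 0x87 : 0);
}

int main(void)
{
        uint64_t hi = 0, lo = 1;        /* the field element 1 */
        int i;

        for (i = 1; i <= 4; i++) {
                mul_x_bbe(&hi, &lo);
                printf("x^%d = %016llx%016llx\n", i,
                       (unsigned long long)hi, (unsigned long long)lo);
        }
        return 0;
}
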
+
+static void gf128mul_x8_lle(be128 *x)
+{
+       u64 a = be64_to_cpu(x->a);
+       u64 b = be64_to_cpu(x->b);
+       u64 _tt = gf128mul_table_lle[b & 0xff];
+
+       x->b = cpu_to_be64((b >> 8) | (a << 56));
+       x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+static void gf128mul_x8_bbe(be128 *x)
+{
+       u64 a = be64_to_cpu(x->a);
+       u64 b = be64_to_cpu(x->b);
+       u64 _tt = gf128mul_table_bbe[a >> 56];
+
+       x->a = cpu_to_be64((a << 8) | (b >> 56));
+       x->b = cpu_to_be64((b << 8) ^ _tt);
+}
+
+void gf128mul_lle(be128 *r, const be128 *b)
+{
+       be128 p[8];
+       int i;
+
+       p[0] = *r;
+       for (i = 0; i < 7; ++i)
+               gf128mul_x_lle(&p[i + 1], &p[i]);
+
+       memset(r, 0, sizeof(*r));
+       for (i = 0;;) {
+               u8 ch = ((u8 *)b)[15 - i];
+
+               if (ch & 0x80)
+                       be128_xor(r, r, &p[0]);
+               if (ch & 0x40)
+                       be128_xor(r, r, &p[1]);
+               if (ch & 0x20)
+                       be128_xor(r, r, &p[2]);
+               if (ch & 0x10)
+                       be128_xor(r, r, &p[3]);
+               if (ch & 0x08)
+                       be128_xor(r, r, &p[4]);
+               if (ch & 0x04)
+                       be128_xor(r, r, &p[5]);
+               if (ch & 0x02)
+                       be128_xor(r, r, &p[6]);
+               if (ch & 0x01)
+                       be128_xor(r, r, &p[7]);
+
+               if (++i >= 16)
+                       break;
+
+               gf128mul_x8_lle(r);
+       }
+}
+EXPORT_SYMBOL(gf128mul_lle);
+
+void gf128mul_bbe(be128 *r, const be128 *b)
+{
+       be128 p[8];
+       int i;
+
+       p[0] = *r;
+       for (i = 0; i < 7; ++i)
+               gf128mul_x_bbe(&p[i + 1], &p[i]);
+
+       memset(r, 0, sizeof(*r));
+       for (i = 0;;) {
+               u8 ch = ((u8 *)b)[i];
+
+               if (ch & 0x80)
+                       be128_xor(r, r, &p[7]);
+               if (ch & 0x40)
+                       be128_xor(r, r, &p[6]);
+               if (ch & 0x20)
+                       be128_xor(r, r, &p[5]);
+               if (ch & 0x10)
+                       be128_xor(r, r, &p[4]);
+               if (ch & 0x08)
+                       be128_xor(r, r, &p[3]);
+               if (ch & 0x04)
+                       be128_xor(r, r, &p[2]);
+               if (ch & 0x02)
+                       be128_xor(r, r, &p[1]);
+               if (ch & 0x01)
+                       be128_xor(r, r, &p[0]);
+
+               if (++i >= 16)
+                       break;
+
+               gf128mul_x8_bbe(r);
+       }
+}
+EXPORT_SYMBOL(gf128mul_bbe);
+
+/*      This version uses 64k bytes of table space.
+    A 16 byte buffer has to be multiplied by a 16 byte key
+    value in GF(128).  If we consider a GF(128) value in
+    the buffer's lowest byte, we can construct a table of
+    the 256 16 byte values that result from the 256 values
+    of this byte.  This requires 4096 bytes.  But we also
+    need such a table for each of the 15 higher bytes in
+    the buffer, which makes 16 tables and 64 kbytes in total.
+*/
+/* additional explanation
+ * t[0][BYTE] contains g*BYTE
+ * t[1][BYTE] contains g*x^8*BYTE
+ *  ..
+ * t[15][BYTE] contains g*x^120*BYTE */
+struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
+{
+       struct gf128mul_64k *t;
+       int i, j, k;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               goto out;
+
+       for (i = 0; i < 16; i++) {
+               t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+               if (!t->t[i]) {
+                       gf128mul_free_64k(t);
+                       t = NULL;
+                       goto out;
+               }
+       }
+
+       t->t[0]->t[128] = *g;
+       for (j = 64; j > 0; j >>= 1)
+               gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
+
+       for (i = 0;;) {
+               for (j = 2; j < 256; j += j)
+                       for (k = 1; k < j; ++k)
+                               be128_xor(&t->t[i]->t[j + k],
+                                         &t->t[i]->t[j], &t->t[i]->t[k]);
+
+               if (++i >= 16)
+                       break;
+
+               for (j = 128; j > 0; j >>= 1) {
+                       t->t[i]->t[j] = t->t[i - 1]->t[j];
+                       gf128mul_x8_lle(&t->t[i]->t[j]);
+               }
+       }
+
+out:
+       return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_lle);
+
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
+{
+       struct gf128mul_64k *t;
+       int i, j, k;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               goto out;
+
+       for (i = 0; i < 16; i++) {
+               t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+               if (!t->t[i]) {
+                       gf128mul_free_64k(t);
+                       t = NULL;
+                       goto out;
+               }
+       }
+
+       t->t[0]->t[1] = *g;
+       for (j = 1; j <= 64; j <<= 1)
+               gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
+
+       for (i = 0;;) {
+               for (j = 2; j < 256; j += j)
+                       for (k = 1; k < j; ++k)
+                               be128_xor(&t->t[i]->t[j + k],
+                                         &t->t[i]->t[j], &t->t[i]->t[k]);
+
+               if (++i >= 16)
+                       break;
+
+               for (j = 128; j > 0; j >>= 1) {
+                       t->t[i]->t[j] = t->t[i - 1]->t[j];
+                       gf128mul_x8_bbe(&t->t[i]->t[j]);
+               }
+       }
+
+out:
+       return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_bbe);
+
+void gf128mul_free_64k(struct gf128mul_64k *t)
+{
+       int i;
+
+       for (i = 0; i < 16; i++)
+               kfree(t->t[i]);
+       kfree(t);
+}
+EXPORT_SYMBOL(gf128mul_free_64k);
+
+void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
+{
+       u8 *ap = (u8 *)a;
+       be128 r[1];
+       int i;
+
+       *r = t->t[0]->t[ap[0]];
+       for (i = 1; i < 16; ++i)
+               be128_xor(r, r, &t->t[i]->t[ap[i]]);
+       *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_lle);
+
+void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
+{
+       u8 *ap = (u8 *)a;
+       be128 r[1];
+       int i;
+
+       *r = t->t[0]->t[ap[15]];
+       for (i = 1; i < 16; ++i)
+               be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
+       *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_bbe);
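
The comment above describes the 64 kbyte strategy: precompute every multiple of the key element for each byte position so that a full multiplication becomes table lookups and XORs (the 4 kbyte variant below keeps one table and shifts by x^8 instead). The idea is easiest to see in the 8-bit field, where a single 256-entry table covers every operand byte; a standalone sketch using the AES polynomial 0x1b, purely as an analogy:

#include <stdint.h>
#include <stdio.h>

/* GF(2^8) with the AES reduction polynomial x^8 + x^4 + x^3 + x + 1. */
static uint8_t xtime(uint8_t a)
{
        return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
}

static uint8_t gmul(uint8_t a, uint8_t b)       /* bitwise reference multiply */
{
        uint8_t p = 0;

        while (b) {
                if (b & 1)
                        p ^= a;
                a = xtime(a);
                b >>= 1;
        }
        return p;
}

int main(void)
{
        uint8_t g = 0x57;       /* the fixed "key" element */
        uint8_t tab[256];
        int v;

        for (v = 0; v < 256; v++)       /* precompute all multiples of g once */
                tab[v] = gmul(g, (uint8_t)v);

        /* multiplying by g is now a single lookup */
        printf("0x83 * 0x57 = 0x%02x (table) = 0x%02x (bitwise)\n",
               tab[0x83], gmul(0x83, 0x57));
        return 0;
}
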
+
+/*      This version uses 4k bytes of table space.
+    A 16 byte buffer has to be multiplied by a 16 byte key
+    value in GF(128).  If we consider a GF(128) value in a
+    single byte, we can construct a table of the 256 16 byte
+    values that result from the 256 values of this byte.
+    This requires 4096 bytes. If we take the highest byte in
+    the buffer and use this table to get the result, we then
+    have to multiply by x^120 to get the final value. For the
+    next highest byte the result has to be multiplied by x^112
+    and so on. But we can do this by accumulating the result
+    in an accumulator starting with the result for the top
+    byte.  We repeatedly multiply the accumulator value by
+    x^8 and then add in (i.e. xor) the 16 bytes of the next
+    lower byte in the buffer, stopping when we reach the
+    lowest byte. This requires a 4096 byte table.
+*/
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
+{
+       struct gf128mul_4k *t;
+       int j, k;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               goto out;
+
+       t->t[128] = *g;
+       for (j = 64; j > 0; j >>= 1)
+               gf128mul_x_lle(&t->t[j], &t->t[j+j]);
+
+       for (j = 2; j < 256; j += j)
+               for (k = 1; k < j; ++k)
+                       be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+       return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_lle);
+
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
+{
+       struct gf128mul_4k *t;
+       int j, k;
+
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (!t)
+               goto out;
+
+       t->t[1] = *g;
+       for (j = 1; j <= 64; j <<= 1)
+               gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
+
+       for (j = 2; j < 256; j += j)
+               for (k = 1; k < j; ++k)
+                       be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+       return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_bbe);
+
+void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t)
+{
+       u8 *ap = (u8 *)a;
+       be128 r[1];
+       int i = 15;
+
+       *r = t->t[ap[15]];
+       while (i--) {
+               gf128mul_x8_lle(r);
+               be128_xor(r, r, &t->t[ap[i]]);
+       }
+       *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_lle);
+
+void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t)
+{
+       u8 *ap = (u8 *)a;
+       be128 r[1];
+       int i = 0;
+
+       *r = t->t[ap[0]];
+       while (++i < 16) {
+               gf128mul_x8_bbe(r);
+               be128_xor(r, r, &t->t[ap[i]]);
+       }
+       *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_bbe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
diff --git a/crypto/lrw.c b/crypto/lrw.c
new file mode 100644 (file)
index 0000000..5664258
--- /dev/null
@@ -0,0 +1,301 @@
+/* LRW: as defined by Cyril Guyot in
+ *     http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
+ *
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on ecb.c
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/* This implementation is checked against the test vectors in the above
+ * document and by a test vector provided by Ken Buchanan at
+ * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
+ *
+ * The test vectors are included in the testing module tcrypt.[ch] */
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
+
+struct priv {
+       struct crypto_cipher *child;
+       /* optimizes multiplying a random (non-incrementing, as at the
+        * start of a new sector) value with key2; we could also have
+        * used 4k optimization tables or no optimization at all. In the
+        * latter case we would have to store key2 here */
+       struct gf128mul_64k *table;
+       /* stores:
+        *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+        *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+        *  key2*{ 0,0,...1,1,1,1,1 }, etc
+        * needed for optimized multiplication of incrementing values
+        * with key2 */
+       be128 mulinc[128];
+};
+
+static inline void setbit128_bbe(void *b, int bit)
+{
+       __set_bit(bit ^ 0x78, b);
+}
+
+static int setkey(struct crypto_tfm *parent, const u8 *key,
+                 unsigned int keylen)
+{
+       struct priv *ctx = crypto_tfm_ctx(parent);
+       struct crypto_cipher *child = ctx->child;
+       int err, i;
+       be128 tmp = { 0 };
+       int bsize = crypto_cipher_blocksize(child);
+
+       crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+       crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+                                      CRYPTO_TFM_REQ_MASK);
+       if ((err = crypto_cipher_setkey(child, key, keylen - bsize)))
+               return err;
+       crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
+                                    CRYPTO_TFM_RES_MASK);
+
+       if (ctx->table)
+               gf128mul_free_64k(ctx->table);
+
+       /* initialize multiplication table for Key2 */
+       ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize));
+       if (!ctx->table)
+               return -ENOMEM;
+
+       /* initialize optimization table */
+       for (i = 0; i < 128; i++) {
+               setbit128_bbe(&tmp, i);
+               ctx->mulinc[i] = tmp;
+               gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
+       }
+
+       return 0;
+}
+
+struct sinfo {
+       be128 t;
+       struct crypto_tfm *tfm;
+       void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+};
+
+static inline void inc(be128 *iv)
+{
+       if (!(iv->b = cpu_to_be64(be64_to_cpu(iv->b) + 1)))
+               iv->a = cpu_to_be64(be64_to_cpu(iv->a) + 1);
+}
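
inc() advances the 128-bit tweak index kept as a big-endian be128: bump the low 64-bit half and, if it wrapped to zero, carry into the high half. The same arithmetic on host-order halves, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static void inc128(uint64_t *hi, uint64_t *lo)
{
        if (++*lo == 0)         /* low half wrapped, carry into the high half */
                ++*hi;
}

int main(void)
{
        uint64_t hi = 0, lo = UINT64_MAX;

        inc128(&hi, &lo);
        printf("%016llx%016llx\n", (unsigned long long)hi,
               (unsigned long long)lo);
        return 0;
}
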
+
+static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
+{
+       be128_xor(dst, &s->t, src);             /* PP <- T xor P */
+       s->fn(s->tfm, dst, dst);                /* CC <- E(Key1,PP) */
+       be128_xor(dst, dst, &s->t);             /* C <- T xor CC */
+}
+
+/* this returns the number of consecutive 1 bits starting
+ * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
+static inline int get_index128(be128 *block)
+{
+       int x;
+       __be32 *p = (__be32 *) block;
+
+       for (p += 3, x = 0; x < 128; p--, x += 32) {
+               u32 val = be32_to_cpup(p);
+
+               if (!~val)
+                       continue;
+
+               return x + ffz(val);
+       }
+
+       return x;
+}
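
get_index128() counts the trailing one bits of the tweak index; together with the mulinc[] table built in setkey(), that lets the per-block tweak update be a single XOR, since T for index I+1 equals T for I xor key2 multiplied by the low-ones mask (I+1) ^ I. A standalone sketch of the trailing-ones count on a 32-bit counter (the real code works on the full 128-bit index):

#include <stdint.h>
#include <stdio.h>

/* Number of consecutive 1 bits at the right end of the counter: this is
 * the mulinc[] slot XORed in when the counter is incremented.
 * __builtin_ctz is a gcc/clang builtin (count trailing zeros). */
static int trailing_ones(uint32_t n)
{
        return n == UINT32_MAX ? 32 : __builtin_ctz(~n);
}

int main(void)
{
        uint32_t n;

        for (n = 0; n < 8; n++)
                printf("counter %u -> mulinc[%d]\n", n, trailing_ones(n));
        return 0;
}
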
+
+static int crypt(struct blkcipher_desc *d,
+                struct blkcipher_walk *w, struct priv *ctx,
+                void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
+{
+       int err;
+       unsigned int avail;
+       const int bs = crypto_cipher_blocksize(ctx->child);
+       struct sinfo s = {
+               .tfm = crypto_cipher_tfm(ctx->child),
+               .fn = fn
+       };
+       be128 *iv;
+       u8 *wsrc;
+       u8 *wdst;
+
+       err = blkcipher_walk_virt(d, w);
+       if (!(avail = w->nbytes))
+               return err;
+
+       wsrc = w->src.virt.addr;
+       wdst = w->dst.virt.addr;
+
+       /* calculate first value of T */
+       iv = (be128 *)w->iv;
+       s.t = *iv;
+
+       /* T <- I*Key2 */
+       gf128mul_64k_bbe(&s.t, ctx->table);
+
+       goto first;
+
+       for (;;) {
+               do {
+                       /* T <- I*Key2, using the optimization
+                        * discussed in the specification */
+                       be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]);
+                       inc(iv);
+
+first:
+                       lrw_round(&s, wdst, wsrc);
+
+                       wsrc += bs;
+                       wdst += bs;
+               } while ((avail -= bs) >= bs);
+
+               err = blkcipher_walk_done(d, w, avail);
+               if (!(avail = w->nbytes))
+                       break;
+
+               wsrc = w->src.virt.addr;
+               wdst = w->dst.virt.addr;
+       }
+
+       return err;
+}
+
+static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                  struct scatterlist *src, unsigned int nbytes)
+{
+       struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk w;
+
+       blkcipher_walk_init(&w, dst, src, nbytes);
+       return crypt(desc, &w, ctx,
+                    crypto_cipher_alg(ctx->child)->cia_encrypt);
+}
+
+static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                  struct scatterlist *src, unsigned int nbytes)
+{
+       struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk w;
+
+       blkcipher_walk_init(&w, dst, src, nbytes);
+       return crypt(desc, &w, ctx,
+                    crypto_cipher_alg(ctx->child)->cia_decrypt);
+}
+
+static int init_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_instance *inst = (void *)tfm->__crt_alg;
+       struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+       struct priv *ctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+
+       tfm = crypto_spawn_tfm(spawn);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       if (crypto_tfm_alg_blocksize(tfm) != 16) {
+               *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
+               return -EINVAL;
+       }
+
+       ctx->child = crypto_cipher_cast(tfm);
+       return 0;
+}
+
+static void exit_tfm(struct crypto_tfm *tfm)
+{
+       struct priv *ctx = crypto_tfm_ctx(tfm);
+       if (ctx->table)
+               gf128mul_free_64k(ctx->table);
+       crypto_free_cipher(ctx->child);
+}
+
+static struct crypto_instance *alloc(void *param, unsigned int len)
+{
+       struct crypto_instance *inst;
+       struct crypto_alg *alg;
+
+       alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
+                                 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+       if (IS_ERR(alg))
+               return ERR_PTR(PTR_ERR(alg));
+
+       inst = crypto_alloc_instance("lrw", alg);
+       if (IS_ERR(inst))
+               goto out_put_alg;
+
+       inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
+       inst->alg.cra_priority = alg->cra_priority;
+       inst->alg.cra_blocksize = alg->cra_blocksize;
+
+       if (alg->cra_alignmask < 7)
+               inst->alg.cra_alignmask = 7;
+       else
+               inst->alg.cra_alignmask = alg->cra_alignmask;
+       inst->alg.cra_type = &crypto_blkcipher_type;
+
+       if (!(alg->cra_blocksize % 4))
+               inst->alg.cra_alignmask |= 3;
+       inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
+       inst->alg.cra_blkcipher.min_keysize =
+               alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
+       inst->alg.cra_blkcipher.max_keysize =
+               alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
+
+       inst->alg.cra_ctxsize = sizeof(struct priv);
+
+       inst->alg.cra_init = init_tfm;
+       inst->alg.cra_exit = exit_tfm;
+
+       inst->alg.cra_blkcipher.setkey = setkey;
+       inst->alg.cra_blkcipher.encrypt = encrypt;
+       inst->alg.cra_blkcipher.decrypt = decrypt;
+
+out_put_alg:
+       crypto_mod_put(alg);
+       return inst;
+}
+
+static void free(struct crypto_instance *inst)
+{
+       crypto_drop_spawn(crypto_instance_ctx(inst));
+       kfree(inst);
+}
+
+static struct crypto_template crypto_tmpl = {
+       .name = "lrw",
+       .alloc = alloc,
+       .free = free,
+       .module = THIS_MODULE,
+};
+
+static int __init crypto_module_init(void)
+{
+       return crypto_register_template(&crypto_tmpl);
+}
+
+static void __exit crypto_module_exit(void)
+{
+       crypto_unregister_template(&crypto_tmpl);
+}
+
+module_init(crypto_module_init);
+module_exit(crypto_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LRW block cipher mode");
index 83307420d31c873398006cbd85286ebcf50e2965..d671e8942b1f7d2c774a46a56aada52bf76dca39 100644 (file)
@@ -906,6 +906,10 @@ static void do_test(void)
                            AES_CBC_ENC_TEST_VECTORS);
                test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
                            AES_CBC_DEC_TEST_VECTORS);
+               test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
+                           AES_LRW_ENC_TEST_VECTORS);
+               test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
+                           AES_LRW_DEC_TEST_VECTORS);
 
                //CAST5
                test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template,
@@ -977,6 +981,9 @@ static void do_test(void)
                test_hash("hmac(sha256)", hmac_sha256_tv_template,
                          HMAC_SHA256_TEST_VECTORS);
 
+               test_hash("xcbc(aes)", aes_xcbc128_tv_template,
+                         XCBC_AES_TEST_VECTORS);
+
                test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS);
                break;
 
@@ -1052,6 +1059,10 @@ static void do_test(void)
                            AES_CBC_ENC_TEST_VECTORS);
                test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template,
                            AES_CBC_DEC_TEST_VECTORS);
+               test_cipher("lrw(aes)", ENCRYPT, aes_lrw_enc_tv_template,
+                           AES_LRW_ENC_TEST_VECTORS);
+               test_cipher("lrw(aes)", DECRYPT, aes_lrw_dec_tv_template,
+                           AES_LRW_DEC_TEST_VECTORS);
                break;
 
        case 11:
@@ -1191,6 +1202,10 @@ static void do_test(void)
                                  aes_speed_template);
                test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
                                  aes_speed_template);
+               test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+                                 aes_lrw_speed_template);
+               test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+                                 aes_lrw_speed_template);
                break;
 
        case 201:
index a40c4411729ee2ea9bc8cb192ca99105ee2d978e..48a81362cb851b7e0c9163191d2a61e14ca6d2ab 100644 (file)
@@ -39,15 +39,15 @@ struct hash_testvec {
 struct cipher_testvec {
        char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
        char iv[MAX_IVLEN];
-       char input[48];
-       char result[48];
+       char input[512];
+       char result[512];
        unsigned char tap[MAX_TAP];
        int np;
        unsigned char fail;
        unsigned char wk; /* weak key flag */
        unsigned char klen;
-       unsigned char ilen;
-       unsigned char rlen;
+       unsigned short ilen;
+       unsigned short rlen;
 };
 
 struct cipher_speed {
@@ -933,6 +933,74 @@ static struct hash_testvec hmac_sha256_tv_template[] = {
        },
 };
 
+#define XCBC_AES_TEST_VECTORS 6
+
+static struct hash_testvec aes_xcbc128_tv_template[] = {
+       {
+               .key    = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                           0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .plaintext = { [0 ... 15] = 0 },
+               .digest = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
+                           0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 },
+               .psize  = 0,
+               .ksize  = 16,
+       }, {
+               .key    = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                           0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .plaintext = { 0x00, 0x01, 0x02 },
+               .digest = { 0x5b, 0x37, 0x65, 0x80, 0xae, 0x2f, 0x19, 0xaf,
+                           0xe7, 0x21, 0x9c, 0xee, 0xf1, 0x72, 0x75, 0x6f },
+               .psize  = 3,
+               .ksize  = 16,
+       } , {
+               .key    = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                           0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                              0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .digest = { 0xd2, 0xa2, 0x46, 0xfa, 0x34, 0x9b, 0x68, 0xa7,
+                           0x99, 0x98, 0xa4, 0x39, 0x4f, 0xf7, 0xa2, 0x63 },
+               .psize  = 16,
+               .ksize  = 16,
+       }, {
+               .key    = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                           0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                              0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                              0x10, 0x11, 0x12, 0x13 },
+               .digest = { 0x47, 0xf5, 0x1b, 0x45, 0x64, 0x96, 0x62, 0x15,
+                           0xb8, 0x98, 0x5c, 0x63, 0x05, 0x5e, 0xd3, 0x08 },
+               .tap    = { 10, 10 },
+               .psize  = 20,
+               .np     = 2,
+               .ksize  = 16,
+       }, {
+               .key    = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                           0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                              0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                              0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                              0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+               .digest = { 0xf5, 0x4f, 0x0e, 0xc8, 0xd2, 0xb9, 0xf3, 0xd3,
+                           0x68, 0x07, 0x73, 0x4b, 0xd5, 0x28, 0x3f, 0xd4 },
+               .psize  = 32,
+               .ksize  = 16,
+       }, {
+               .key    = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                           0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+               .plaintext = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                              0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+                              0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                              0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+                              0x20, 0x21 },
+               .digest = { 0xbe, 0xcb, 0xb3, 0xbc, 0xcd, 0xb5, 0x18, 0xa3,
+                           0x06, 0x77, 0xd5, 0x48, 0x1f, 0xb6, 0xb4, 0xd8 },
+               .tap    = { 17, 17 },
+               .psize  = 34,
+               .np     = 2,
+               .ksize  = 16,
+       }
+};
+
 /*
  * DES test vectors.
  */
@@ -1831,6 +1899,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
 #define AES_DEC_TEST_VECTORS 3
 #define AES_CBC_ENC_TEST_VECTORS 2
 #define AES_CBC_DEC_TEST_VECTORS 2
+#define AES_LRW_ENC_TEST_VECTORS 8
+#define AES_LRW_DEC_TEST_VECTORS 8
 
 static struct cipher_testvec aes_enc_tv_template[] = {
        { /* From FIPS-197 */
@@ -1968,6 +2038,509 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
        },
 };
 
+static struct cipher_testvec aes_lrw_enc_tv_template[] = {
+       /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
+       { /* LRW-32-AES 1 */
+               .key    = { 0x45, 0x62, 0xac, 0x25, 0xf8, 0x28, 0x17, 0x6d,
+                           0x4c, 0x26, 0x84, 0x14, 0xb5, 0x68, 0x01, 0x85,
+                           0x25, 0x8e, 0x2a, 0x05, 0xe7, 0x3e, 0x9d, 0x03,
+                           0xee, 0x5a, 0x83, 0x0c, 0xcc, 0x09, 0x4c, 0x87 },
+               .klen   = 32,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0xf1, 0xb2, 0x73, 0xcd, 0x65, 0xa3, 0xdf, 0x5f,
+                           0xe9, 0x5d, 0x48, 0x92, 0x54, 0x63, 0x4e, 0xb8 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 2 */
+               .key    = { 0x59, 0x70, 0x47, 0x14, 0xf5, 0x57, 0x47, 0x8c,
+                           0xd7, 0x79, 0xe8, 0x0f, 0x54, 0x88, 0x79, 0x44,
+                           0x0d, 0x48, 0xf0, 0xb7, 0xb1, 0x5a, 0x53, 0xea,
+                           0x1c, 0xaa, 0x6b, 0x29, 0xc2, 0xca, 0xfb, 0xaf
+               },
+               .klen   = 32,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0x00, 0xc8, 0x2b, 0xae, 0x95, 0xbb, 0xcd, 0xe5,
+                           0x27, 0x4f, 0x07, 0x69, 0xb2, 0x60, 0xe1, 0x36 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 3 */
+               .key    = { 0xd8, 0x2a, 0x91, 0x34, 0xb2, 0x6a, 0x56, 0x50,
+                           0x30, 0xfe, 0x69, 0xe2, 0x37, 0x7f, 0x98, 0x47,
+                           0xcd, 0xf9, 0x0b, 0x16, 0x0c, 0x64, 0x8f, 0xb6,
+                           0xb0, 0x0d, 0x0d, 0x1b, 0xae, 0x85, 0x87, 0x1f },
+               .klen   = 32,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0x76, 0x32, 0x21, 0x83, 0xed, 0x8f, 0xf1, 0x82,
+                           0xf9, 0x59, 0x62, 0x03, 0x69, 0x0e, 0x5e, 0x01 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 4 */
+               .key    = { 0x0f, 0x6a, 0xef, 0xf8, 0xd3, 0xd2, 0xbb, 0x15,
+                           0x25, 0x83, 0xf7, 0x3c, 0x1f, 0x01, 0x28, 0x74,
+                           0xca, 0xc6, 0xbc, 0x35, 0x4d, 0x4a, 0x65, 0x54,
+                           0x90, 0xae, 0x61, 0xcf, 0x7b, 0xae, 0xbd, 0xcc,
+                           0xad, 0xe4, 0x94, 0xc5, 0x4a, 0x29, 0xae, 0x70 },
+               .klen   = 40,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0x9c, 0x0f, 0x15, 0x2f, 0x55, 0xa2, 0xd8, 0xf0,
+                           0xd6, 0x7b, 0x8f, 0x9e, 0x28, 0x22, 0xbc, 0x41 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 5 */
+               .key    = { 0x8a, 0xd4, 0xee, 0x10, 0x2f, 0xbd, 0x81, 0xff,
+                           0xf8, 0x86, 0xce, 0xac, 0x93, 0xc5, 0xad, 0xc6,
+                           0xa0, 0x19, 0x07, 0xc0, 0x9d, 0xf7, 0xbb, 0xdd,
+                           0x52, 0x13, 0xb2, 0xb7, 0xf0, 0xff, 0x11, 0xd8,
+                           0xd6, 0x08, 0xd0, 0xcd, 0x2e, 0xb1, 0x17, 0x6f },
+               .klen   = 40,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0xd4, 0x27, 0x6a, 0x7f, 0x14, 0x91, 0x3d, 0x65,
+                           0xc8, 0x60, 0x48, 0x02, 0x87, 0xe3, 0x34, 0x06 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 6 */
+               .key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+                           0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+                           0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+                           0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+                           0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+                           0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+               .klen   = 48,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0xbd, 0x06, 0xb8, 0xe1, 0xdb, 0x98, 0x89, 0x9e,
+                           0xc4, 0x98, 0xe4, 0x91, 0xcf, 0x1c, 0x70, 0x2b },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 7 */
+               .key    = { 0xfb, 0x76, 0x15, 0xb2, 0x3d, 0x80, 0x89, 0x1d,
+                           0xd4, 0x70, 0x98, 0x0b, 0xc7, 0x95, 0x84, 0xc8,
+                           0xb2, 0xfb, 0x64, 0xce, 0x60, 0x97, 0x87, 0x8d,
+                           0x17, 0xfc, 0xe4, 0x5a, 0x49, 0xe8, 0x30, 0xb7,
+                           0x6e, 0x78, 0x17, 0xe7, 0x2d, 0x5e, 0x12, 0xd4,
+                           0x60, 0x64, 0x04, 0x7a, 0xf1, 0x2f, 0x9e, 0x0c },
+               .klen   = 48,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+               .input  = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .ilen   = 16,
+               .result = { 0x5b, 0x90, 0x8e, 0xc1, 0xab, 0xdd, 0x67, 0x5f,
+                           0x3d, 0x69, 0x8a, 0x95, 0x53, 0xc8, 0x9c, 0xe5 },
+               .rlen   = 16,
+       }, {
+/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
+               .key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+                           0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+                           0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+                           0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+                           0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+                           0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+               .klen   = 48,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0x05, 0x11, 0xb7, 0x18, 0xab, 0xc6, 0x2d, 0xac,
+                           0x70, 0x5d, 0xf6, 0x22, 0x94, 0xcd, 0xe5, 0x6c,
+                           0x17, 0x6b, 0xf6, 0x1c, 0xf0, 0xf3, 0x6e, 0xf8,
+                           0x50, 0x38, 0x1f, 0x71, 0x49, 0xb6, 0x57, 0xd6,
+                           0x8f, 0xcb, 0x8d, 0x6b, 0xe3, 0xa6, 0x29, 0x90,
+                           0xfe, 0x2a, 0x62, 0x82, 0xae, 0x6d, 0x8b, 0xf6,
+                           0xad, 0x1e, 0x9e, 0x20, 0x5f, 0x38, 0xbe, 0x04,
+                           0xda, 0x10, 0x8e, 0xed, 0xa2, 0xa4, 0x87, 0xab,
+                           0xda, 0x6b, 0xb4, 0x0c, 0x75, 0xba, 0xd3, 0x7c,
+                           0xc9, 0xac, 0x42, 0x31, 0x95, 0x7c, 0xc9, 0x04,
+                           0xeb, 0xd5, 0x6e, 0x32, 0x69, 0x8a, 0xdb, 0xa6,
+                           0x15, 0xd7, 0x3f, 0x4f, 0x2f, 0x66, 0x69, 0x03,
+                           0x9c, 0x1f, 0x54, 0x0f, 0xde, 0x1f, 0xf3, 0x65,
+                           0x4c, 0x96, 0x12, 0xed, 0x7c, 0x92, 0x03, 0x01,
+                           0x6f, 0xbc, 0x35, 0x93, 0xac, 0xf1, 0x27, 0xf1,
+                           0xb4, 0x96, 0x82, 0x5a, 0x5f, 0xb0, 0xa0, 0x50,
+                           0x89, 0xa4, 0x8e, 0x66, 0x44, 0x85, 0xcc, 0xfd,
+                           0x33, 0x14, 0x70, 0xe3, 0x96, 0xb2, 0xc3, 0xd3,
+                           0xbb, 0x54, 0x5a, 0x1a, 0xf9, 0x74, 0xa2, 0xc5,
+                           0x2d, 0x64, 0x75, 0xdd, 0xb4, 0x54, 0xe6, 0x74,
+                           0x8c, 0xd3, 0x9d, 0x9e, 0x86, 0xab, 0x51, 0x53,
+                           0xb7, 0x93, 0x3e, 0x6f, 0xd0, 0x4e, 0x2c, 0x40,
+                           0xf6, 0xa8, 0x2e, 0x3e, 0x9d, 0xf4, 0x66, 0xa5,
+                           0x76, 0x12, 0x73, 0x44, 0x1a, 0x56, 0xd7, 0x72,
+                           0x88, 0xcd, 0x21, 0x8c, 0x4c, 0x0f, 0xfe, 0xda,
+                           0x95, 0xe0, 0x3a, 0xa6, 0xa5, 0x84, 0x46, 0xcd,
+                           0xd5, 0x3e, 0x9d, 0x3a, 0xe2, 0x67, 0xe6, 0x60,
+                           0x1a, 0xe2, 0x70, 0x85, 0x58, 0xc2, 0x1b, 0x09,
+                           0xe1, 0xd7, 0x2c, 0xca, 0xad, 0xa8, 0x8f, 0xf9,
+                           0xac, 0xb3, 0x0e, 0xdb, 0xca, 0x2e, 0xe2, 0xb8,
+                           0x51, 0x71, 0xd9, 0x3c, 0x6c, 0xf1, 0x56, 0xf8,
+                           0xea, 0x9c, 0xf1, 0xfb, 0x0c, 0xe6, 0xb7, 0x10,
+                           0x1c, 0xf8, 0xa9, 0x7c, 0xe8, 0x53, 0x35, 0xc1,
+                           0x90, 0x3e, 0x76, 0x4a, 0x74, 0xa4, 0x21, 0x2c,
+                           0xf6, 0x2c, 0x4e, 0x0f, 0x94, 0x3a, 0x88, 0x2e,
+                           0x41, 0x09, 0x6a, 0x33, 0x7d, 0xf6, 0xdd, 0x3f,
+                           0x8d, 0x23, 0x31, 0x74, 0x84, 0xeb, 0x88, 0x6e,
+                           0xcc, 0xb9, 0xbc, 0x22, 0x83, 0x19, 0x07, 0x22,
+                           0xa5, 0x2d, 0xdf, 0xa5, 0xf3, 0x80, 0x85, 0x78,
+                           0x84, 0x39, 0x6a, 0x6d, 0x6a, 0x99, 0x4f, 0xa5,
+                           0x15, 0xfe, 0x46, 0xb0, 0xe4, 0x6c, 0xa5, 0x41,
+                           0x3c, 0xce, 0x8f, 0x42, 0x60, 0x71, 0xa7, 0x75,
+                           0x08, 0x40, 0x65, 0x8a, 0x82, 0xbf, 0xf5, 0x43,
+                           0x71, 0x96, 0xa9, 0x4d, 0x44, 0x8a, 0x20, 0xbe,
+                           0xfa, 0x4d, 0xbb, 0xc0, 0x7d, 0x31, 0x96, 0x65,
+                           0xe7, 0x75, 0xe5, 0x3e, 0xfd, 0x92, 0x3b, 0xc9,
+                           0x55, 0xbb, 0x16, 0x7e, 0xf7, 0xc2, 0x8c, 0xa4,
+                           0x40, 0x1d, 0xe5, 0xef, 0x0e, 0xdf, 0xe4, 0x9a,
+                           0x62, 0x73, 0x65, 0xfd, 0x46, 0x63, 0x25, 0x3d,
+                           0x2b, 0xaf, 0xe5, 0x64, 0xfe, 0xa5, 0x5c, 0xcf,
+                           0x24, 0xf3, 0xb4, 0xac, 0x64, 0xba, 0xdf, 0x4b,
+                           0xc6, 0x96, 0x7d, 0x81, 0x2d, 0x8d, 0x97, 0xf7,
+                           0xc5, 0x68, 0x77, 0x84, 0x32, 0x2b, 0xcc, 0x85,
+                           0x74, 0x96, 0xf0, 0x12, 0x77, 0x61, 0xb9, 0xeb,
+                           0x71, 0xaa, 0x82, 0xcb, 0x1c, 0xdb, 0x89, 0xc8,
+                           0xc6, 0xb5, 0xe3, 0x5c, 0x7d, 0x39, 0x07, 0x24,
+                           0xda, 0x39, 0x87, 0x45, 0xc0, 0x2b, 0xbb, 0x01,
+                           0xac, 0xbc, 0x2a, 0x5c, 0x7f, 0xfc, 0xe8, 0xce,
+                           0x6d, 0x9c, 0x6f, 0xed, 0xd3, 0xc1, 0xa1, 0xd6,
+                           0xc5, 0x55, 0xa9, 0x66, 0x2f, 0xe1, 0xc8, 0x32,
+                           0xa6, 0x5d, 0xa4, 0x3a, 0x98, 0x73, 0xe8, 0x45,
+                           0xa4, 0xc7, 0xa8, 0xb4, 0xf6, 0x13, 0x03, 0xf6,
+                           0xe9, 0x2e, 0xc4, 0x29, 0x0f, 0x84, 0xdb, 0xc4,
+                           0x21, 0xc4, 0xc2, 0x75, 0x67, 0x89, 0x37, 0x0a },
+               .ilen   = 512,
+               .result = { 0x1a, 0x1d, 0xa9, 0x30, 0xad, 0xf9, 0x2f, 0x9b,
+                           0xb6, 0x1d, 0xae, 0xef, 0xf0, 0x2f, 0xf8, 0x5a,
+                           0x39, 0x3c, 0xbf, 0x2a, 0xb2, 0x45, 0xb2, 0x23,
+                           0x1b, 0x63, 0x3c, 0xcf, 0xaa, 0xbe, 0xcf, 0x4e,
+                           0xfa, 0xe8, 0x29, 0xc2, 0x20, 0x68, 0x2b, 0x3c,
+                           0x2e, 0x8b, 0xf7, 0x6e, 0x25, 0xbd, 0xe3, 0x3d,
+                           0x66, 0x27, 0xd6, 0xaf, 0xd6, 0x64, 0x3e, 0xe3,
+                           0xe8, 0x58, 0x46, 0x97, 0x39, 0x51, 0x07, 0xde,
+                           0xcb, 0x37, 0xbc, 0xa9, 0xc0, 0x5f, 0x75, 0xc3,
+                           0x0e, 0x84, 0x23, 0x1d, 0x16, 0xd4, 0x1c, 0x59,
+                           0x9c, 0x1a, 0x02, 0x55, 0xab, 0x3a, 0x97, 0x1d,
+                           0xdf, 0xdd, 0xc7, 0x06, 0x51, 0xd7, 0x70, 0xae,
+                           0x23, 0xc6, 0x8c, 0xf5, 0x1e, 0xa0, 0xe5, 0x82,
+                           0xb8, 0xb2, 0xbf, 0x04, 0xa0, 0x32, 0x8e, 0x68,
+                           0xeb, 0xaf, 0x6e, 0x2d, 0x94, 0x22, 0x2f, 0xce,
+                           0x4c, 0xb5, 0x59, 0xe2, 0xa2, 0x2f, 0xa0, 0x98,
+                           0x1a, 0x97, 0xc6, 0xd4, 0xb5, 0x00, 0x59, 0xf2,
+                           0x84, 0x14, 0x72, 0xb1, 0x9a, 0x6e, 0xa3, 0x7f,
+                           0xea, 0x20, 0xe7, 0xcb, 0x65, 0x77, 0x3a, 0xdf,
+                           0xc8, 0x97, 0x67, 0x15, 0xc2, 0x2a, 0x27, 0xcc,
+                           0x18, 0x55, 0xa1, 0x24, 0x0b, 0x24, 0x24, 0xaf,
+                           0x5b, 0xec, 0x68, 0xb8, 0xc8, 0xf5, 0xba, 0x63,
+                           0xff, 0xed, 0x89, 0xce, 0xd5, 0x3d, 0x88, 0xf3,
+                           0x25, 0xef, 0x05, 0x7c, 0x3a, 0xef, 0xeb, 0xd8,
+                           0x7a, 0x32, 0x0d, 0xd1, 0x1e, 0x58, 0x59, 0x99,
+                           0x90, 0x25, 0xb5, 0x26, 0xb0, 0xe3, 0x2b, 0x6c,
+                           0x4c, 0xa9, 0x8b, 0x84, 0x4f, 0x5e, 0x01, 0x50,
+                           0x41, 0x30, 0x58, 0xc5, 0x62, 0x74, 0x52, 0x1d,
+                           0x45, 0x24, 0x6a, 0x42, 0x64, 0x4f, 0x97, 0x1c,
+                           0xa8, 0x66, 0xb5, 0x6d, 0x79, 0xd4, 0x0d, 0x48,
+                           0xc5, 0x5f, 0xf3, 0x90, 0x32, 0xdd, 0xdd, 0xe1,
+                           0xe4, 0xa9, 0x9f, 0xfc, 0xc3, 0x52, 0x5a, 0x46,
+                           0xe4, 0x81, 0x84, 0x95, 0x36, 0x59, 0x7a, 0x6b,
+                           0xaa, 0xb3, 0x60, 0xad, 0xce, 0x9f, 0x9f, 0x28,
+                           0xe0, 0x01, 0x75, 0x22, 0xc4, 0x4e, 0xa9, 0x62,
+                           0x5c, 0x62, 0x0d, 0x00, 0xcb, 0x13, 0xe8, 0x43,
+                           0x72, 0xd4, 0x2d, 0x53, 0x46, 0xb5, 0xd1, 0x16,
+                           0x22, 0x18, 0xdf, 0x34, 0x33, 0xf5, 0xd6, 0x1c,
+                           0xb8, 0x79, 0x78, 0x97, 0x94, 0xff, 0x72, 0x13,
+                           0x4c, 0x27, 0xfc, 0xcb, 0xbf, 0x01, 0x53, 0xa6,
+                           0xb4, 0x50, 0x6e, 0xde, 0xdf, 0xb5, 0x43, 0xa4,
+                           0x59, 0xdf, 0x52, 0xf9, 0x7c, 0xe0, 0x11, 0x6f,
+                           0x2d, 0x14, 0x8e, 0x24, 0x61, 0x2c, 0xe1, 0x17,
+                           0xcc, 0xce, 0x51, 0x0c, 0x19, 0x8a, 0x82, 0x30,
+                           0x94, 0xd5, 0x3d, 0x6a, 0x53, 0x06, 0x5e, 0xbd,
+                           0xb7, 0xeb, 0xfa, 0xfd, 0x27, 0x51, 0xde, 0x85,
+                           0x1e, 0x86, 0x53, 0x11, 0x53, 0x94, 0x00, 0xee,
+                           0x2b, 0x8c, 0x08, 0x2a, 0xbf, 0xdd, 0xae, 0x11,
+                           0xcb, 0x1e, 0xa2, 0x07, 0x9a, 0x80, 0xcf, 0x62,
+                           0x9b, 0x09, 0xdc, 0x95, 0x3c, 0x96, 0x8e, 0xb1,
+                           0x09, 0xbd, 0xe4, 0xeb, 0xdb, 0xca, 0x70, 0x7a,
+                           0x9e, 0xfa, 0x31, 0x18, 0x45, 0x3c, 0x21, 0x33,
+                           0xb0, 0xb3, 0x2b, 0xea, 0xf3, 0x71, 0x2d, 0xe1,
+                           0x03, 0xad, 0x1b, 0x48, 0xd4, 0x67, 0x27, 0xf0,
+                           0x62, 0xe4, 0x3d, 0xfb, 0x9b, 0x08, 0x76, 0xe7,
+                           0xdd, 0x2b, 0x01, 0x39, 0x04, 0x5a, 0x58, 0x7a,
+                           0xf7, 0x11, 0x90, 0xec, 0xbd, 0x51, 0x5c, 0x32,
+                           0x6b, 0xd7, 0x35, 0x39, 0x02, 0x6b, 0xf2, 0xa6,
+                           0xd0, 0x0d, 0x07, 0xe1, 0x06, 0xc4, 0x5b, 0x7d,
+                           0xe4, 0x6a, 0xd7, 0xee, 0x15, 0x1f, 0x83, 0xb4,
+                           0xa3, 0xa7, 0x5e, 0xc3, 0x90, 0xb7, 0xef, 0xd3,
+                           0xb7, 0x4f, 0xf8, 0x92, 0x4c, 0xb7, 0x3c, 0x29,
+                           0xcd, 0x7e, 0x2b, 0x5d, 0x43, 0xea, 0x42, 0xe7,
+                           0x74, 0x3f, 0x7d, 0x58, 0x88, 0x75, 0xde, 0x3e },
+               .rlen   = 512,
+       }
+};
+
+static struct cipher_testvec aes_lrw_dec_tv_template[] = {
+       /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
+       /* same as enc vectors with input and result reversed */
+       { /* LRW-32-AES 1 */
+               .key    = { 0x45, 0x62, 0xac, 0x25, 0xf8, 0x28, 0x17, 0x6d,
+                           0x4c, 0x26, 0x84, 0x14, 0xb5, 0x68, 0x01, 0x85,
+                           0x25, 0x8e, 0x2a, 0x05, 0xe7, 0x3e, 0x9d, 0x03,
+                           0xee, 0x5a, 0x83, 0x0c, 0xcc, 0x09, 0x4c, 0x87 },
+               .klen   = 32,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0xf1, 0xb2, 0x73, 0xcd, 0x65, 0xa3, 0xdf, 0x5f,
+                           0xe9, 0x5d, 0x48, 0x92, 0x54, 0x63, 0x4e, 0xb8 },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 2 */
+               .key    = { 0x59, 0x70, 0x47, 0x14, 0xf5, 0x57, 0x47, 0x8c,
+                           0xd7, 0x79, 0xe8, 0x0f, 0x54, 0x88, 0x79, 0x44,
+                           0x0d, 0x48, 0xf0, 0xb7, 0xb1, 0x5a, 0x53, 0xea,
+                           0x1c, 0xaa, 0x6b, 0x29, 0xc2, 0xca, 0xfb, 0xaf },
+               .klen   = 32,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
+               .input  = { 0x00, 0xc8, 0x2b, 0xae, 0x95, 0xbb, 0xcd, 0xe5,
+                           0x27, 0x4f, 0x07, 0x69, 0xb2, 0x60, 0xe1, 0x36 },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 3 */
+               .key    = { 0xd8, 0x2a, 0x91, 0x34, 0xb2, 0x6a, 0x56, 0x50,
+                           0x30, 0xfe, 0x69, 0xe2, 0x37, 0x7f, 0x98, 0x47,
+                           0xcd, 0xf9, 0x0b, 0x16, 0x0c, 0x64, 0x8f, 0xb6,
+                           0xb0, 0x0d, 0x0d, 0x1b, 0xae, 0x85, 0x87, 0x1f },
+               .klen   = 32,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+               .input  = { 0x76, 0x32, 0x21, 0x83, 0xed, 0x8f, 0xf1, 0x82,
+                           0xf9, 0x59, 0x62, 0x03, 0x69, 0x0e, 0x5e, 0x01 },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 4 */
+               .key    = { 0x0f, 0x6a, 0xef, 0xf8, 0xd3, 0xd2, 0xbb, 0x15,
+                           0x25, 0x83, 0xf7, 0x3c, 0x1f, 0x01, 0x28, 0x74,
+                           0xca, 0xc6, 0xbc, 0x35, 0x4d, 0x4a, 0x65, 0x54,
+                           0x90, 0xae, 0x61, 0xcf, 0x7b, 0xae, 0xbd, 0xcc,
+                           0xad, 0xe4, 0x94, 0xc5, 0x4a, 0x29, 0xae, 0x70 },
+               .klen   = 40,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0x9c, 0x0f, 0x15, 0x2f, 0x55, 0xa2, 0xd8, 0xf0,
+                           0xd6, 0x7b, 0x8f, 0x9e, 0x28, 0x22, 0xbc, 0x41 },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 5 */
+               .key    = { 0x8a, 0xd4, 0xee, 0x10, 0x2f, 0xbd, 0x81, 0xff,
+                           0xf8, 0x86, 0xce, 0xac, 0x93, 0xc5, 0xad, 0xc6,
+                           0xa0, 0x19, 0x07, 0xc0, 0x9d, 0xf7, 0xbb, 0xdd,
+                           0x52, 0x13, 0xb2, 0xb7, 0xf0, 0xff, 0x11, 0xd8,
+                           0xd6, 0x08, 0xd0, 0xcd, 0x2e, 0xb1, 0x17, 0x6f },
+               .klen   = 40,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+               .input  = { 0xd4, 0x27, 0x6a, 0x7f, 0x14, 0x91, 0x3d, 0x65,
+                           0xc8, 0x60, 0x48, 0x02, 0x87, 0xe3, 0x34, 0x06 },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 6 */
+               .key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+                           0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+                           0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+                           0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+                           0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+                           0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+               .klen   = 48,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0xbd, 0x06, 0xb8, 0xe1, 0xdb, 0x98, 0x89, 0x9e,
+                           0xc4, 0x98, 0xe4, 0x91, 0xcf, 0x1c, 0x70, 0x2b },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, { /* LRW-32-AES 7 */
+               .key    = { 0xfb, 0x76, 0x15, 0xb2, 0x3d, 0x80, 0x89, 0x1d,
+                           0xd4, 0x70, 0x98, 0x0b, 0xc7, 0x95, 0x84, 0xc8,
+                           0xb2, 0xfb, 0x64, 0xce, 0x60, 0x97, 0x87, 0x8d,
+                           0x17, 0xfc, 0xe4, 0x5a, 0x49, 0xe8, 0x30, 0xb7,
+                           0x6e, 0x78, 0x17, 0xe7, 0x2d, 0x5e, 0x12, 0xd4,
+                           0x60, 0x64, 0x04, 0x7a, 0xf1, 0x2f, 0x9e, 0x0c },
+               .klen   = 48,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 },
+               .input  = { 0x5b, 0x90, 0x8e, 0xc1, 0xab, 0xdd, 0x67, 0x5f,
+                           0x3d, 0x69, 0x8a, 0x95, 0x53, 0xc8, 0x9c, 0xe5 },
+               .ilen   = 16,
+               .result = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+                           0x38, 0x39, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46 },
+               .rlen   = 16,
+       }, {
+/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
+               .key    = { 0xf8, 0xd4, 0x76, 0xff, 0xd6, 0x46, 0xee, 0x6c,
+                           0x23, 0x84, 0xcb, 0x1c, 0x77, 0xd6, 0x19, 0x5d,
+                           0xfe, 0xf1, 0xa9, 0xf3, 0x7b, 0xbc, 0x8d, 0x21,
+                           0xa7, 0x9c, 0x21, 0xf8, 0xcb, 0x90, 0x02, 0x89,
+                           0xa8, 0x45, 0x34, 0x8e, 0xc8, 0xc5, 0xb5, 0xf1,
+                           0x26, 0xf5, 0x0e, 0x76, 0xfe, 0xfd, 0x1b, 0x1e },
+               .klen   = 48,
+               .iv     = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                           0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 },
+               .input  = { 0x1a, 0x1d, 0xa9, 0x30, 0xad, 0xf9, 0x2f, 0x9b,
+                           0xb6, 0x1d, 0xae, 0xef, 0xf0, 0x2f, 0xf8, 0x5a,
+                           0x39, 0x3c, 0xbf, 0x2a, 0xb2, 0x45, 0xb2, 0x23,
+                           0x1b, 0x63, 0x3c, 0xcf, 0xaa, 0xbe, 0xcf, 0x4e,
+                           0xfa, 0xe8, 0x29, 0xc2, 0x20, 0x68, 0x2b, 0x3c,
+                           0x2e, 0x8b, 0xf7, 0x6e, 0x25, 0xbd, 0xe3, 0x3d,
+                           0x66, 0x27, 0xd6, 0xaf, 0xd6, 0x64, 0x3e, 0xe3,
+                           0xe8, 0x58, 0x46, 0x97, 0x39, 0x51, 0x07, 0xde,
+                           0xcb, 0x37, 0xbc, 0xa9, 0xc0, 0x5f, 0x75, 0xc3,
+                           0x0e, 0x84, 0x23, 0x1d, 0x16, 0xd4, 0x1c, 0x59,
+                           0x9c, 0x1a, 0x02, 0x55, 0xab, 0x3a, 0x97, 0x1d,
+                           0xdf, 0xdd, 0xc7, 0x06, 0x51, 0xd7, 0x70, 0xae,
+                           0x23, 0xc6, 0x8c, 0xf5, 0x1e, 0xa0, 0xe5, 0x82,
+                           0xb8, 0xb2, 0xbf, 0x04, 0xa0, 0x32, 0x8e, 0x68,
+                           0xeb, 0xaf, 0x6e, 0x2d, 0x94, 0x22, 0x2f, 0xce,
+                           0x4c, 0xb5, 0x59, 0xe2, 0xa2, 0x2f, 0xa0, 0x98,
+                           0x1a, 0x97, 0xc6, 0xd4, 0xb5, 0x00, 0x59, 0xf2,
+                           0x84, 0x14, 0x72, 0xb1, 0x9a, 0x6e, 0xa3, 0x7f,
+                           0xea, 0x20, 0xe7, 0xcb, 0x65, 0x77, 0x3a, 0xdf,
+                           0xc8, 0x97, 0x67, 0x15, 0xc2, 0x2a, 0x27, 0xcc,
+                           0x18, 0x55, 0xa1, 0x24, 0x0b, 0x24, 0x24, 0xaf,
+                           0x5b, 0xec, 0x68, 0xb8, 0xc8, 0xf5, 0xba, 0x63,
+                           0xff, 0xed, 0x89, 0xce, 0xd5, 0x3d, 0x88, 0xf3,
+                           0x25, 0xef, 0x05, 0x7c, 0x3a, 0xef, 0xeb, 0xd8,
+                           0x7a, 0x32, 0x0d, 0xd1, 0x1e, 0x58, 0x59, 0x99,
+                           0x90, 0x25, 0xb5, 0x26, 0xb0, 0xe3, 0x2b, 0x6c,
+                           0x4c, 0xa9, 0x8b, 0x84, 0x4f, 0x5e, 0x01, 0x50,
+                           0x41, 0x30, 0x58, 0xc5, 0x62, 0x74, 0x52, 0x1d,
+                           0x45, 0x24, 0x6a, 0x42, 0x64, 0x4f, 0x97, 0x1c,
+                           0xa8, 0x66, 0xb5, 0x6d, 0x79, 0xd4, 0x0d, 0x48,
+                           0xc5, 0x5f, 0xf3, 0x90, 0x32, 0xdd, 0xdd, 0xe1,
+                           0xe4, 0xa9, 0x9f, 0xfc, 0xc3, 0x52, 0x5a, 0x46,
+                           0xe4, 0x81, 0x84, 0x95, 0x36, 0x59, 0x7a, 0x6b,
+                           0xaa, 0xb3, 0x60, 0xad, 0xce, 0x9f, 0x9f, 0x28,
+                           0xe0, 0x01, 0x75, 0x22, 0xc4, 0x4e, 0xa9, 0x62,
+                           0x5c, 0x62, 0x0d, 0x00, 0xcb, 0x13, 0xe8, 0x43,
+                           0x72, 0xd4, 0x2d, 0x53, 0x46, 0xb5, 0xd1, 0x16,
+                           0x22, 0x18, 0xdf, 0x34, 0x33, 0xf5, 0xd6, 0x1c,
+                           0xb8, 0x79, 0x78, 0x97, 0x94, 0xff, 0x72, 0x13,
+                           0x4c, 0x27, 0xfc, 0xcb, 0xbf, 0x01, 0x53, 0xa6,
+                           0xb4, 0x50, 0x6e, 0xde, 0xdf, 0xb5, 0x43, 0xa4,
+                           0x59, 0xdf, 0x52, 0xf9, 0x7c, 0xe0, 0x11, 0x6f,
+                           0x2d, 0x14, 0x8e, 0x24, 0x61, 0x2c, 0xe1, 0x17,
+                           0xcc, 0xce, 0x51, 0x0c, 0x19, 0x8a, 0x82, 0x30,
+                           0x94, 0xd5, 0x3d, 0x6a, 0x53, 0x06, 0x5e, 0xbd,
+                           0xb7, 0xeb, 0xfa, 0xfd, 0x27, 0x51, 0xde, 0x85,
+                           0x1e, 0x86, 0x53, 0x11, 0x53, 0x94, 0x00, 0xee,
+                           0x2b, 0x8c, 0x08, 0x2a, 0xbf, 0xdd, 0xae, 0x11,
+                           0xcb, 0x1e, 0xa2, 0x07, 0x9a, 0x80, 0xcf, 0x62,
+                           0x9b, 0x09, 0xdc, 0x95, 0x3c, 0x96, 0x8e, 0xb1,
+                           0x09, 0xbd, 0xe4, 0xeb, 0xdb, 0xca, 0x70, 0x7a,
+                           0x9e, 0xfa, 0x31, 0x18, 0x45, 0x3c, 0x21, 0x33,
+                           0xb0, 0xb3, 0x2b, 0xea, 0xf3, 0x71, 0x2d, 0xe1,
+                           0x03, 0xad, 0x1b, 0x48, 0xd4, 0x67, 0x27, 0xf0,
+                           0x62, 0xe4, 0x3d, 0xfb, 0x9b, 0x08, 0x76, 0xe7,
+                           0xdd, 0x2b, 0x01, 0x39, 0x04, 0x5a, 0x58, 0x7a,
+                           0xf7, 0x11, 0x90, 0xec, 0xbd, 0x51, 0x5c, 0x32,
+                           0x6b, 0xd7, 0x35, 0x39, 0x02, 0x6b, 0xf2, 0xa6,
+                           0xd0, 0x0d, 0x07, 0xe1, 0x06, 0xc4, 0x5b, 0x7d,
+                           0xe4, 0x6a, 0xd7, 0xee, 0x15, 0x1f, 0x83, 0xb4,
+                           0xa3, 0xa7, 0x5e, 0xc3, 0x90, 0xb7, 0xef, 0xd3,
+                           0xb7, 0x4f, 0xf8, 0x92, 0x4c, 0xb7, 0x3c, 0x29,
+                           0xcd, 0x7e, 0x2b, 0x5d, 0x43, 0xea, 0x42, 0xe7,
+                           0x74, 0x3f, 0x7d, 0x58, 0x88, 0x75, 0xde, 0x3e },
+               .ilen   = 512,
+               .result = { 0x05, 0x11, 0xb7, 0x18, 0xab, 0xc6, 0x2d, 0xac,
+                           0x70, 0x5d, 0xf6, 0x22, 0x94, 0xcd, 0xe5, 0x6c,
+                           0x17, 0x6b, 0xf6, 0x1c, 0xf0, 0xf3, 0x6e, 0xf8,
+                           0x50, 0x38, 0x1f, 0x71, 0x49, 0xb6, 0x57, 0xd6,
+                           0x8f, 0xcb, 0x8d, 0x6b, 0xe3, 0xa6, 0x29, 0x90,
+                           0xfe, 0x2a, 0x62, 0x82, 0xae, 0x6d, 0x8b, 0xf6,
+                           0xad, 0x1e, 0x9e, 0x20, 0x5f, 0x38, 0xbe, 0x04,
+                           0xda, 0x10, 0x8e, 0xed, 0xa2, 0xa4, 0x87, 0xab,
+                           0xda, 0x6b, 0xb4, 0x0c, 0x75, 0xba, 0xd3, 0x7c,
+                           0xc9, 0xac, 0x42, 0x31, 0x95, 0x7c, 0xc9, 0x04,
+                           0xeb, 0xd5, 0x6e, 0x32, 0x69, 0x8a, 0xdb, 0xa6,
+                           0x15, 0xd7, 0x3f, 0x4f, 0x2f, 0x66, 0x69, 0x03,
+                           0x9c, 0x1f, 0x54, 0x0f, 0xde, 0x1f, 0xf3, 0x65,
+                           0x4c, 0x96, 0x12, 0xed, 0x7c, 0x92, 0x03, 0x01,
+                           0x6f, 0xbc, 0x35, 0x93, 0xac, 0xf1, 0x27, 0xf1,
+                           0xb4, 0x96, 0x82, 0x5a, 0x5f, 0xb0, 0xa0, 0x50,
+                           0x89, 0xa4, 0x8e, 0x66, 0x44, 0x85, 0xcc, 0xfd,
+                           0x33, 0x14, 0x70, 0xe3, 0x96, 0xb2, 0xc3, 0xd3,
+                           0xbb, 0x54, 0x5a, 0x1a, 0xf9, 0x74, 0xa2, 0xc5,
+                           0x2d, 0x64, 0x75, 0xdd, 0xb4, 0x54, 0xe6, 0x74,
+                           0x8c, 0xd3, 0x9d, 0x9e, 0x86, 0xab, 0x51, 0x53,
+                           0xb7, 0x93, 0x3e, 0x6f, 0xd0, 0x4e, 0x2c, 0x40,
+                           0xf6, 0xa8, 0x2e, 0x3e, 0x9d, 0xf4, 0x66, 0xa5,
+                           0x76, 0x12, 0x73, 0x44, 0x1a, 0x56, 0xd7, 0x72,
+                           0x88, 0xcd, 0x21, 0x8c, 0x4c, 0x0f, 0xfe, 0xda,
+                           0x95, 0xe0, 0x3a, 0xa6, 0xa5, 0x84, 0x46, 0xcd,
+                           0xd5, 0x3e, 0x9d, 0x3a, 0xe2, 0x67, 0xe6, 0x60,
+                           0x1a, 0xe2, 0x70, 0x85, 0x58, 0xc2, 0x1b, 0x09,
+                           0xe1, 0xd7, 0x2c, 0xca, 0xad, 0xa8, 0x8f, 0xf9,
+                           0xac, 0xb3, 0x0e, 0xdb, 0xca, 0x2e, 0xe2, 0xb8,
+                           0x51, 0x71, 0xd9, 0x3c, 0x6c, 0xf1, 0x56, 0xf8,
+                           0xea, 0x9c, 0xf1, 0xfb, 0x0c, 0xe6, 0xb7, 0x10,
+                           0x1c, 0xf8, 0xa9, 0x7c, 0xe8, 0x53, 0x35, 0xc1,
+                           0x90, 0x3e, 0x76, 0x4a, 0x74, 0xa4, 0x21, 0x2c,
+                           0xf6, 0x2c, 0x4e, 0x0f, 0x94, 0x3a, 0x88, 0x2e,
+                           0x41, 0x09, 0x6a, 0x33, 0x7d, 0xf6, 0xdd, 0x3f,
+                           0x8d, 0x23, 0x31, 0x74, 0x84, 0xeb, 0x88, 0x6e,
+                           0xcc, 0xb9, 0xbc, 0x22, 0x83, 0x19, 0x07, 0x22,
+                           0xa5, 0x2d, 0xdf, 0xa5, 0xf3, 0x80, 0x85, 0x78,
+                           0x84, 0x39, 0x6a, 0x6d, 0x6a, 0x99, 0x4f, 0xa5,
+                           0x15, 0xfe, 0x46, 0xb0, 0xe4, 0x6c, 0xa5, 0x41,
+                           0x3c, 0xce, 0x8f, 0x42, 0x60, 0x71, 0xa7, 0x75,
+                           0x08, 0x40, 0x65, 0x8a, 0x82, 0xbf, 0xf5, 0x43,
+                           0x71, 0x96, 0xa9, 0x4d, 0x44, 0x8a, 0x20, 0xbe,
+                           0xfa, 0x4d, 0xbb, 0xc0, 0x7d, 0x31, 0x96, 0x65,
+                           0xe7, 0x75, 0xe5, 0x3e, 0xfd, 0x92, 0x3b, 0xc9,
+                           0x55, 0xbb, 0x16, 0x7e, 0xf7, 0xc2, 0x8c, 0xa4,
+                           0x40, 0x1d, 0xe5, 0xef, 0x0e, 0xdf, 0xe4, 0x9a,
+                           0x62, 0x73, 0x65, 0xfd, 0x46, 0x63, 0x25, 0x3d,
+                           0x2b, 0xaf, 0xe5, 0x64, 0xfe, 0xa5, 0x5c, 0xcf,
+                           0x24, 0xf3, 0xb4, 0xac, 0x64, 0xba, 0xdf, 0x4b,
+                           0xc6, 0x96, 0x7d, 0x81, 0x2d, 0x8d, 0x97, 0xf7,
+                           0xc5, 0x68, 0x77, 0x84, 0x32, 0x2b, 0xcc, 0x85,
+                           0x74, 0x96, 0xf0, 0x12, 0x77, 0x61, 0xb9, 0xeb,
+                           0x71, 0xaa, 0x82, 0xcb, 0x1c, 0xdb, 0x89, 0xc8,
+                           0xc6, 0xb5, 0xe3, 0x5c, 0x7d, 0x39, 0x07, 0x24,
+                           0xda, 0x39, 0x87, 0x45, 0xc0, 0x2b, 0xbb, 0x01,
+                           0xac, 0xbc, 0x2a, 0x5c, 0x7f, 0xfc, 0xe8, 0xce,
+                           0x6d, 0x9c, 0x6f, 0xed, 0xd3, 0xc1, 0xa1, 0xd6,
+                           0xc5, 0x55, 0xa9, 0x66, 0x2f, 0xe1, 0xc8, 0x32,
+                           0xa6, 0x5d, 0xa4, 0x3a, 0x98, 0x73, 0xe8, 0x45,
+                           0xa4, 0xc7, 0xa8, 0xb4, 0xf6, 0x13, 0x03, 0xf6,
+                           0xe9, 0x2e, 0xc4, 0x29, 0x0f, 0x84, 0xdb, 0xc4,
+                           0x21, 0xc4, 0xc2, 0x75, 0x67, 0x89, 0x37, 0x0a },
+               .rlen   = 512,
+       }
+};
+
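As the comment above notes, these decryption vectors are the encryption vectors with input and result swapped. Purely for orientation, a minimal sketch of how one such lrw(aes) entry could be replayed through the blkcipher interface follows; the "lrw(aes)" template name, the helper name and the exact call sequence are assumptions for illustration, not something this hunk defines.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Hypothetical one-shot check of a single LRW decryption vector. */
static int lrw_check_vector_sketch(struct cipher_testvec *v)
{
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        u8 buf[512];
        int err;

        if (v->ilen > sizeof(buf))
                return -EINVAL;

        tfm = crypto_alloc_blkcipher("lrw(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;

        /* klen includes the 16-byte tweak key carried after the AES key */
        err = crypto_blkcipher_setkey(tfm, (const u8 *)v->key, v->klen);
        if (err)
                goto out;

        memcpy(buf, v->input, v->ilen);
        sg_init_one(&sg, buf, v->ilen);
        crypto_blkcipher_set_iv(tfm, (const u8 *)v->iv,
                                crypto_blkcipher_ivsize(tfm));

        err = crypto_blkcipher_decrypt(&desc, &sg, &sg, v->ilen);
        if (!err && memcmp(buf, v->result, v->rlen))
                err = -EINVAL;
out:
        crypto_free_blkcipher(tfm);
        return err;
}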
 /* Cast5 test vectors from RFC 2144 */
 #define CAST5_ENC_TEST_VECTORS 3
 #define CAST5_DEC_TEST_VECTORS 3
@@ -3084,6 +3657,27 @@ static struct cipher_speed aes_speed_template[] = {
        {  .klen = 0, .blen = 0, }
 };
 
+static struct cipher_speed aes_lrw_speed_template[] = {
+       { .klen = 32, .blen = 16, },
+       { .klen = 32, .blen = 64, },
+       { .klen = 32, .blen = 256, },
+       { .klen = 32, .blen = 1024, },
+       { .klen = 32, .blen = 8192, },
+       { .klen = 40, .blen = 16, },
+       { .klen = 40, .blen = 64, },
+       { .klen = 40, .blen = 256, },
+       { .klen = 40, .blen = 1024, },
+       { .klen = 40, .blen = 8192, },
+       { .klen = 48, .blen = 16, },
+       { .klen = 48, .blen = 64, },
+       { .klen = 48, .blen = 256, },
+       { .klen = 48, .blen = 1024, },
+       { .klen = 48, .blen = 8192, },
+
+       /* End marker */
+       {  .klen = 0, .blen = 0, }
+};
+
 static struct cipher_speed des3_ede_speed_template[] = {
        { .klen = 24, .blen = 16, },
        { .klen = 24, .blen = 64, },
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
new file mode 100644 (file)
index 0000000..9347eb6
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C)2006 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Author:
+ *     Kazunori Miyazawa <miyazawa@linux-ipv6.org>
+ */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include "internal.h"
+
+static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
+                          0x02020202, 0x02020202, 0x02020202, 0x02020202,
+                          0x03030303, 0x03030303, 0x03030303, 0x03030303};
+/*
+ * +------------------------
+ * | <parent tfm>
+ * +------------------------
+ * | crypto_xcbc_ctx
+ * +------------------------
+ * | odds (block size)
+ * +------------------------
+ * | prev (block size)
+ * +------------------------
+ * | key (block size)
+ * +------------------------
+ * | consts (block size * 3)
+ * +------------------------
+ */
+struct crypto_xcbc_ctx {
+       struct crypto_tfm *child;
+       u8 *odds;
+       u8 *prev;
+       u8 *key;
+       u8 *consts;
+       void (*xor)(u8 *a, const u8 *b, unsigned int bs);
+       unsigned int keylen;
+       unsigned int len;
+};
+
+static void xor_128(u8 *a, const u8 *b, unsigned int bs)
+{
+       ((u32 *)a)[0] ^= ((u32 *)b)[0];
+       ((u32 *)a)[1] ^= ((u32 *)b)[1];
+       ((u32 *)a)[2] ^= ((u32 *)b)[2];
+       ((u32 *)a)[3] ^= ((u32 *)b)[3];
+}
+
+static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
+                                     struct crypto_xcbc_ctx *ctx)
+{
+       int bs = crypto_hash_blocksize(parent);
+       int err = 0;
+       u8 key1[bs];
+
+       if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
+           return err;
+
+       ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
+                       ctx->consts);
+
+       return crypto_cipher_setkey(ctx->child, key1, bs);
+}
+
+static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
+                                    const u8 *inkey, unsigned int keylen)
+{
+       struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
+
+       if (keylen != crypto_tfm_alg_blocksize(ctx->child))
+               return -EINVAL;
+
+       ctx->keylen = keylen;
+       memcpy(ctx->key, inkey, keylen);
+       ctx->consts = (u8*)ks;
+
+       return _crypto_xcbc_digest_setkey(parent, ctx);
+}
+
+static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
+{
+       struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm);
+       int bs = crypto_hash_blocksize(pdesc->tfm);
+
+       ctx->len = 0;
+       memset(ctx->odds, 0, bs);
+       memset(ctx->prev, 0, bs);
+
+       return 0;
+}
+
+static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
+                                    struct scatterlist *sg,
+                                    unsigned int nbytes)
+{
+       struct crypto_hash *parent = pdesc->tfm;
+       struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
+       struct crypto_tfm *tfm = ctx->child;
+       int bs = crypto_hash_blocksize(parent);
+       unsigned int i = 0;
+
+       do {
+
+               struct page *pg = sg[i].page;
+               unsigned int offset = sg[i].offset;
+               unsigned int slen = sg[i].length;
+
+               while (slen > 0) {
+                       unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
+                       char *p = crypto_kmap(pg, 0) + offset;
+
+                       /* check whether the data still fits within the current block */
+                       if ((ctx->len + len) <= bs) {
+                               memcpy(ctx->odds + ctx->len, p, len);
+                               ctx->len += len;
+                               slen -= len;
+
+                               /* checking the rest of the page */
+                               if (len + offset >= PAGE_SIZE) {
+                                       offset = 0;
+                                       pg++;
+                               } else
+                                       offset += len;
+
+                               crypto_kunmap(p, 0);
+                               crypto_yield(tfm->crt_flags);
+                               continue;
+                       }
+
+                       /* filling odds with new data and encrypting it */
+                       memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
+                       len -= bs - ctx->len;
+                       p += bs - ctx->len;
+
+                       ctx->xor(ctx->prev, ctx->odds, bs);
+                       tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
+
+                       /* clearing the length */
+                       ctx->len = 0;
+
+                       /* encrypting the rest of data */
+                       while (len > bs) {
+                               ctx->xor(ctx->prev, p, bs);
+                               tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
+                               p += bs;
+                               len -= bs;
+                       }
+
+                       /* keep any remainder smaller than the block size */
+                       if (len) {
+                               memcpy(ctx->odds, p, len);
+                               ctx->len = len;
+                       }
+                       crypto_kunmap(p, 0);
+                       crypto_yield(tfm->crt_flags);
+                       slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
+                       offset = 0;
+                       pg++;
+               }
+               nbytes -= sg[i].length;
+               i++;
+       } while (nbytes > 0);
+
+       return 0;
+}
+
+static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
+{
+       struct crypto_hash *parent = pdesc->tfm;
+       struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
+       struct crypto_tfm *tfm = ctx->child;
+       int bs = crypto_hash_blocksize(parent);
+       int err = 0;
+
+       if (ctx->len == bs) {
+               u8 key2[bs];
+
+               if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
+                       return err;
+
+               tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs));
+
+               ctx->xor(ctx->prev, ctx->odds, bs);
+               ctx->xor(ctx->prev, key2, bs);
+               _crypto_xcbc_digest_setkey(parent, ctx);
+
+               tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
+       } else {
+               u8 key3[bs];
+               unsigned int rlen;
+               u8 *p = ctx->odds + ctx->len;
+               *p = 0x80;
+               p++;
+
+               rlen = bs - ctx->len - 1;
+               if (rlen)
+                       memset(p, 0, rlen);
+
+               if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
+                       return err;
+
+               tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2));
+
+               ctx->xor(ctx->prev, ctx->odds, bs);
+               ctx->xor(ctx->prev, key3, bs);
+
+               _crypto_xcbc_digest_setkey(parent, ctx);
+
+               tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
+       }
+
+       return 0;
+}
+
+static int crypto_xcbc_digest(struct hash_desc *pdesc,
+                 struct scatterlist *sg, unsigned int nbytes, u8 *out)
+{
+       crypto_xcbc_digest_init(pdesc);
+       crypto_xcbc_digest_update(pdesc, sg, nbytes);
+       return crypto_xcbc_digest_final(pdesc, out);
+}
+
+static int xcbc_init_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_instance *inst = (void *)tfm->__crt_alg;
+       struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+       struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+       int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
+
+       switch (bs) {
+       case 16:
+               ctx->xor = xor_128;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       tfm = crypto_spawn_tfm(spawn);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       ctx->child = crypto_cipher_cast(tfm);
+       ctx->odds = (u8*)(ctx+1);
+       ctx->prev = ctx->odds + bs;
+       ctx->key = ctx->prev + bs;
+
+       return 0;
+}
+
+static void xcbc_exit_tfm(struct crypto_tfm *tfm)
+{
+       struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
+       crypto_free_cipher(ctx->child);
+}
+
+static struct crypto_instance *xcbc_alloc(void *param, unsigned int len)
+{
+       struct crypto_instance *inst;
+       struct crypto_alg *alg;
+       alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
+                                 CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
+       if (IS_ERR(alg))
+               return ERR_PTR(PTR_ERR(alg));
+
+       switch (alg->cra_blocksize) {
+       case 16:
+               break;
+       default:
+               inst = ERR_PTR(-EINVAL);
+               goto out_put_alg;
+       }
+
+       inst = crypto_alloc_instance("xcbc", alg);
+       if (IS_ERR(inst))
+               goto out_put_alg;
+
+       inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
+       inst->alg.cra_priority = alg->cra_priority;
+       inst->alg.cra_blocksize = alg->cra_blocksize;
+       inst->alg.cra_alignmask = alg->cra_alignmask;
+       inst->alg.cra_type = &crypto_hash_type;
+
+       inst->alg.cra_hash.digestsize =
+               (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+               CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
+                                      alg->cra_blocksize;
+       inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) +
+                               ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *));
+       inst->alg.cra_init = xcbc_init_tfm;
+       inst->alg.cra_exit = xcbc_exit_tfm;
+
+       inst->alg.cra_hash.init = crypto_xcbc_digest_init;
+       inst->alg.cra_hash.update = crypto_xcbc_digest_update;
+       inst->alg.cra_hash.final = crypto_xcbc_digest_final;
+       inst->alg.cra_hash.digest = crypto_xcbc_digest;
+       inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey;
+
+out_put_alg:
+       crypto_mod_put(alg);
+       return inst;
+}
+
+static void xcbc_free(struct crypto_instance *inst)
+{
+       crypto_drop_spawn(crypto_instance_ctx(inst));
+       kfree(inst);
+}
+
+static struct crypto_template crypto_xcbc_tmpl = {
+       .name = "xcbc",
+       .alloc = xcbc_alloc,
+       .free = xcbc_free,
+       .module = THIS_MODULE,
+};
+
+static int __init crypto_xcbc_module_init(void)
+{
+       return crypto_register_template(&crypto_xcbc_tmpl);
+}
+
+static void __exit crypto_xcbc_module_exit(void)
+{
+       crypto_unregister_template(&crypto_xcbc_tmpl);
+}
+
+module_init(crypto_xcbc_module_init);
+module_exit(crypto_xcbc_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("XCBC keyed hash algorithm");
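crypto/xcbc.c above registers a template that wraps a 16-byte block cipher as a keyed hash. A rough sketch of how a caller might drive it through the crypto_hash interface used in the file follows; the "xcbc(aes)" instantiation, the function name and the calling sequence are assumptions for illustration rather than anything this patch adds.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical one-shot XCBC-AES MAC over a flat buffer. */
static int xcbc_mac_sketch(const u8 *key, unsigned int keylen,
                           u8 *data, unsigned int len, u8 *out)
{
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        int err;

        /* instantiate the template above around the AES cipher */
        tfm = crypto_alloc_hash("xcbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;

        sg_init_one(&sg, data, len);

        /* the key must match the block size, as ->setkey() above enforces */
        err = crypto_hash_setkey(tfm, key, keylen);
        if (!err)
                err = crypto_hash_digest(&desc, &sg, len, out);

        crypto_free_hash(tfm);
        return err;
}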
index 4ac14dab3079f8b8910a19d733d1ba2692d41c0b..67711770b1d9c53d95746db422f6b711c8857a2c 100644 (file)
@@ -77,3 +77,4 @@ obj-$(CONFIG_CRYPTO)          += crypto/
 obj-$(CONFIG_SUPERH)           += sh/
 obj-$(CONFIG_GENERIC_TIME)     += clocksource/
 obj-$(CONFIG_DMA_ENGINE)       += dma/
+obj-$(CONFIG_PPC_PS3)          += ps3/
index 068fe4f100b0f8e609cd73576f53930c66bdbb4c..02b30ae6a68edf4b8e433e6c0f3f19674d2e6159 100644 (file)
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl")
 struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
+       struct work_struct work;
 };
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle,    /* upper bound  */
        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
 }
 
-static void acpi_os_execute_deferred(void *context)
+static void acpi_os_execute_deferred(struct work_struct *work)
 {
-       struct acpi_os_dpc *dpc = NULL;
-
-
-       dpc = (struct acpi_os_dpc *)context;
+       struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
        if (!dpc) {
                printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
                return;
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 {
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
-       struct work_struct *task;
 
        ACPI_FUNCTION_TRACE("os_queue_for_execution");
 
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 
        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
-        * freed by the callee.  The kernel handles the tq_struct list  in a
+        * freed by the callee.  The kernel handles the work_struct list  in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
-        * having a static tq_struct.
-        * We can save time and code by allocating the DPC and tq_structs
-        * from the same memory.
+        * having a static work_struct.
         */
 
-       dpc =
-           kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-                   GFP_ATOMIC);
+       dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS(AE_NO_MEMORY);
 
        dpc->function = function;
        dpc->context = context;
 
-       task = (void *)(dpc + 1);
-       INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-       if (!queue_work(kacpid_wq, task)) {
+       INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+       if (!queue_work(kacpid_wq, &dpc->work)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Call to queue_work() failed.\n"));
                kfree(dpc);
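This osl.c change follows the work_struct conversion applied throughout the merge: the work item is embedded in the private structure, the callback now receives the work_struct itself, and the original context is recovered with container_of(). A stripped-down, hypothetical illustration of that pattern (invented names, not code from any file touched here):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_req {
        int arg;                        /* data formerly passed via INIT_WORK() */
        struct work_struct work;        /* embedded rather than allocated separately */
};

static void foo_handler(struct work_struct *work)
{
        /* the old "void *context" parameter is gone; recover the container */
        struct foo_req *req = container_of(work, struct foo_req, work);

        printk(KERN_DEBUG "foo_req: arg=%d\n", req->arg);
        kfree(req);
}

static int foo_queue(struct workqueue_struct *wq, int arg)
{
        struct foo_req *req = kmalloc(sizeof(*req), GFP_ATOMIC);

        if (!req)
                return -ENOMEM;

        req->arg = arg;
        INIT_WORK(&req->work, foo_handler);     /* no data argument any more */
        if (!queue_work(wq, &req->work)) {
                kfree(req);
                return -EBUSY;
        }
        return 0;
}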
index f8ec3896b793e21c14e5352c0589a11f22fa0c7b..011c0a8a2dcc6cbef5de1dda26899ebf83cb5345 100644 (file)
@@ -1081,7 +1081,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
  *     ata_port_queue_task - Queue port_task
  *     @ap: The ata_port to queue port_task for
  *     @fn: workqueue function to be scheduled
- *     @data: data value to pass to workqueue function
+ *     @data: data for @fn to use
  *     @delay: delay time for workqueue function
  *
  *     Schedule @fn(@data) for execution after @delay jiffies using
@@ -1096,7 +1096,7 @@ static unsigned int ata_id_xfermask(const u16 *id)
  *     LOCKING:
  *     Inherited from caller.
  */
-void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
+void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
                         unsigned long delay)
 {
        int rc;
@@ -1104,12 +1104,10 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
        if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                return;
 
-       PREPARE_WORK(&ap->port_task, fn, data);
+       PREPARE_DELAYED_WORK(&ap->port_task, fn);
+       ap->port_task_data = data;
 
-       if (!delay)
-               rc = queue_work(ata_wq, &ap->port_task);
-       else
-               rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
+       rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
 
        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
@@ -2305,7 +2303,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
         * DMA cycle timing is slower/equal than the fastest PIO timing.
         */
 
-       if (speed > XFER_PIO_4) {
+       if (speed > XFER_PIO_6) {
                ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
                ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
        }
@@ -4588,10 +4586,11 @@ fsm_start:
        return poll_next;
 }
 
-static void ata_pio_task(void *_data)
+static void ata_pio_task(struct work_struct *work)
 {
-       struct ata_queued_cmd *qc = _data;
-       struct ata_port *ap = qc->ap;
+       struct ata_port *ap =
+               container_of(work, struct ata_port, port_task.work);
+       struct ata_queued_cmd *qc = ap->port_task_data;
        u8 status;
        int poll_next;
 
@@ -4961,6 +4960,7 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
        if (ap->flags & ATA_FLAG_PIO_POLLING) {
                switch (qc->tf.protocol) {
                case ATA_PROT_PIO:
+               case ATA_PROT_NODATA:
                case ATA_PROT_ATAPI:
                case ATA_PROT_ATAPI_NODATA:
                        qc->tf.flags |= ATA_TFLAG_POLLING;
@@ -5635,9 +5635,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host,
        ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
-       INIT_WORK(&ap->port_task, NULL, NULL);
-       INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
-       INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
+       INIT_DELAYED_WORK(&ap->port_task, NULL);
+       INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
+       INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
        INIT_LIST_HEAD(&ap->eh_done_q);
        init_waitqueue_head(&ap->eh_wait_q);
 
index 76a85dfb7307176e93107218c82e604a95b9c4b4..08ad44b3e48fe47c17db93df394890c8aad4b855 100644 (file)
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host)
        if (ap->pflags & ATA_PFLAG_LOADING)
                ap->pflags &= ~ATA_PFLAG_LOADING;
        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-               queue_work(ata_aux_wq, &ap->hotplug_task);
+               queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
 
        if (ap->pflags & ATA_PFLAG_RECOVERED)
                ata_port_printk(ap, KERN_INFO, "EH complete\n");
index 8eaace94d9631e9e2b64fdc3443ca26d0d41bf66..664e1377b54c7a4be21761bc9b4ea95146abee6f 100644 (file)
@@ -2963,7 +2963,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
 
 /**
  *     ata_scsi_hotplug - SCSI part of hotplug
- *     @data: Pointer to ATA port to perform SCSI hotplug on
+ *     @work: Pointer to ATA port to perform SCSI hotplug on
  *
  *     Perform SCSI part of hotplug.  It's executed from a separate
  *     workqueue after EH completes.  This is necessary because SCSI
@@ -2973,9 +2973,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
  *     LOCKING:
  *     Kernel thread context (may sleep).
  */
-void ata_scsi_hotplug(void *data)
+void ata_scsi_hotplug(struct work_struct *work)
 {
-       struct ata_port *ap = data;
+       struct ata_port *ap =
+               container_of(work, struct ata_port, hotplug_task.work);
        int i;
 
        if (ap->pflags & ATA_PFLAG_UNLOADING) {
@@ -3076,7 +3077,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
 
 /**
  *     ata_scsi_dev_rescan - initiate scsi_rescan_device()
- *     @data: Pointer to ATA port to perform scsi_rescan_device()
+ *     @work: Pointer to ATA port to perform scsi_rescan_device()
  *
  *     After ATA pass thru (SAT) commands are executed successfully,
  *     libata need to propagate the changes to SCSI layer.  This
@@ -3086,9 +3087,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
  *     LOCKING:
  *     Kernel thread context (may sleep).
  */
-void ata_scsi_dev_rescan(void *data)
+void ata_scsi_dev_rescan(struct work_struct *work)
 {
-       struct ata_port *ap = data;
+       struct ata_port *ap =
+               container_of(work, struct ata_port, scsi_rescan_task);
        unsigned long flags;
        unsigned int i;
 
index 107b2b565229ad925f9818c21573468b018ebd02..81ae41d5f23f04a4447c7b40bf7d347d5ef2b30c 100644 (file)
@@ -94,7 +94,7 @@ extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_hotplug(void *data);
+extern void ata_scsi_hotplug(struct work_struct *work);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
                               unsigned int buflen);
 
@@ -124,7 +124,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
-extern void ata_scsi_dev_rescan(void *data);
+extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
 
 /* libata-eh.c */
index 4ca6fa5dcb421cb2d092ac0b353b51a32f1971c6..9ed7f58424a3a31214fb3b436f00510cb07a9264 100644 (file)
@@ -154,19 +154,12 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
        tuple.TupleOffset = 0;
        tuple.TupleDataMax = 255;
        tuple.Attributes = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse));
-       pdev->conf.ConfigBase = stk->parse.config.base;
-       pdev->conf.Present = stk->parse.config.rmask[0];
 
        /* See if we have a manufacturer identifier. Use it to set is_kme for
           vendor quirks */
-       tuple.DesiredTuple = CISTPL_MANFID;
-       if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse))
-                       is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
+       is_kme = ((pdev->manf_id == MANFID_KME) &&
+                 ((pdev->card_id == PRODID_KME_KXLC005_A) ||
+                  (pdev->card_id == PRODID_KME_KXLC005_B)));
 
        /* Not sure if this is right... look up the current Vcc */
        CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
@@ -356,8 +349,10 @@ static struct pcmcia_device_id pcmcia_devices[] = {
        PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
        PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
        PCMCIA_DEVICE_PROD_ID1("TRANSCEND    512M   ", 0xd0909443),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
        PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+       PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
        PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
        PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
        PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
index a2778cf016bc2e8866423775416a48621a17433c..f055874a6ec5d303a11960fe88605c8975e7923c 100644 (file)
@@ -66,15 +66,17 @@ enum {
        board_2037x             = 0,    /* FastTrak S150 TX2plus */
        board_20319             = 1,    /* FastTrak S150 TX4 */
        board_20619             = 2,    /* FastTrak TX4000 */
-       board_20771             = 3,    /* FastTrak TX2300 */
-       board_2057x             = 4,    /* SATAII150 Tx2plus */
-       board_40518             = 5,    /* SATAII150 Tx4 */
+       board_2057x             = 3,    /* SATAII150 Tx2plus */
+       board_40518             = 4,    /* SATAII150 Tx4 */
 
        PDC_HAS_PATA            = (1 << 1), /* PDC20375/20575 has PATA */
 
+       /* PDC_CTLSTAT bit definitions */
+       PDC_DMA_ENABLE          = (1 << 7),
+       PDC_IRQ_DISABLE         = (1 << 10),
        PDC_RESET               = (1 << 11), /* HDMA reset */
 
-       PDC_COMMON_FLAGS        = ATA_FLAG_NO_LEGACY | ATA_FLAG_SRST |
+       PDC_COMMON_FLAGS        = ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                                  ATA_FLAG_PIO_POLLING,
 
@@ -90,7 +92,6 @@ struct pdc_port_priv {
 
 struct pdc_host_priv {
        unsigned long           flags;
-       int                     hotplug_offset;
 };
 
 static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg);
@@ -101,13 +102,16 @@ static void pdc_eng_timeout(struct ata_port *ap);
 static int pdc_port_start(struct ata_port *ap);
 static void pdc_port_stop(struct ata_port *ap);
 static void pdc_pata_phy_reset(struct ata_port *ap);
-static void pdc_sata_phy_reset(struct ata_port *ap);
 static void pdc_qc_prep(struct ata_queued_cmd *qc);
 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
 static void pdc_irq_clear(struct ata_port *ap);
 static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
 static void pdc_host_stop(struct ata_host *host);
+static void pdc_freeze(struct ata_port *ap);
+static void pdc_thaw(struct ata_port *ap);
+static void pdc_error_handler(struct ata_port *ap);
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
 
 
 static struct scsi_host_template pdc_ata_sht = {
@@ -136,11 +140,12 @@ static const struct ata_port_operations pdc_sata_ops = {
        .exec_command           = pdc_exec_command_mmio,
        .dev_select             = ata_std_dev_select,
 
-       .phy_reset              = pdc_sata_phy_reset,
-
        .qc_prep                = pdc_qc_prep,
        .qc_issue               = pdc_qc_issue_prot,
-       .eng_timeout            = pdc_eng_timeout,
+       .freeze                 = pdc_freeze,
+       .thaw                   = pdc_thaw,
+       .error_handler          = pdc_error_handler,
+       .post_internal_cmd      = pdc_post_internal_cmd,
        .data_xfer              = ata_mmio_data_xfer,
        .irq_handler            = pdc_interrupt,
        .irq_clear              = pdc_irq_clear,
@@ -198,23 +203,13 @@ static const struct ata_port_info pdc_port_info[] = {
        /* board_20619 */
        {
                .sht            = &pdc_ata_sht,
-               .flags          = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
+               .flags          = PDC_COMMON_FLAGS | ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
                .pio_mask       = 0x1f, /* pio0-4 */
                .mwdma_mask     = 0x07, /* mwdma0-2 */
                .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
                .port_ops       = &pdc_pata_ops,
        },
 
-       /* board_20771 */
-       {
-               .sht            = &pdc_ata_sht,
-               .flags          = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
-               .pio_mask       = 0x1f, /* pio0-4 */
-               .mwdma_mask     = 0x07, /* mwdma0-2 */
-               .udma_mask      = 0x7f, /* udma0-6 ; FIXME */
-               .port_ops       = &pdc_sata_ops,
-       },
-
        /* board_2057x */
        {
                .sht            = &pdc_ata_sht,
@@ -244,6 +239,7 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
        { PCI_VDEVICE(PROMISE, 0x3570), board_2057x },
        { PCI_VDEVICE(PROMISE, 0x3571), board_2057x },
        { PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
+       { PCI_VDEVICE(PROMISE, 0x3577), board_2057x },
        { PCI_VDEVICE(PROMISE, 0x3d73), board_2057x },
        { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
 
@@ -256,15 +252,6 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 
        { PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
 
-/* TODO: remove all associated board_20771 code, as it completely
- * duplicates board_2037x code, unless reason for separation can be
- * divined.
- */
-#if 0
-       { PCI_VDEVICE(PROMISE, 0x3570), board_20771 },
-#endif
-       { PCI_VDEVICE(PROMISE, 0x3577), board_20771 },
-
        { }     /* terminate list */
 };
 
@@ -366,12 +353,6 @@ static void pdc_reset_port(struct ata_port *ap)
        readl(mmio);    /* flush */
 }
 
-static void pdc_sata_phy_reset(struct ata_port *ap)
-{
-       pdc_reset_port(ap);
-       sata_phy_reset(ap);
-}
-
 static void pdc_pata_cbl_detect(struct ata_port *ap)
 {
        u8 tmp;
@@ -439,6 +420,61 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
        }
 }
 
+static void pdc_freeze(struct ata_port *ap)
+{
+       void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+       u32 tmp;
+
+       tmp = readl(mmio + PDC_CTLSTAT);
+       tmp |= PDC_IRQ_DISABLE;
+       tmp &= ~PDC_DMA_ENABLE;
+       writel(tmp, mmio + PDC_CTLSTAT);
+       readl(mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_thaw(struct ata_port *ap)
+{
+       void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+       u32 tmp;
+
+       /* clear IRQ */
+       readl(mmio + PDC_INT_SEQMASK);
+
+       /* turn IRQ back on */
+       tmp = readl(mmio + PDC_CTLSTAT);
+       tmp &= ~PDC_IRQ_DISABLE;
+       writel(tmp, mmio + PDC_CTLSTAT);
+       readl(mmio + PDC_CTLSTAT); /* flush */
+}
+
+static void pdc_error_handler(struct ata_port *ap)
+{
+       ata_reset_fn_t hardreset;
+
+       if (!(ap->pflags & ATA_PFLAG_FROZEN))
+               pdc_reset_port(ap);
+
+       hardreset = NULL;
+       if (sata_scr_valid(ap))
+               hardreset = sata_std_hardreset;
+
+       /* perform recovery */
+       ata_do_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
+                 ata_std_postreset);
+}
+
+static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+
+       if (qc->flags & ATA_QCFLAG_FAILED)
+               qc->err_mask |= AC_ERR_OTHER;
+
+       /* make DMA engine forget about the failed command */
+       if (qc->err_mask)
+               pdc_reset_port(ap);
+}
+
 static void pdc_eng_timeout(struct ata_port *ap)
 {
        struct ata_host *host = ap->host;
@@ -645,9 +681,14 @@ static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
 {
        void __iomem *mmio = pe->mmio_base;
        struct pdc_host_priv *hp = pe->private_data;
-       int hotplug_offset = hp->hotplug_offset;
+       int hotplug_offset;
        u32 tmp;
 
+       if (hp->flags & PDC_FLAG_GEN_II)
+               hotplug_offset = PDC2_SATA_PLUG_CSR;
+       else
+               hotplug_offset = PDC_SATA_PLUG_CSR;
+
        /*
         * Except for the hotplug stuff, this is voodoo from the
         * Promise driver.  Label this entire section
@@ -742,8 +783,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
                goto err_out_free_ent;
        }
 
-       /* Set default hotplug offset */
-       hp->hotplug_offset = PDC_SATA_PLUG_CSR;
        probe_ent->private_data = hp;
 
        probe_ent->sht          = pdc_port_info[board_idx].sht;
@@ -767,8 +806,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
        switch (board_idx) {
        case board_40518:
                hp->flags |= PDC_FLAG_GEN_II;
-               /* Override hotplug offset for SATAII150 */
-               hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
                /* Fall through */
        case board_20319:
                        probe_ent->n_ports = 4;
@@ -780,10 +817,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
                probe_ent->port[3].scr_addr = base + 0x700;
                break;
        case board_2057x:
-       case board_20771:
                hp->flags |= PDC_FLAG_GEN_II;
-               /* Override hotplug offset for SATAII150 */
-               hp->hotplug_offset = PDC2_SATA_PLUG_CSR;
                /* Fall through */
        case board_2037x:
                probe_ent->n_ports = 2;
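
The pdc_freeze()/pdc_thaw() hooks added above follow the usual MMIO pattern: read the control/status word, flip the interrupt-mask and DMA-enable bits, write it back, then read the register once more so the posted write is known to have reached the chip before the caller proceeds. A minimal stand-alone sketch of that pattern follows; the bit positions and the in-memory "register" are invented for illustration and are not the real Promise definitions.

/*
 * Stand-alone sketch of the freeze/thaw read-modify-write pattern.
 * Bit values and the fake "register" are made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define IRQ_DISABLE (1u << 10)          /* hypothetical control bits */
#define DMA_ENABLE  (1u << 7)

static volatile uint32_t fake_ctlstat = DMA_ENABLE;    /* stands in for the CTL/STAT register */

static uint32_t reg_read(volatile uint32_t *reg)
{
        return *reg;
}

static void reg_write(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

static void freeze(void)
{
        uint32_t tmp = reg_read(&fake_ctlstat);

        tmp |= IRQ_DISABLE;             /* mask interrupts */
        tmp &= ~DMA_ENABLE;             /* stop the DMA engine */
        reg_write(&fake_ctlstat, tmp);
        (void)reg_read(&fake_ctlstat);  /* read back: flush the posted write */
}

static void thaw(void)
{
        uint32_t tmp = reg_read(&fake_ctlstat);

        tmp &= ~IRQ_DISABLE;            /* unmask interrupts again */
        reg_write(&fake_ctlstat, tmp);
        (void)reg_read(&fake_ctlstat);
}

int main(void)
{
        freeze();
        printf("after freeze: 0x%08x\n", (unsigned)fake_ctlstat);
        thaw();
        printf("after thaw:   0x%08x\n", (unsigned)fake_ctlstat);
        return 0;
}
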
index b5077ce8cb4007b901716c89208e6b94f5012bd5..1b16f8166b094ae474b94f6da5401fe3a9179c19 100644 (file)
@@ -41,7 +41,7 @@ ifeq ($(CONFIG_ATM_FORE200E_PCA),y)
   # guess the target endianness to choose the right PCA-200E firmware image
   ifeq ($(CONFIG_ATM_FORE200E_PCA_DEFAULT_FW),y)
     byteorder.h                        := include$(if $(patsubst $(srctree),,$(objtree)),2)/asm/byteorder.h
-    CONFIG_ATM_FORE200E_PCA_FW := $(obj)/pca200e$(if $(shell $(CC) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
+    CONFIG_ATM_FORE200E_PCA_FW := $(obj)/pca200e$(if $(shell $(CC) $(CPPFLAGS) -E -dM $(byteorder.h) | grep ' __LITTLE_ENDIAN '),.bin,_ecd.bin2)
   endif
 endif
 
index c7314a79da0fffcedeb9e40ecef63ec14c994b9e..7d9b4e52f0bf4c985f1766fe2f88c35c78ea6c32 100644 (file)
@@ -820,7 +820,7 @@ he_init_group(struct he_dev *he_dev, int group)
                void *cpuaddr;
 
 #ifdef USE_RBPS_POOL 
-               cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+               cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
                if (cpuaddr == NULL)
                        return -ENOMEM;
 #else
@@ -884,7 +884,7 @@ he_init_group(struct he_dev *he_dev, int group)
                void *cpuaddr;
 
 #ifdef USE_RBPL_POOL
-               cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
+               cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
                if (cpuaddr == NULL)
                        return -ENOMEM;
 #else
@@ -1724,7 +1724,7 @@ __alloc_tpd(struct he_dev *he_dev)
        struct he_tpd *tpd;
        dma_addr_t dma_handle; 
 
-       tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);              
+       tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
        if (tpd == NULL)
                return NULL;
                        
index 87b17c33b3f958595d6fec200b55432714475927..f40786121948c4bc1cbd7cea1c14291704e1c982 100644 (file)
@@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos,
                               int flags);
 static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
                              char *page);
-static void idt77252_softint(void *dev_id);
+static void idt77252_softint(struct work_struct *work);
 
 
 static struct atmdev_ops idt77252_ops =
@@ -2866,9 +2866,10 @@ out:
 }
 
 static void
-idt77252_softint(void *dev_id)
+idt77252_softint(struct work_struct *work)
 {
-       struct idt77252_dev *card = dev_id;
+       struct idt77252_dev *card =
+               container_of(work, struct idt77252_dev, tqueue);
        u32 stat;
        int done;
 
@@ -3697,7 +3698,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
        card->pcidev = pcidev;
        sprintf(card->name, "idt77252-%d", card->index);
 
-       INIT_WORK(&card->tqueue, idt77252_softint, (void *)card);
+       INIT_WORK(&card->tqueue, idt77252_softint);
 
        membase = pci_resource_start(pcidev, 1);
        srambase = pci_resource_start(pcidev, 2);
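
The idt77252_softint() change above is part of the workqueue API conversion: a work handler now receives the struct work_struct pointer itself and recovers its device with container_of(), instead of getting a void * that was stashed at INIT_WORK() time. The stand-alone sketch below shows only the container_of() idiom; the structure and function names are placeholders, not the driver's own.

/*
 * Stand-alone sketch of the container_of() idiom used by the converted
 * work handlers: given a pointer to a member embedded in a larger
 * structure, recover a pointer to the whole structure.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {                      /* stands in for struct work_struct */
        void (*func)(struct work_item *);
};

struct fake_card {                      /* stands in for struct idt77252_dev */
        int index;
        struct work_item tqueue;
};

static void softint(struct work_item *work)
{
        struct fake_card *card = container_of(work, struct fake_card, tqueue);

        printf("softint for card %d\n", card->index);
}

int main(void)
{
        struct fake_card card = { .index = 3, .tqueue = { .func = softint } };

        /* the "workqueue" only ever sees &card.tqueue, never &card */
        card.tqueue.func(&card.tqueue);
        return 0;
}
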
index e4b530ef757de101329753e37cc1db524dce45c2..67b79a7592a9d94c92d0e737d82638fdc5e7f74d 100644 (file)
@@ -386,6 +386,7 @@ void device_initialize(struct device *dev)
        INIT_LIST_HEAD(&dev->node);
        init_MUTEX(&dev->sem);
        device_init_wakeup(dev, 0);
+       set_dev_node(dev, -1);
 }
 
 #ifdef CONFIG_SYSFS_DEPRECATED
index 1f745f12f94e5edb70f05d5ba6d8e0ac999ac549..7fd095efaebd75d3d6e1446b7bfdee12c64e31df 100644 (file)
@@ -104,8 +104,8 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 
 /*
  * register_cpu - Setup a driverfs device for a CPU.
- * @cpu - Callers can set the cpu->no_control field to 1, to indicate not to
- *               generate a control file in sysfs for this CPU.
+ * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
+ *       sysfs for this CPU.
  * @num - CPU number to use when creating the device.
  *
  * Initialize and register the CPU device.
@@ -119,7 +119,7 @@ int __devinit register_cpu(struct cpu *cpu, int num)
 
        error = sysdev_register(&cpu->sysdev);
 
-       if (!error && !cpu->no_control)
+       if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                cpu_sys_devices[num] = &cpu->sysdev;
index b2efbd4cf710d9d0ce79474c2280d04c6bbd7c42..dbe0735f8c9e6bab0446ea0278cd749e3ea98630 100644 (file)
@@ -126,7 +126,7 @@ dma_pool_create (const char *name, struct device *dev,
        } else if (allocation < size)
                return NULL;
 
-       if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
+       if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL)))
                return retval;
 
        strlcpy (retval->name, name, sizeof retval->name);
@@ -297,7 +297,7 @@ restart:
                        }
                }
        }
-       if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
+       if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
                if (mem_flags & __GFP_WAIT) {
                        DECLARE_WAITQUEUE (wait, current);
 
index c6b7d9c4b65115054f3f9cd3591c7dbf2c75142d..74b96795d2f58606ae5d048618405c6e73b53db9 100644 (file)
@@ -290,9 +290,8 @@ static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
 
 static int block_size_init(void)
 {
-       sysfs_create_file(&memory_sysdev_class.kset.kobj,
-               &class_attr_block_size_bytes.attr);
-       return 0;
+       return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+                               &class_attr_block_size_bytes.attr);
 }
 
 /*
@@ -323,12 +322,14 @@ static CLASS_ATTR(probe, 0700, NULL, memory_probe_store);
 
 static int memory_probe_init(void)
 {
-       sysfs_create_file(&memory_sysdev_class.kset.kobj,
-               &class_attr_probe.attr);
-       return 0;
+       return sysfs_create_file(&memory_sysdev_class.kset.kobj,
+                               &class_attr_probe.attr);
 }
 #else
-#define memory_probe_init(...) do {} while (0)
+static inline int memory_probe_init(void)
+{
+       return 0;
+}
 #endif
 
 /*
@@ -431,9 +432,12 @@ int __init memory_dev_init(void)
 {
        unsigned int i;
        int ret;
+       int err;
 
        memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
        ret = sysdev_class_register(&memory_sysdev_class);
+       if (ret)
+               goto out;
 
        /*
         * Create entries for memory sections that were found
@@ -442,11 +446,19 @@ int __init memory_dev_init(void)
        for (i = 0; i < NR_MEM_SECTIONS; i++) {
                if (!valid_section_nr(i))
                        continue;
-               add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0);
+               err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, 0);
+               if (!ret)
+                       ret = err;
        }
 
-       memory_probe_init();
-       block_size_init();
-
+       err = memory_probe_init();
+       if (!ret)
+               ret = err;
+       err = block_size_init();
+       if (!ret)
+               ret = err;
+out:
+       if (ret)
+               printk(KERN_ERR "%s() failed: %d\n", __FUNCTION__, ret);
        return ret;
 }
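
memory_dev_init() above now latches the first error it sees but still runs the remaining initialisation steps, and the stubbed-out memory_probe_init() becomes a static inline returning 0 so the same call-and-check sequence compiles whether or not the probe interface is built. A minimal sketch of the "remember the first failure, keep going" pattern, with stub step functions standing in for the real ones:

/*
 * Sketch of the error-latching pattern: run every step, report the
 * first failure.  The step functions are stubs, not kernel code.
 */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -5; }  /* pretend this step fails */
static int step_c(void) { return 0; }   /* still runs after the failure */

static int init_all(void)
{
        int ret = 0, err;

        err = step_a();
        if (!ret)
                ret = err;
        err = step_b();
        if (!ret)
                ret = err;              /* first failure is latched here */
        err = step_c();
        if (!ret)
                ret = err;              /* later errors do not overwrite it */

        if (ret)
                fprintf(stderr, "%s() failed: %d\n", __func__, ret);
        return ret;
}

int main(void)
{
        return init_all() ? 1 : 0;
}
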
index 3d12b85b09623a9498627fa2889fbccf2156f27d..067a9e8bc377f2cdfe670112bb493714f0faaeaf 100644 (file)
@@ -108,7 +108,6 @@ static int __cpuinit topology_add_dev(unsigned int cpu)
        return rc;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void __cpuinit topology_remove_dev(unsigned int cpu)
 {
        struct sys_device *sys_dev = get_cpu_sysdev(cpu);
@@ -136,7 +135,6 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
        }
        return rc ? NOTIFY_BAD : NOTIFY_OK;
 }
-#endif
 
 static int __cpuinit topology_sysfs_init(void)
 {
index 742d0740310140a59e7a03ca4ab6883ee82be8e2..8d81a3a64c078bb4d2523ac51c2ef6ad6c10e7dc 100644 (file)
@@ -324,13 +324,13 @@ static boolean DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
       Command->Next = Controller->FreeCommands;
       Controller->FreeCommands = Command;
       Controller->Commands[CommandIdentifier-1] = Command;
-      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, SLAB_ATOMIC,
+      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
                                                        &ScatterGatherDMA);
       if (ScatterGatherCPU == NULL)
          return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
 
       if (RequestSensePool != NULL) {
-         RequestSenseCPU = pci_pool_alloc(RequestSensePool, SLAB_ATOMIC,
+         RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC,
                                                &RequestSenseDMA);
          if (RequestSenseCPU == NULL) {
                 pci_pool_free(ScatterGatherPool, ScatterGatherCPU,
index 17dc22282e148d89f8a526de5a52deb32cd73828..85072446d772311ab1c9b4bb1d4b4dc64029d7c9 100644 (file)
@@ -168,7 +168,8 @@ config BLK_CPQ_CISS_DA
 
 config CISS_SCSI_TAPE
        bool "SCSI tape drive support for Smart Array 5xxx"
-       depends on BLK_CPQ_CISS_DA && SCSI && PROC_FS
+       depends on BLK_CPQ_CISS_DA && PROC_FS
+       depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
        help
          When enabled (Y), this option allows SCSI tape drives and SCSI medium
          changers (tape robots) to be accessed via a Compaq 5xxx array 
@@ -305,6 +306,7 @@ config BLK_DEV_LOOP
 config BLK_DEV_CRYPTOLOOP
        tristate "Cryptoloop Support"
        select CRYPTO
+       select CRYPTO_CBC
        depends on BLK_DEV_LOOP
        ---help---
          Say Y here if you want to be able to use the ciphers that are 
index 6d111228cfac1848fad99d9687b01e03e1230f7c..2308e83e5f33aff25297a2ef7ad3363a56a645ef 100644 (file)
@@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d);
 void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
 void aoecmd_ata_rsp(struct sk_buff *);
 void aoecmd_cfg_rsp(struct sk_buff *);
-void aoecmd_sleepwork(void *vp);
+void aoecmd_sleepwork(struct work_struct *);
 struct sk_buff *new_skb(ulong);
 
 int aoedev_init(void);
index aa25f8b09fe3092fc93093a29c8757c6229f6563..478489c568a4c9f9de45cc05aba97f294d80bff8 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/netdevice.h>
 #include "aoe.h"
 
-static kmem_cache_t *buf_pool_cache;
+static struct kmem_cache *buf_pool_cache;
 
 static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
 {
index 8a13b1af8babdde7468af212794fa874b0a2745e..97f7f535f41208cfcb695bd7d8f7d7b1f928c13b 100644 (file)
@@ -408,9 +408,9 @@ rexmit_timer(ulong vp)
 /* this function performs work that has been deferred until sleeping is OK
  */
 void
-aoecmd_sleepwork(void *vp)
+aoecmd_sleepwork(struct work_struct *work)
 {
-       struct aoedev *d = (struct aoedev *) vp;
+       struct aoedev *d = container_of(work, struct aoedev, work);
 
        if (d->flags & DEVFL_GDALLOC)
                aoeblk_gdalloc(d);
index 6125921bbec4d971534947fa53dc4cf5cc750838..05a97197c9181899ab7a23ca26db34082f2394b7 100644 (file)
@@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes)
                        kfree(d);
                return NULL;
        }
-       INIT_WORK(&d->work, aoecmd_sleepwork, d);
+       INIT_WORK(&d->work, aoecmd_sleepwork);
        spin_lock_init(&d->lock);
        init_timer(&d->timer);
        d->timer.data = (ulong) d;
index 4105c3bf34764da399d29e638294ead0e07a3973..892e092afe9a8235ca013febe1ed05a08e7d9291 100644 (file)
 #include <linux/completion.h>
 
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.10)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,10)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
 
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.10");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
 MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
                        " SA6i P600 P800 P400 P400i E200 E200i E500");
+MODULE_VERSION("3.6.14");
 MODULE_LICENSE("GPL");
 
 #include "cciss_cmd.h"
@@ -81,7 +82,9 @@ static const struct pci_device_id cciss_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3213},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3214},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSD,     0x103C, 0x3215},
-       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3233},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3237},
+       {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
+               PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
 };
 
@@ -90,27 +93,29 @@ MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
 /*  board_id = Subsystem Device ID & Vendor ID
  *  product = Marketing Name for the board
  *  access = Address of the struct of function pointers
+ *  nr_cmds = Number of commands supported by controller
  */
 static struct board_type products[] = {
-       {0x40700E11, "Smart Array 5300", &SA5_access},
-       {0x40800E11, "Smart Array 5i", &SA5B_access},
-       {0x40820E11, "Smart Array 532", &SA5B_access},
-       {0x40830E11, "Smart Array 5312", &SA5B_access},
-       {0x409A0E11, "Smart Array 641", &SA5_access},
-       {0x409B0E11, "Smart Array 642", &SA5_access},
-       {0x409C0E11, "Smart Array 6400", &SA5_access},
-       {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
-       {0x40910E11, "Smart Array 6i", &SA5_access},
-       {0x3225103C, "Smart Array P600", &SA5_access},
-       {0x3223103C, "Smart Array P800", &SA5_access},
-       {0x3234103C, "Smart Array P400", &SA5_access},
-       {0x3235103C, "Smart Array P400i", &SA5_access},
-       {0x3211103C, "Smart Array E200i", &SA5_access},
-       {0x3212103C, "Smart Array E200", &SA5_access},
-       {0x3213103C, "Smart Array E200i", &SA5_access},
-       {0x3214103C, "Smart Array E200i", &SA5_access},
-       {0x3215103C, "Smart Array E200i", &SA5_access},
-       {0x3233103C, "Smart Array E500", &SA5_access},
+       {0x40700E11, "Smart Array 5300", &SA5_access, 512},
+       {0x40800E11, "Smart Array 5i", &SA5B_access, 512},
+       {0x40820E11, "Smart Array 532", &SA5B_access, 512},
+       {0x40830E11, "Smart Array 5312", &SA5B_access, 512},
+       {0x409A0E11, "Smart Array 641", &SA5_access, 512},
+       {0x409B0E11, "Smart Array 642", &SA5_access, 512},
+       {0x409C0E11, "Smart Array 6400", &SA5_access, 512},
+       {0x409D0E11, "Smart Array 6400 EM", &SA5_access, 512},
+       {0x40910E11, "Smart Array 6i", &SA5_access, 512},
+       {0x3225103C, "Smart Array P600", &SA5_access, 512},
+       {0x3223103C, "Smart Array P800", &SA5_access, 512},
+       {0x3234103C, "Smart Array P400", &SA5_access, 512},
+       {0x3235103C, "Smart Array P400i", &SA5_access, 512},
+       {0x3211103C, "Smart Array E200i", &SA5_access, 120},
+       {0x3212103C, "Smart Array E200", &SA5_access, 120},
+       {0x3213103C, "Smart Array E200i", &SA5_access, 120},
+       {0x3214103C, "Smart Array E200i", &SA5_access, 120},
+       {0x3215103C, "Smart Array E200i", &SA5_access, 120},
+       {0x3237103C, "Smart Array E500", &SA5_access, 512},
+       {0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -121,7 +126,6 @@ static struct board_type products[] = {
 #define MAX_CMD_RETRIES 3
 
 #define READ_AHEAD      1024
-#define NR_CMDS                 384    /* #commands that can be outstanding */
 #define MAX_CTLR       32
 
 /* Originally cciss driver only supports 8 major numbers */
@@ -137,7 +141,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
                       unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
-static int revalidate_allvol(ctlr_info_t *host);
 static int cciss_revalidate(struct gendisk *disk);
 static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
@@ -265,6 +268,7 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
                       "Firmware Version: %c%c%c%c\n"
                       "IRQ: %d\n"
                       "Logical drives: %d\n"
+                      "Max sectors: %d\n"
                       "Current Q depth: %d\n"
                       "Current # commands on controller: %d\n"
                       "Max Q depth since init: %d\n"
@@ -275,7 +279,9 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
                       (unsigned long)h->board_id,
                       h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
                       h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
-                      h->num_luns, h->Qdepth, h->commands_outstanding,
+                      h->num_luns,
+                      h->cciss_max_sectors,
+                      h->Qdepth, h->commands_outstanding,
                       h->maxQsinceinit, h->max_outstanding, h->maxSG);
 
        pos += size;
@@ -400,8 +406,8 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
        } else {                /* get it out of the controllers pool */
 
                do {
-                       i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
-                       if (i == NR_CMDS)
+                       i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+                       if (i == h->nr_cmds)
                                return NULL;
                } while (test_and_set_bit
                         (i & (BITS_PER_LONG - 1),
@@ -487,7 +493,7 @@ static int cciss_open(struct inode *inode, struct file *filep)
         * but I'm already using way too many device nodes to claim another one
         * for "raw controller".
         */
-       if (drv->nr_blocks == 0) {
+       if (drv->heads == 0) {
                if (iminor(inode) != 0) {       /* not node 0? */
                        /* if not node 0 make sure it is a partition = 0 */
                        if (iminor(inode) & 0x0f) {
@@ -850,9 +856,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
                }
 
        case CCISS_REVALIDVOLS:
-               if (bdev != bdev->bd_contains || drv != host->drv)
-                       return -ENXIO;
-               return revalidate_allvol(host);
+               return rebuild_lun_table(host, NULL);
 
        case CCISS_GETLUNINFO:{
                        LogvolInfo_struct luninfo;
@@ -1152,75 +1156,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
        }
 }
 
-/*
- * revalidate_allvol is for online array config utilities.  After a
- * utility reconfigures the drives in the array, it can use this function
- * (through an ioctl) to make the driver zap any previous disk structs for
- * that controller and get new ones.
- *
- * Right now I'm using the getgeometry() function to do this, but this
- * function should probably be finer grained and allow you to revalidate one
- * particular logical volume (instead of all of them on a particular
- * controller).
- */
-static int revalidate_allvol(ctlr_info_t *host)
-{
-       int ctlr = host->ctlr, i;
-       unsigned long flags;
-
-       spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-       if (host->usage_count > 1) {
-               spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-               printk(KERN_WARNING "cciss: Device busy for volume"
-                      " revalidation (usage=%d)\n", host->usage_count);
-               return -EBUSY;
-       }
-       host->usage_count++;
-       spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
-       for (i = 0; i < NWD; i++) {
-               struct gendisk *disk = host->gendisk[i];
-               if (disk) {
-                       request_queue_t *q = disk->queue;
-
-                       if (disk->flags & GENHD_FL_UP)
-                               del_gendisk(disk);
-                       if (q)
-                               blk_cleanup_queue(q);
-               }
-       }
-
-       /*
-        * Set the partition and block size structures for all volumes
-        * on this controller to zero.  We will reread all of this data
-        */
-       memset(host->drv, 0, sizeof(drive_info_struct)
-              * CISS_MAX_LUN);
-       /*
-        * Tell the array controller not to give us any interrupts while
-        * we check the new geometry.  Then turn interrupts back on when
-        * we're done.
-        */
-       host->access.set_intr_mask(host, CCISS_INTR_OFF);
-       cciss_getgeometry(ctlr);
-       host->access.set_intr_mask(host, CCISS_INTR_ON);
-
-       /* Loop through each real device */
-       for (i = 0; i < NWD; i++) {
-               struct gendisk *disk = host->gendisk[i];
-               drive_info_struct *drv = &(host->drv[i]);
-               /* we must register the controller even if no disks exist */
-               /* this is for the online array utilities */
-               if (!drv->heads && i)
-                       continue;
-               blk_queue_hardsect_size(drv->queue, drv->block_size);
-               set_capacity(disk, drv->nr_blocks);
-               add_disk(disk);
-       }
-       host->usage_count--;
-       return 0;
-}
-
 static inline void complete_buffers(struct bio *bio, int status)
 {
        while (bio) {
@@ -1243,7 +1178,7 @@ static void cciss_check_queues(ctlr_info_t *h)
         * in case the interrupt we serviced was from an ioctl and did not
         * free any new commands.
         */
-       if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+       if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
                return;
 
        /* We have room on the queue for more commands.  Now we need to queue
@@ -1262,7 +1197,7 @@ static void cciss_check_queues(ctlr_info_t *h)
                /* check to see if we have maxed out the number of commands
                 * that can be placed on the queue.
                 */
-               if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+               if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
                        if (curr_queue == start_queue) {
                                h->next_to_run =
                                    (start_queue + 1) % (h->highest_lun + 1);
@@ -1380,6 +1315,11 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
        /* if it's the controller it's already added */
        if (drv_index) {
                disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+               sprintf(disk->disk_name, "cciss/c%dd%d", ctlr, drv_index);
+               disk->major = h->major;
+               disk->first_minor = drv_index << NWD_SHIFT;
+               disk->fops = &cciss_fops;
+               disk->private_data = &h->drv[drv_index];
 
                /* Set up queue information */
                disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
@@ -1391,7 +1331,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index)
                /* This is a limit in the driver and could be eliminated. */
                blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
 
-               blk_queue_max_sectors(disk->queue, 512);
+               blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
 
                blk_queue_softirq_done(disk->queue, cciss_softirq_done);
 
@@ -1458,11 +1398,6 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
 
        /* Set busy_configuring flag for this operation */
        spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-       if (h->num_luns >= CISS_MAX_LUN) {
-               spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-               return -EINVAL;
-       }
-
        if (h->busy_configuring) {
                spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
                return -EBUSY;
@@ -1495,17 +1430,8 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
                                              0, 0, TYPE_CMD);
 
                if (return_code == IO_OK) {
-                       listlength |=
-                           (0xff & (unsigned int)(ld_buff->LUNListLength[0]))
-                           << 24;
-                       listlength |=
-                           (0xff & (unsigned int)(ld_buff->LUNListLength[1]))
-                           << 16;
-                       listlength |=
-                           (0xff & (unsigned int)(ld_buff->LUNListLength[2]))
-                           << 8;
-                       listlength |=
-                           0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+                       listlength =
+                               be32_to_cpu(*(__u32 *) ld_buff->LUNListLength);
                } else {        /* reading number of logical volumes failed */
                        printk(KERN_WARNING "cciss: report logical volume"
                               " command failed\n");
@@ -1556,6 +1482,14 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
                                if (drv_index == -1)
                                        goto freeret;
 
+                               /*Check if the gendisk needs to be allocated */
+                               if (!h->gendisk[drv_index]){
+                                       h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT);
+                                       if (!h->gendisk[drv_index]){
+                                               printk(KERN_ERR "cciss: could not allocate new disk %d\n", drv_index);
+                                               goto mem_msg;
+                                       }
+                               }
                        }
                        h->drv[drv_index].LunID = lunid;
                        cciss_update_drive_info(ctlr, drv_index);
@@ -1593,6 +1527,7 @@ static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
 static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
                           int clear_all)
 {
+       int i;
        ctlr_info_t *h = get_host(disk);
 
        if (!capable(CAP_SYS_RAWIO))
@@ -1616,9 +1551,35 @@ static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
                                del_gendisk(disk);
                        if (q) {
                                blk_cleanup_queue(q);
+                               /* Set drv->queue to NULL so that we do not try
+                                * to call blk_start_queue on this queue in the
+                                * interrupt handler
+                                */
                                drv->queue = NULL;
                        }
+                       /* If clear_all is set then we are deleting the logical
+                        * drive, not just refreshing its info.  For drives
+                        * other than disk 0 we will call put_disk.  We do not
+                        * do this for disk 0 as we need it to be able to
+                        * configure the controller.
+                       */
+                       if (clear_all){
+                               /* This isn't pretty, but we need to find the
+                        * disk in our array and NULL out the pointer.
+                                * This is so that we will call alloc_disk if
+                                * this index is used again later.
+                               */
+                               for (i=0; i < CISS_MAX_LUN; i++){
+                                       if(h->gendisk[i] == disk){
+                                               h->gendisk[i] = NULL;
+                                               break;
+                                       }
+                               }
+                               put_disk(disk);
+                       }
                }
+       } else {
+               set_capacity(disk, 0);
        }
 
        --h->num_luns;
@@ -2136,7 +2097,7 @@ static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete)
 
        /* We've sent down an abort or reset, but something else
           has completed */
-       if (srl->ncompletions >= (NR_CMDS + 2)) {
+       if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) {
                /* Uh oh.  No room to save it for later... */
                printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, "
                       "reject list overflow, command lost!\n", ctlr);
@@ -2673,7 +2634,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
                        a1 = a;
                        if ((a & 0x04)) {
                                a2 = (a >> 3);
-                               if (a2 >= NR_CMDS) {
+                               if (a2 >= h->nr_cmds) {
                                        printk(KERN_WARNING
                                               "cciss: controller cciss%d failed, stopping.\n",
                                               h->ctlr);
@@ -2827,23 +2788,21 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
                if (err > 0) {
                        printk(KERN_WARNING "cciss: only %d MSI-X vectors "
                               "available\n", err);
+                       goto default_int_mode;
                } else {
                        printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
                               err);
+                       goto default_int_mode;
                }
        }
        if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
                if (!pci_enable_msi(pdev)) {
-                       c->intr[SIMPLE_MODE_INT] = pdev->irq;
                        c->msi_vector = 1;
-                       return;
                } else {
                        printk(KERN_WARNING "cciss: MSI init failed\n");
-                       c->intr[SIMPLE_MODE_INT] = pdev->irq;
-                       return;
                }
        }
-      default_int_mode:
+default_int_mode:
 #endif                         /* CONFIG_PCI_MSI */
        /* if we get here we're going to use the default interrupt mode */
        c->intr[SIMPLE_MODE_INT] = pdev->irq;
@@ -2956,16 +2915,10 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
                if (board_id == products[i].board_id) {
                        c->product_name = products[i].product_name;
                        c->access = *(products[i].access);
+                       c->nr_cmds = products[i].nr_cmds;
                        break;
                }
        }
-       if (i == ARRAY_SIZE(products)) {
-               printk(KERN_WARNING "cciss: Sorry, I don't know how"
-                      " to access the Smart Array controller %08lx\n",
-                      (unsigned long)board_id);
-               err = -ENODEV;
-               goto err_out_free_res;
-       }
        if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
            (readb(&c->cfgtable->Signature[1]) != 'I') ||
            (readb(&c->cfgtable->Signature[2]) != 'S') ||
@@ -2974,6 +2927,27 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
                err = -ENODEV;
                goto err_out_free_res;
        }
+       /* We didn't find the controller in our list. We know the
+        * signature is valid. If it's an HP device let's try to
+        * bind to the device and fire it up. Otherwise we bail.
+        */
+       if (i == ARRAY_SIZE(products)) {
+               if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
+                       c->product_name = products[i-1].product_name;
+                       c->access = *(products[i-1].access);
+                       c->nr_cmds = products[i-1].nr_cmds;
+                       printk(KERN_WARNING "cciss: This is an unknown "
+                               "Smart Array controller.\n"
+                               "cciss: Please update to the latest driver "
+                               "available from www.hp.com.\n");
+               } else {
+                       printk(KERN_WARNING "cciss: Sorry, I don't know how"
+                               " to access the Smart Array controller %08lx\n"
+                                       , (unsigned long)board_id);
+                       err = -ENODEV;
+                       goto err_out_free_res;
+               }
+       }
 #ifdef CONFIG_X86
        {
                /* Need to enable prefetch in the SCSI core for 6400 in x86 */
@@ -2984,6 +2958,17 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
        }
 #endif
 
+       /* Disabling DMA prefetch for the P600
+        * An ASIC bug may result in a prefetch beyond
+        * physical memory.
+        */
+       if(board_id == 0x3225103C) {
+               __u32 dma_prefetch;
+               dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
+               dma_prefetch |= 0x8000;
+               writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
+       }
+
 #ifdef CCISS_DEBUG
        printk("Trying to put board into Simple mode\n");
 #endif                         /* CCISS_DEBUG */
@@ -3158,13 +3143,7 @@ geo_inq:
 /* Returns -1 if no free entries are left.  */
 static int alloc_cciss_hba(void)
 {
-       struct gendisk *disk[NWD];
-       int i, n;
-       for (n = 0; n < NWD; n++) {
-               disk[n] = alloc_disk(1 << NWD_SHIFT);
-               if (!disk[n])
-                       goto out;
-       }
+       int i;
 
        for (i = 0; i < MAX_CTLR; i++) {
                if (!hba[i]) {
@@ -3172,20 +3151,18 @@ static int alloc_cciss_hba(void)
                        p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
                        if (!p)
                                goto Enomem;
-                       for (n = 0; n < NWD; n++)
-                               p->gendisk[n] = disk[n];
+                       p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
+                       if (!p->gendisk[0])
+                               goto Enomem;
                        hba[i] = p;
                        return i;
                }
        }
        printk(KERN_WARNING "cciss: This driver supports a maximum"
               " of %d controllers.\n", MAX_CTLR);
-       goto out;
-      Enomem:
+       return -1;
+Enomem:
        printk(KERN_ERR "cciss: out of memory.\n");
-      out:
-       while (n--)
-               put_disk(disk[n]);
        return -1;
 }
 
@@ -3195,7 +3172,7 @@ static void free_hba(int i)
        int n;
 
        hba[i] = NULL;
-       for (n = 0; n < NWD; n++)
+       for (n = 0; n < CISS_MAX_LUN; n++)
                put_disk(p->gendisk[n]);
        kfree(p);
 }
@@ -3208,9 +3185,8 @@ static void free_hba(int i)
 static int __devinit cciss_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
 {
-       request_queue_t *q;
        int i;
-       int j;
+       int j = 0;
        int rc;
        int dac;
 
@@ -3269,15 +3245,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
               hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
 
        hba[i]->cmd_pool_bits =
-           kmalloc(((NR_CMDS + BITS_PER_LONG -
+           kmalloc(((hba[i]->nr_cmds + BITS_PER_LONG -
                      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
        hba[i]->cmd_pool = (CommandList_struct *)
            pci_alloc_consistent(hba[i]->pdev,
-                   NR_CMDS * sizeof(CommandList_struct),
+                   hba[i]->nr_cmds * sizeof(CommandList_struct),
                    &(hba[i]->cmd_pool_dhandle));
        hba[i]->errinfo_pool = (ErrorInfo_struct *)
            pci_alloc_consistent(hba[i]->pdev,
-                   NR_CMDS * sizeof(ErrorInfo_struct),
+                   hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                    &(hba[i]->errinfo_pool_dhandle));
        if ((hba[i]->cmd_pool_bits == NULL)
            || (hba[i]->cmd_pool == NULL)
@@ -3288,7 +3264,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 #ifdef CONFIG_CISS_SCSI_TAPE
        hba[i]->scsi_rejects.complete =
            kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) *
-                   (NR_CMDS + 5), GFP_KERNEL);
+                   (hba[i]->nr_cmds + 5), GFP_KERNEL);
        if (hba[i]->scsi_rejects.complete == NULL) {
                printk(KERN_ERR "cciss: out of memory");
                goto clean4;
@@ -3302,7 +3278,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        /* command and error info recs zeroed out before
           they are used */
        memset(hba[i]->cmd_pool_bits, 0,
-              ((NR_CMDS + BITS_PER_LONG -
+              ((hba[i]->nr_cmds + BITS_PER_LONG -
                 1) / BITS_PER_LONG) * sizeof(unsigned long));
 
 #ifdef CCISS_DEBUG
@@ -3317,18 +3293,34 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
 
        cciss_procinit(i);
+
+       hba[i]->cciss_max_sectors = 2048;
+
        hba[i]->busy_initializing = 0;
 
-       for (j = 0; j < NWD; j++) {     /* mfm */
+       do {
                drive_info_struct *drv = &(hba[i]->drv[j]);
                struct gendisk *disk = hba[i]->gendisk[j];
+               request_queue_t *q;
+
+               /* Check if the disk was allocated already */
+               if (!disk){
+                       hba[i]->gendisk[j] = alloc_disk(1 << NWD_SHIFT);
+                       disk = hba[i]->gendisk[j];
+               }
+
+               /* Check that the disk was able to be allocated */
+               if (!disk) {
+                       printk(KERN_ERR "cciss: unable to allocate memory for disk %d\n", j);
+                       goto clean4;
+               }
 
                q = blk_init_queue(do_cciss_request, &hba[i]->lock);
                if (!q) {
                        printk(KERN_ERR
                               "cciss:  unable to allocate queue for disk %d\n",
                               j);
-                       break;
+                       goto clean4;
                }
                drv->queue = q;
 
@@ -3341,7 +3333,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
                /* This is a limit in the driver and could be eliminated. */
                blk_queue_max_phys_segments(q, MAXSGENTRIES);
 
-               blk_queue_max_sectors(q, 512);
+               blk_queue_max_sectors(q, hba[i]->cciss_max_sectors);
 
                blk_queue_softirq_done(q, cciss_softirq_done);
 
@@ -3360,7 +3352,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
                blk_queue_hardsect_size(q, drv->block_size);
                set_capacity(disk, drv->nr_blocks);
                add_disk(disk);
-       }
+               j++;
+       } while (j <= hba[i]->highest_lun);
 
        return 1;
 
@@ -3371,11 +3364,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        kfree(hba[i]->cmd_pool_bits);
        if (hba[i]->cmd_pool)
                pci_free_consistent(hba[i]->pdev,
-                                   NR_CMDS * sizeof(CommandList_struct),
+                                   hba[i]->nr_cmds * sizeof(CommandList_struct),
                                    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
        if (hba[i]->errinfo_pool)
                pci_free_consistent(hba[i]->pdev,
-                                   NR_CMDS * sizeof(ErrorInfo_struct),
+                                   hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                                    hba[i]->errinfo_pool,
                                    hba[i]->errinfo_pool_dhandle);
        free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
@@ -3383,6 +3376,15 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
        unregister_blkdev(hba[i]->major, hba[i]->devname);
       clean1:
        hba[i]->busy_initializing = 0;
+       /* cleanup any queues that may have been initialized */
+       for (j=0; j <= hba[i]->highest_lun; j++){
+               drive_info_struct *drv = &(hba[i]->drv[j]);
+               if (drv->queue)
+                       blk_cleanup_queue(drv->queue);
+       }
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
        free_hba(i);
        return -1;
 }
@@ -3430,7 +3432,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
        remove_proc_entry(hba[i]->devname, proc_cciss);
 
        /* remove it from the disk list */
-       for (j = 0; j < NWD; j++) {
+       for (j = 0; j < CISS_MAX_LUN; j++) {
                struct gendisk *disk = hba[i]->gendisk[j];
                if (disk) {
                        request_queue_t *q = disk->queue;
@@ -3442,9 +3444,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
                }
        }
 
-       pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
+       pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
                            hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-       pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(ErrorInfo_struct),
+       pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
                            hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
        kfree(hba[i]->cmd_pool_bits);
 #ifdef CONFIG_CISS_SCSI_TAPE
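
Besides replacing the fixed NR_CMDS limit with the per-board nr_cmds value carried in struct board_type, the cciss changes above also swap four open-coded shift-and-OR lines for a single be32_to_cpu() call when decoding the big-endian LUN-list length. The stand-alone check below compares the two decodings; load_be32() is a local stand-in for the kernel helper, and the sample bytes are made up.

/*
 * Stand-alone check that decoding a big-endian 32-bit length with
 * explicit shifts and with a helper gives the same answer.
 */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

static uint32_t load_be32(const unsigned char *p)      /* stand-in for be32_to_cpu() */
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        unsigned char LUNListLength[4] = { 0x00, 0x00, 0x01, 0x20 };    /* sample: 288 */
        uint32_t open_coded, helper;

        open_coded  = (0xff & (uint32_t)LUNListLength[0]) << 24;
        open_coded |= (0xff & (uint32_t)LUNListLength[1]) << 16;
        open_coded |= (0xff & (uint32_t)LUNListLength[2]) << 8;
        open_coded |=  0xff & (uint32_t)LUNListLength[3];

        helper = load_be32(LUNListLength);

        assert(open_coded == helper);
        printf("LUN list length: %u bytes\n", (unsigned)helper);
        return 0;
}
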
index 562235c1445a123b9a5f36293b6f28bf887c2ff0..b70988dd33eccec8f068f87b0c738e6e3875293f 100644 (file)
@@ -6,7 +6,6 @@
 #include "cciss_cmd.h"
 
 
-#define NWD            16
 #define NWD_SHIFT      4
 #define MAX_PART       (1 << NWD_SHIFT)
 
@@ -60,6 +59,7 @@ struct ctlr_info
        __u32   board_id;
        void __iomem *vaddr;
        unsigned long paddr;
+       int     nr_cmds; /* Number of commands allowed on this controller */
        CfgTable_struct __iomem *cfgtable;
        int     interrupts_enabled;
        int     major;
@@ -76,6 +76,7 @@ struct ctlr_info
        unsigned int intr[4];
        unsigned int msix_vector;
        unsigned int msi_vector;
+       int     cciss_max_sectors;
        BYTE    cciss_read;
        BYTE    cciss_write;
        BYTE    cciss_read_capacity;
@@ -110,7 +111,7 @@ struct ctlr_info
        int                     next_to_run;
 
        // Disk structures we need to pass back
-       struct gendisk   *gendisk[NWD];
+       struct gendisk   *gendisk[CISS_MAX_LUN];
 #ifdef CONFIG_CISS_SCSI_TAPE
        void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
        /* list of block side commands the scsi error handling sucked up */
@@ -282,6 +283,7 @@ struct board_type {
        __u32   board_id;
        char    *product_name;
        struct access_method *access;
+       int nr_cmds; /* Max cmds this kind of ctlr can handle. */
 };
 
 #define CCISS_LOCK(i)  (&hba[i]->lock)
index 4af7c4c0c7afb4610863d36142ed1e9ad19d994f..43bf5593b59bf6ed1da79abbbba46a0fea499d44 100644 (file)
@@ -55,6 +55,7 @@
 #define I2O_INT_MASK            0x34
 #define I2O_IBPOST_Q            0x40
 #define I2O_OBPOST_Q            0x44
+#define I2O_DMA1_CFG           0x214
 
 //Configuration Table
 #define CFGTBL_ChangeReq        0x00000001l
@@ -88,7 +89,7 @@ typedef union _u64bit
 //###########################################################################
 //STRUCTURES
 //###########################################################################
-#define CISS_MAX_LUN   16      
+#define CISS_MAX_LUN   1024
 #define CISS_MAX_PHYS_LUN      1024
 // SCSI-3 Cmmands 
 
index 9e6d3a87cbe3fab9fced8a0e7efc5a32c733ee11..3f1b38276e96e123e6ec772cb08a605dd8d08032 100644 (file)
@@ -992,11 +992,11 @@ static void empty(void)
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL, NULL);
+static DECLARE_WORK(floppy_work, NULL);
 
 static void schedule_bh(void (*handler) (void))
 {
-       PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+       PREPARE_WORK(&floppy_work, (work_func_t)handler);
        schedule_work(&floppy_work);
 }
 
@@ -1008,7 +1008,7 @@ static void cancel_activity(void)
 
        spin_lock_irqsave(&floppy_lock, flags);
        do_floppy = NULL;
-       PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+       PREPARE_WORK(&floppy_work, (work_func_t)empty);
        del_timer(&fd_timer);
        spin_unlock_irqrestore(&floppy_lock, flags);
 }
@@ -1868,7 +1868,7 @@ static void show_floppy(void)
        printk("fdc_busy=%lu\n", fdc_busy);
        if (do_floppy)
                printk("do_floppy=%p\n", do_floppy);
-       if (floppy_work.pending)
+       if (work_pending(&floppy_work))
                printk("floppy_work.func=%p\n", floppy_work.func);
        if (timer_pending(&fd_timer))
                printk("fd_timer.function=%p\n", fd_timer.function);
@@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void)
                printk("floppy timer still active:%s\n", timeout_message);
        if (timer_pending(&fd_timer))
                printk("auxiliary floppy timer still active\n");
-       if (floppy_work.pending)
+       if (work_pending(&floppy_work))
                printk("work still pending\n");
 #endif
        old_fdc = fdc;
index 9d1035e8d9d8c713148aa589abbf2c738022c197..7bf2cfbd6285254009febcf809d4926846ec42e6 100644 (file)
@@ -355,14 +355,30 @@ harderror:
        return NULL;
 }
 
+static ssize_t pid_show(struct gendisk *disk, char *page)
+{
+       return sprintf(page, "%ld\n",
+               (long) ((struct nbd_device *)disk->private_data)->pid);
+}
+
+static struct disk_attribute pid_attr = {
+       .attr = { .name = "pid", .mode = S_IRUGO },
+       .show = pid_show,
+};
+
 static void nbd_do_it(struct nbd_device *lo)
 {
        struct request *req;
 
        BUG_ON(lo->magic != LO_MAGIC);
 
+       lo->pid = current->pid;
+       sysfs_create_file(&lo->disk->kobj, &pid_attr.attr);
+
        while ((req = nbd_read_stat(lo)) != NULL)
                nbd_end_request(req);
+
+       sysfs_remove_file(&lo->disk->kobj, &pid_attr.attr);
        return;
 }
 
index c4d696d43dc140d29c9d2b0f50970d1cbfbde9dd..2695465568ad9dde48b7563f0bfdc97404966a13 100644 (file)
@@ -149,12 +149,12 @@ static struct pi_protocol aten = {
 
 static int __init aten_init(void)
 {
-       return pi_register(&aten)-1;
+       return paride_register(&aten);
 }
 
 static void __exit aten_exit(void)
 {
-       pi_unregister( &aten );
+       paride_unregister( &aten );
 }
 
 MODULE_LICENSE("GPL");
index d462ff6b139d8b1f37a416b5ef917117772a6e5e..4f27e7392e38adc6663188805afed0e4e488b515 100644 (file)
@@ -464,12 +464,12 @@ static struct pi_protocol bpck = {
 
 static int __init bpck_init(void)
 {
-       return pi_register(&bpck)-1;
+       return paride_register(&bpck);
 }
 
 static void __exit bpck_exit(void)
 {
-       pi_unregister(&bpck);
+       paride_unregister(&bpck);
 }
 
 MODULE_LICENSE("GPL");
index 41a237c5957db19226bb35b31ec896ad9f8d0724..ad124525ac23f2d8edd34d80708b9518c177f306 100644 (file)
@@ -31,10 +31,7 @@ static int verbose; /* set this to 1 to see debugging messages and whatnot */
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/io.h>
-
-#if defined(CONFIG_PARPORT_MODULE)||defined(CONFIG_PARPORT)
 #include <linux/parport.h>
-#endif
 
 #include "ppc6lnx.c"
 #include "paride.h"
@@ -139,11 +136,6 @@ static int bpck6_test_port ( PIA *pi )   /* check for 8-bit port */
        PPCSTRUCT(pi)->ppc_id=pi->unit;
        PPCSTRUCT(pi)->lpt_addr=pi->port;
 
-#ifdef CONFIG_PARPORT_PC_MODULE
-#define CONFIG_PARPORT_PC
-#endif
-
-#ifdef CONFIG_PARPORT_PC
        /* look at the parport device to see what modes we can use */
        if(((struct pardevice *)(pi->pardev))->port->modes & 
                (PARPORT_MODE_EPP)
@@ -161,11 +153,6 @@ static int bpck6_test_port ( PIA *pi )   /* check for 8-bit port */
        {
                return 1;
        }
-#else
-       /* there is no way of knowing what kind of port we have
-          default to the highest mode possible */
-       return 5;
-#endif
 }
 
 static int bpck6_probe_unit ( PIA *pi )
@@ -265,12 +252,12 @@ static int __init bpck6_init(void)
        printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n");
        if(verbose)
                printk(KERN_DEBUG "bpck6: verbose debug enabled.\n");
-       return pi_register(&bpck6) - 1;  
+       return paride_register(&bpck6);
 }
 
 static void __exit bpck6_exit(void)
 {
-       pi_unregister(&bpck6);
+       paride_unregister(&bpck6);
 }
 
 MODULE_LICENSE("GPL");
index 43d61359d8ecc4febea357115e9cbb809e43804e..9bcd354953234d5303adcdf37139ba07bbf8e1a3 100644 (file)
@@ -205,12 +205,12 @@ static struct pi_protocol comm = {
 
 static int __init comm_init(void)
 {
-       return pi_register(&comm)-1;
+       return paride_register(&comm);
 }
 
 static void __exit comm_exit(void)
 {
-       pi_unregister(&comm);
+       paride_unregister(&comm);
 }
 
 MODULE_LICENSE("GPL");
index 04d53bf58e8c20ad47b61019eef55263d0acf76d..accc5c777cbba7efa0aca7ea09ba0cfa08df0b48 100644 (file)
@@ -220,12 +220,12 @@ static struct pi_protocol dstr = {
 
 static int __init dstr_init(void)
 {
-       return pi_register(&dstr)-1;
+       return paride_register(&dstr);
 }
 
 static void __exit dstr_exit(void)
 {
-       pi_unregister(&dstr);
+       paride_unregister(&dstr);
 }
 
 MODULE_LICENSE("GPL");
index 55d1c0a1fb90df6327b34a2e554340ae0770a5af..1bcdff77322effc30fc679596a01762f9d2672a0 100644 (file)
@@ -327,12 +327,12 @@ static int __init epat_init(void)
 #ifdef CONFIG_PARIDE_EPATC8
        epatc8 = 1;
 #endif
-       return pi_register(&epat)-1;
+       return paride_register(&epat);
 }
 
 static void __exit epat_exit(void)
 {
-       pi_unregister(&epat);
+       paride_unregister(&epat);
 }
 
 MODULE_LICENSE("GPL");
index 0f2e0c292d8245c680b41a6d4128c691b06c594b..fb0e782d055e455dd1863d6be83ef90423eb0a4a 100644 (file)
@@ -303,12 +303,12 @@ static struct pi_protocol epia = {
 
 static int __init epia_init(void)
 {
-       return pi_register(&epia)-1;
+       return paride_register(&epia);
 }
 
 static void __exit epia_exit(void)
 {
-       pi_unregister(&epia);
+       paride_unregister(&epia);
 }
 
 MODULE_LICENSE("GPL");
index e0f0691d8bc2e1150417b7b7a9f522030afe3f90..381283753ae4811ae514f716f00e46e236b2bf64 100644 (file)
@@ -138,12 +138,12 @@ static struct pi_protocol fit2 = {
 
 static int __init fit2_init(void)
 {
-       return pi_register(&fit2)-1;
+       return paride_register(&fit2);
 }
 
 static void __exit fit2_exit(void)
 {
-       pi_unregister(&fit2);
+       paride_unregister(&fit2);
 }
 
 MODULE_LICENSE("GPL");
index 15400e7bc66682dec344a2c575b79b13a9be3033..275d269458ebdfc6f87f19f6573992059e18828b 100644 (file)
@@ -198,12 +198,12 @@ static struct pi_protocol fit3 = {
 
 static int __init fit3_init(void)
 {
-       return pi_register(&fit3)-1;
+       return paride_register(&fit3);
 }
 
 static void __exit fit3_exit(void)
 {
-       pi_unregister(&fit3);
+       paride_unregister(&fit3);
 }
 
 MODULE_LICENSE("GPL");
index 5ea2904d2815aed0099bbd97bfada763fa7c6af3..4f2ba244689b40583f03f476252b592493f751e9 100644 (file)
@@ -263,12 +263,12 @@ static struct pi_protocol friq = {
 
 static int __init friq_init(void)
 {
-       return pi_register(&friq)-1;
+       return paride_register(&friq);
 }
 
 static void __exit friq_exit(void)
 {
-       pi_unregister(&friq);
+       paride_unregister(&friq);
 }
 
 MODULE_LICENSE("GPL");
index 56b3824b1538f122990af22a82f5718728b72e00..c3cde364603a593d6270342e0b5dde661c99b948 100644 (file)
@@ -300,12 +300,12 @@ static struct pi_protocol frpw = {
 
 static int __init frpw_init(void)
 {
-       return pi_register(&frpw)-1;
+       return paride_register(&frpw);
 }
 
 static void __exit frpw_exit(void)
 {
-       pi_unregister(&frpw);
+       paride_unregister(&frpw);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/jumbo b/drivers/block/paride/jumbo
deleted file mode 100644 (file)
index e793b9c..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-#
-# This script can be used to build "jumbo" modules that contain the
-# base PARIDE support, one protocol module and one high-level driver.
-#
-echo -n "High level driver [pcd] : "
-read X
-HLD=${X:-pcd}
-#
-echo -n "Protocol module [bpck] : "
-read X
-PROTO=${X:-bpck}
-#
-echo -n "Use MODVERSIONS [y] ? "
-read X
-UMODV=${X:-y}
-#
-echo -n "For SMP kernel [n] ? "
-read X
-USMP=${X:-n}
-#
-echo -n "Support PARPORT [n] ? "
-read X
-UPARP=${X:-n}
-#
-echo
-#
-case $USMP in
-       y* | Y* ) FSMP="-DCONFIG_SMP"
-                 ;;
-       *)        FSMP=""
-                 ;;
-esac
-#
-MODI="-include ../../../include/linux/modversions.h"
-#
-case $UMODV in
-       y* | Y* ) FMODV="-DMODVERSIONS $MODI"
-                 ;;
-       *)        FMODV=""
-                 ;;
-esac
-#
-case $UPARP in
-       y* | Y* ) FPARP="-DCONFIG_PARPORT"
-                 ;;
-       *)        FPARP=""
-                 ;;
-esac
-#
-TARG=$HLD-$PROTO.o
-FPROTO=-DCONFIG_PARIDE_`echo "$PROTO" | tr [a-z] [A-Z]`
-FK="-D__KERNEL__ -I ../../../include"
-FLCH=-D_LINUX_CONFIG_H
-#
-echo cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
-cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
-#
-echo cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
-cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
-#
-echo cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
-cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
-#
-echo ld -r -o $TARG Jp.o Jb.o Jd.o
-ld -r -o $TARG Jp.o Jb.o Jd.o
-#
-#
-rm Jp.o Jb.o Jd.o
-#
index d983bcea76fe6d8978d17ae04d16bcd7069a2e4e..35999c415ee339784df83acbd13abb4bb111ec48 100644 (file)
@@ -283,13 +283,21 @@ static struct pi_protocol k971 = {
 
 static int __init kbic_init(void)
 {
-       return (pi_register(&k951)||pi_register(&k971))-1;
+       int rv;
+
+       rv = paride_register(&k951);
+       if (rv < 0)
+               return rv;
+       rv = paride_register(&k971);
+       if (rv < 0)
+               paride_unregister(&k951);
+       return rv;
 }
 
 static void __exit kbic_exit(void)
 {
-       pi_unregister(&k951);
-       pi_unregister(&k971);
+       paride_unregister(&k951);
+       paride_unregister(&k971);
 }
 
 MODULE_LICENSE("GPL");
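
paride_register() now returns 0 on success and a negative value on failure, which is why the old "pi_register(&proto)-1" call sites disappear throughout these protocol modules; kbic_init() registers two protocols and therefore also gains an unwind path. A stand-alone sketch of the convention with stub register/unregister functions (the names are placeholders):

/*
 * Sketch of the 0-or-negative registration convention and of the
 * "register two, unwind the first on failure" pattern from kbic_init().
 * register_proto()/unregister_proto() are stubs.
 */
#include <stdio.h>

struct proto {
        const char *name;
};

static struct proto k951 = { "k951" };
static struct proto k971 = { "k971" };

static int register_proto(struct proto *p)
{
        printf("registered %s\n", p->name);
        return 0;                       /* 0 on success, negative on error */
}

static void unregister_proto(struct proto *p)
{
        printf("unregistered %s\n", p->name);
}

static int kbic_like_init(void)
{
        int rv;

        rv = register_proto(&k951);
        if (rv < 0)
                return rv;              /* nothing to undo yet */
        rv = register_proto(&k971);
        if (rv < 0)
                unregister_proto(&k951);        /* keep init all-or-nothing */
        return rv;
}

int main(void)
{
        return kbic_like_init() ? 1 : 0;
}
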
index 6c7edbfba9a03f1699bbb60a86bd2399e9d34a79..117ab0e8ccf0afc9156f500d795487fbfd1f4894 100644 (file)
@@ -115,12 +115,12 @@ static struct pi_protocol ktti = {
 
 static int __init ktti_init(void)
 {
-       return pi_register(&ktti)-1;
+       return paride_register(&ktti);
 }
 
 static void __exit ktti_exit(void)
 {
-       pi_unregister(&ktti);
+       paride_unregister(&ktti);
 }
 
 MODULE_LICENSE("GPL");
index 9f8e01096809aaf069971adb7ca9a003caa97b1e..0173697a1a4d5d9d474f14553e3d350ec0195b7e 100644 (file)
@@ -140,12 +140,12 @@ static struct pi_protocol on20 = {
 
 static int __init on20_init(void)
 {
-       return pi_register(&on20)-1;
+       return paride_register(&on20);
 }
 
 static void __exit on20_exit(void)
 {
-       pi_unregister(&on20);
+       paride_unregister(&on20);
 }
 
 MODULE_LICENSE("GPL");
index 0f833caa210136376b4cf7ff716545cc40bbf1e8..95ba256921f20c128a4a676c4ad405bfc1cdc1aa 100644 (file)
@@ -306,12 +306,12 @@ static struct pi_protocol on26 = {
 
 static int __init on26_init(void)
 {
-       return pi_register(&on26)-1;
+       return paride_register(&on26);
 }
 
 static void __exit on26_exit(void)
 {
-       pi_unregister(&on26);
+       paride_unregister(&on26);
 }
 
 MODULE_LICENSE("GPL");
index 4b258f7836f32f0b99b2f0607e3986a7bdebcbd1..48c50f11f63b9290f943ed026b947d031716d36e 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sched.h>       /* TASK_* */
-
-#ifdef CONFIG_PARPORT_MODULE
-#define CONFIG_PARPORT
-#endif
-
-#ifdef CONFIG_PARPORT
 #include <linux/parport.h>
-#endif
 
 #include "paride.h"
 
@@ -76,8 +69,6 @@ void pi_read_block(PIA * pi, char *buf, int count)
 
 EXPORT_SYMBOL(pi_read_block);
 
-#ifdef CONFIG_PARPORT
-
 static void pi_wake_up(void *p)
 {
        PIA *pi = (PIA *) p;
@@ -100,11 +91,8 @@ static void pi_wake_up(void *p)
                cont();
 }
 
-#endif
-
 int pi_schedule_claimed(PIA * pi, void (*cont) (void))
 {
-#ifdef CONFIG_PARPORT
        unsigned long flags;
 
        spin_lock_irqsave(&pi_spinlock, flags);
@@ -115,7 +103,6 @@ int pi_schedule_claimed(PIA * pi, void (*cont) (void))
        }
        pi->claimed = 1;
        spin_unlock_irqrestore(&pi_spinlock, flags);
-#endif
        return 1;
 }
 EXPORT_SYMBOL(pi_schedule_claimed);
@@ -133,20 +120,16 @@ static void pi_claim(PIA * pi)
        if (pi->claimed)
                return;
        pi->claimed = 1;
-#ifdef CONFIG_PARPORT
        if (pi->pardev)
                wait_event(pi->parq,
                           !parport_claim((struct pardevice *) pi->pardev));
-#endif
 }
 
 static void pi_unclaim(PIA * pi)
 {
        pi->claimed = 0;
-#ifdef CONFIG_PARPORT
        if (pi->pardev)
                parport_release((struct pardevice *) (pi->pardev));
-#endif
 }
 
 void pi_connect(PIA * pi)
@@ -167,21 +150,15 @@ EXPORT_SYMBOL(pi_disconnect);
 
 static void pi_unregister_parport(PIA * pi)
 {
-#ifdef CONFIG_PARPORT
        if (pi->pardev) {
                parport_unregister_device((struct pardevice *) (pi->pardev));
                pi->pardev = NULL;
        }
-#endif
 }
 
 void pi_release(PIA * pi)
 {
        pi_unregister_parport(pi);
-#ifndef CONFIG_PARPORT
-       if (pi->reserved)
-               release_region(pi->port, pi->reserved);
-#endif                         /* !CONFIG_PARPORT */
        if (pi->proto->release_proto)
                pi->proto->release_proto(pi);
        module_put(pi->proto->owner);
@@ -229,7 +206,7 @@ static int pi_test_proto(PIA * pi, char *scratch, int verbose)
        return res;
 }
 
-int pi_register(PIP * pr)
+int paride_register(PIP * pr)
 {
        int k;
 
@@ -237,24 +214,24 @@ int pi_register(PIP * pr)
                if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
                        printk("paride: %s protocol already registered\n",
                               pr->name);
-                       return 0;
+                       return -1;
                }
        k = 0;
        while ((k < MAX_PROTOS) && (protocols[k]))
                k++;
        if (k == MAX_PROTOS) {
                printk("paride: protocol table full\n");
-               return 0;
+               return -1;
        }
        protocols[k] = pr;
        pr->index = k;
        printk("paride: %s registered as protocol %d\n", pr->name, k);
-       return 1;
+       return 0;
 }
 
-EXPORT_SYMBOL(pi_register);
+EXPORT_SYMBOL(paride_register);
 
-void pi_unregister(PIP * pr)
+void paride_unregister(PIP * pr)
 {
        if (!pr)
                return;
@@ -265,12 +242,10 @@ void pi_unregister(PIP * pr)
        protocols[pr->index] = NULL;
 }
 
-EXPORT_SYMBOL(pi_unregister);
+EXPORT_SYMBOL(paride_unregister);
 
 static int pi_register_parport(PIA * pi, int verbose)
 {
-#ifdef CONFIG_PARPORT
-
        struct parport *port;
 
        port = parport_find_base(pi->port);
@@ -290,7 +265,6 @@ static int pi_register_parport(PIA * pi, int verbose)
                printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
 
        pi->parname = (char *) port->name;
-#endif
 
        return 1;
 }
@@ -447,13 +421,6 @@ int pi_init(PIA * pi, int autoprobe, int port, int mode,
                        printk("%s: Adapter not found\n", device);
                return 0;
        }
-#ifndef CONFIG_PARPORT
-       if (!request_region(pi->port, pi->reserved, pi->device)) {
-               printk(KERN_WARNING "paride: Unable to request region 0x%x\n",
-                      pi->port);
-               return 0;
-       }
-#endif                         /* !CONFIG_PARPORT */
 
        if (pi->parname)
                printk("%s: Sharing %s at 0x%x\n", pi->device,
index c6d98ef09e480640d1deaa3138b1430872e4d765..2bddbf45518bbb7596a71bf83743eff9a12c9b60 100644 (file)
@@ -163,8 +163,8 @@ struct pi_protocol {
 
 typedef struct pi_protocol PIP;
 
-extern int pi_register( PIP * );
-extern void pi_unregister ( PIP * );
+extern int paride_register( PIP * );
+extern void paride_unregister ( PIP * );
 
 #endif /* __DRIVERS_PARIDE_H__ */
 /* end of paride.h */
index ac5ba462710b2f0af37b37032804aa238bbdf2b8..c852eed91e4b6d82cdea3950271abc17e7f4762d 100644 (file)
@@ -912,12 +912,12 @@ static int __init pcd_init(void)
        int unit;
 
        if (disable)
-               return -1;
+               return -EINVAL;
 
        pcd_init_units();
 
        if (pcd_detect())
-               return -1;
+               return -ENODEV;
 
        /* get the atapi capabilities page */
        pcd_probe_capabilities();
@@ -925,7 +925,7 @@ static int __init pcd_init(void)
        if (register_blkdev(major, name)) {
                for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
                        put_disk(cd->disk);
-               return -1;
+               return -EBUSY;
        }
 
        pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock);
@@ -933,7 +933,7 @@ static int __init pcd_init(void)
                unregister_blkdev(major, name);
                for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
                        put_disk(cd->disk);
-               return -1;
+               return -ENOMEM;
        }
 
        for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
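
The pcd_init() changes above replace bare -1 returns (which the module loader reports as -EPERM) with errno values that describe the actual failure. A reduced sketch of that shape for a hypothetical block driver; every name below (exblk, example_detect, and so on) is invented for illustration:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static int disable;				/* hypothetical module parameter */
static int major = 240;				/* hypothetical major in the experimental range */
static const char name[] = "exblk";
static struct request_queue *example_queue;
static DEFINE_SPINLOCK(example_lock);

static int example_detect(void) { return 0; }	/* stub: pretend the hardware is present */
static void do_example_request(struct request_queue *q) { }

static int __init example_init(void)
{
	if (disable)
		return -EINVAL;			/* explicitly disabled, not a bare -1 */

	if (example_detect())
		return -ENODEV;			/* no hardware found */

	if (register_blkdev(major, name))
		return -EBUSY;			/* major number already taken */

	example_queue = blk_init_queue(do_example_request, &example_lock);
	if (!example_queue) {
		unregister_blkdev(major, name);
		return -ENOMEM;			/* allocation failed */
	}
	return 0;
}
module_init(example_init);
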
index 40a11e567970f30675f8b84590591146947d4e95..9d9bff23f426d815420ee1110d44c79a5e5f7fd1 100644 (file)
@@ -352,19 +352,19 @@ static enum action (*phase)(void);
 
 static void run_fsm(void);
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
-static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
 
 static void schedule_fsm(void)
 {
        if (!nice)
-               schedule_work(&fsm_tq);
+               schedule_delayed_work(&fsm_tq, 0);
        else
                schedule_delayed_work(&fsm_tq, nice-1);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
        run_fsm();
 }
index 1a9dee19efcf11cfc61d02dc383a4cb04ff222d9..7cdaa19512605d7cc28356d41132426ca0953630 100644 (file)
@@ -933,25 +933,25 @@ static int __init pf_init(void)
        int unit;
 
        if (disable)
-               return -1;
+               return -EINVAL;
 
        pf_init_units();
 
        if (pf_detect())
-               return -1;
+               return -ENODEV;
        pf_busy = 0;
 
        if (register_blkdev(major, name)) {
                for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
                        put_disk(pf->disk);
-               return -1;
+               return -EBUSY;
        }
        pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
        if (!pf_queue) {
                unregister_blkdev(major, name);
                for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
                        put_disk(pf->disk);
-               return -1;
+               return -ENOMEM;
        }
 
        blk_queue_max_phys_segments(pf_queue, cluster);
index 13f998aa1cd3ffc19ccf402fd901b91e8b0dbf2d..9970aedbb5d9a05f049b33d7cce2b7079a5436a4 100644 (file)
@@ -646,14 +646,14 @@ static int __init pg_init(void)
        int err;
 
        if (disable){
-               err = -1;
+               err = -EINVAL;
                goto out;
        }
 
        pg_init_units();
 
        if (pg_detect()) {
-               err = -1;
+               err = -ENODEV;
                goto out;
        }
 
index 932342d7a8ebdfbb3d329e04b2f040f868357c82..bc3703294143b4432e5e1e6fa6e6c28ab9ff4d6a 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 
-static void ps_tq_int( void *data);
+static void ps_tq_int(struct work_struct *work);
 
 static void (* ps_continuation)(void);
 static int (* ps_ready)(void);
@@ -45,7 +45,7 @@ static int ps_nice = 0;
 
 static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
 
-static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
 
 static void ps_set_intr(void (*continuation)(void), 
                        int (*ready)(void),
@@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void),
        if (!ps_tq_active) {
                ps_tq_active = 1;
                if (!ps_nice)
-                       schedule_work(&ps_tq);
+                       schedule_delayed_work(&ps_tq, 0);
                else
                        schedule_delayed_work(&ps_tq, ps_nice-1);
        }
        spin_unlock_irqrestore(&ps_spinlock,flags);
 }
 
-static void ps_tq_int(void *data)
+static void ps_tq_int(struct work_struct *work)
 {
        void (*con)(void);
        unsigned long flags;
@@ -92,7 +92,7 @@ static void ps_tq_int(void *data)
        }
        ps_tq_active = 1;
        if (!ps_nice)
-               schedule_work(&ps_tq);
+               schedule_delayed_work(&ps_tq, 0);
        else
                schedule_delayed_work(&ps_tq, ps_nice-1);
        spin_unlock_irqrestore(&ps_spinlock,flags);
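
The hunks above are part of the tree-wide workqueue conversion: statically declared work that may be delayed becomes a struct delayed_work via DECLARE_DELAYED_WORK(), the handler takes a struct work_struct * instead of a void *, and an immediate kick is written as schedule_delayed_work(..., 0). A reduced sketch of the same shape, with hypothetical names:

#include <linux/workqueue.h>

static void example_tq_int(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_tq, example_tq_int);

static void example_tq_int(struct work_struct *work)
{
	/* run the state machine; "work" is &example_tq.work */
}

static void example_schedule(int nice)
{
	if (!nice)
		schedule_delayed_work(&example_tq, 0);	/* queue immediately */
	else
		schedule_delayed_work(&example_tq, nice - 1);
}
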
index 35fb2663672146d0cdbd1ed15ea36da68d1bb223..c902b25e48697d938179d7002ea7522fab5aad93 100644 (file)
@@ -946,12 +946,12 @@ static int __init pt_init(void)
        int err;
 
        if (disable) {
-               err = -1;
+               err = -EINVAL;
                goto out;
        }
 
        if (pt_detect()) {
-               err = -1;
+               err = -ENODEV;
                goto out;
        }
 
index f2904f67af4711484fc0ca99c74d8dc462103d76..e45eaa264119769b639bc9be17fe8a573271567c 100644 (file)
@@ -54,7 +54,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/mutex.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_ioctl.h>
index 47d6975268ff3348f7991ed8ff215e3baacfbe95..54509eb3391bde83bc059a3443e3ee3ac8dba7c7 100644 (file)
@@ -1244,9 +1244,10 @@ out:
        return IRQ_RETVAL(handled);
 }
 
-static void carm_fsm_task (void *_data)
+static void carm_fsm_task (struct work_struct *work)
 {
-       struct carm_host *host = _data;
+       struct carm_host *host =
+               container_of(work, struct carm_host, fsm_task);
        unsigned long flags;
        unsigned int state;
        int rc, i, next_dev;
@@ -1619,7 +1620,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        host->pdev = pdev;
        host->flags = pci_dac ? FL_DAC : 0;
        spin_lock_init(&host->lock);
-       INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+       INIT_WORK(&host->fsm_task, carm_fsm_task);
        init_completion(&host->probe_comp);
 
        for (i = 0; i < ARRAY_SIZE(host->req); i++)
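
For work items embedded in a larger structure, as in the carm hunk above, the new handler signature recovers the owning object with container_of() instead of receiving it as a void * argument, and INIT_WORK() drops its third parameter. A minimal sketch with a hypothetical host structure:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_host {
	struct work_struct fsm_task;
	/* ... driver state ... */
};

static void example_fsm_task(struct work_struct *work)
{
	struct example_host *host =
		container_of(work, struct example_host, fsm_task);

	/* ... drive the state machine using "host" ... */
}

static void example_host_init(struct example_host *host)
{
	INIT_WORK(&host->fsm_task, example_fsm_task);	/* two-argument form */
}
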
index 0d5c73f0726558414efc03863c544c1843f79459..2098eff91e14721641f81c47b3c510481fbb6bcc 100644 (file)
@@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
     int stalled_pipe);
 static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
 static void ub_reset_enter(struct ub_dev *sc, int try);
-static void ub_reset_task(void *arg);
+static void ub_reset_task(struct work_struct *work);
 static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
 static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
     struct ub_capacity *ret);
@@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try)
        schedule_work(&sc->reset_work);
 }
 
-static void ub_reset_task(void *arg)
+static void ub_reset_task(struct work_struct *work)
 {
-       struct ub_dev *sc = arg;
+       struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
        unsigned long flags;
        struct list_head *p;
        struct ub_lun *lun;
@@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf,
        usb_init_urb(&sc->work_urb);
        tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
        atomic_set(&sc->poison, 0);
-       INIT_WORK(&sc->reset_work, ub_reset_task, sc);
+       INIT_WORK(&sc->reset_work, ub_reset_task);
        init_waitqueue_head(&sc->reset_wait);
 
        init_timer(&sc->work_timer);
index ec5a1b90a0a24133d1043fb19290a1d296b11125..e19ba4ebcd4e2c4763c926bfae4be82135962c0a 100644 (file)
@@ -759,6 +759,8 @@ static struct vio_driver viodasd_driver = {
        }
 };
 
+static int need_delete_probe;
+
 /*
  * Initialize the whole device driver.  Handle module and non-module
  * versions
@@ -773,46 +775,67 @@ static int __init viodasd_init(void)
 
        if (viopath_hostLp == HvLpIndexInvalid) {
                printk(VIOD_KERN_WARNING "invalid hosting partition\n");
-               return -EIO;
+               rc = -EIO;
+               goto early_fail;
        }
 
        printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
                        viopath_hostLp);
 
         /* register the block device */
-       if (register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME)) {
+       rc =  register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
+       if (rc) {
                printk(VIOD_KERN_WARNING
                                "Unable to get major number %d for %s\n",
                                VIODASD_MAJOR, VIOD_GENHD_NAME);
-               return -EIO;
+               goto early_fail;
        }
        /* Actually open the path to the hosting partition */
-       if (viopath_open(viopath_hostLp, viomajorsubtype_blockio,
-                               VIOMAXREQ + 2)) {
+       rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
+                               VIOMAXREQ + 2);
+       if (rc) {
                printk(VIOD_KERN_WARNING
                       "error opening path to host partition %d\n",
                       viopath_hostLp);
-               unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
-               return -EIO;
+               goto unregister_blk;
        }
 
        /* Initialize our request handler */
        vio_setHandler(viomajorsubtype_blockio, handle_block_event);
 
        rc = vio_register_driver(&viodasd_driver);
-       if (rc == 0)
-               driver_create_file(&viodasd_driver.driver, &driver_attr_probe);
+       if (rc) {
+               printk(VIOD_KERN_WARNING "vio_register_driver failed\n");
+               goto unset_handler;
+       }
+
+       /*
+        * If this call fails, it just means that we cannot dynamically
+        * add virtual disks, but the driver will still work fine for
+        * all existing disk, so ignore the failure.
+        */
+       if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
+               need_delete_probe = 1;
+
+       return 0;
+
+unset_handler:
+       vio_clearHandler(viomajorsubtype_blockio);
+       viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
+unregister_blk:
+       unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
+early_fail:
        return rc;
 }
 module_init(viodasd_init);
 
-void viodasd_exit(void)
+void __exit viodasd_exit(void)
 {
-       driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
+       if (need_delete_probe)
+               driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
        vio_unregister_driver(&viodasd_driver);
        vio_clearHandler(viomajorsubtype_blockio);
-       unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
        viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
+       unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
 }
-
 module_exit(viodasd_exit);
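
The reworked viodasd_init() above unwinds partial initialization through a chain of goto labels so that each failure path releases exactly what was set up before it, in reverse order. The general idiom, sketched with hypothetical setup steps:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

/* hypothetical setup/teardown pairs, stubbed for illustration */
static int step_register_blkdev(void)	{ return 0; }
static void undo_register_blkdev(void)	{ }
static int step_open_path(void)		{ return 0; }
static void undo_open_path(void)	{ }
static int step_register_driver(void)	{ return 0; }

static int __init example_init(void)
{
	int rc;

	rc = step_register_blkdev();
	if (rc)
		goto early_fail;
	rc = step_open_path();
	if (rc)
		goto unregister_blk;
	rc = step_register_driver();
	if (rc)
		goto close_path;
	return 0;

close_path:				/* teardown runs in reverse order of setup */
	undo_open_path();
unregister_blk:
	undo_register_blkdev();
early_fail:
	return rc;
}
module_init(example_init);
MODULE_LICENSE("GPL");
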
index 516751754aa9e6d33b4b139f84bcb10edaecb239..9256985cbe36c727af74aec9333a8c0c5049dc05 100644 (file)
@@ -157,9 +157,10 @@ static void bcm203x_complete(struct urb *urb)
        }
 }
 
-static void bcm203x_work(void *user_data)
+static void bcm203x_work(struct work_struct *work)
 {
-       struct bcm203x_data *data = user_data;
+       struct bcm203x_data *data =
+               container_of(work, struct bcm203x_data, work);
 
        if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
                BT_ERR("Can't submit URB");
@@ -246,7 +247,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
 
        release_firmware(firmware);
 
-       INIT_WORK(&data->work, bcm203x_work, (void *) data);
+       INIT_WORK(&data->work, bcm203x_work);
 
        usb_set_intfdata(intf, data);
 
index cbc07250b8984f1c95f07539da45f89d6a914a57..acfb6a430dcc39f0fe2423151e4c568e2cdde26c 100644 (file)
@@ -892,43 +892,10 @@ static void bluecard_detach(struct pcmcia_device *link)
 }
 
 
-static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
-{
-       int i;
-
-       i = pcmcia_get_first_tuple(handle, tuple);
-       if (i != CS_SUCCESS)
-               return CS_NO_MORE_ITEMS;
-
-       i = pcmcia_get_tuple_data(handle, tuple);
-       if (i != CS_SUCCESS)
-               return i;
-
-       return pcmcia_parse_tuple(handle, tuple, parse);
-}
-
 static int bluecard_config(struct pcmcia_device *link)
 {
        bluecard_info_t *info = link->priv;
-       tuple_t tuple;
-       u_short buf[256];
-       cisparse_t parse;
-       int i, n, last_ret, last_fn;
-
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleOffset = 0;
-       tuple.TupleDataMax = 255;
-       tuple.Attributes = 0;
-
-       /* Get configuration register information */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       last_ret = first_tuple(link, &tuple, &parse);
-       if (last_ret != CS_SUCCESS) {
-               last_fn = ParseTuple;
-               goto cs_failed;
-       }
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
+       int i, n;
 
        link->conf.ConfigIndex = 0x20;
        link->io.NumPorts1 = 64;
@@ -966,9 +933,6 @@ static int bluecard_config(struct pcmcia_device *link)
 
        return 0;
 
-cs_failed:
-       cs_error(link, last_fn, last_ret);
-
 failed:
        bluecard_release(link);
        return -ENODEV;
index 3a96a0babc6af679a36fb3b2b5a954a17f09a2f9..aae3abace586ff59ce0ac769ec1b6058b745fe0d 100644 (file)
@@ -713,22 +713,7 @@ static int bt3c_config(struct pcmcia_device *link)
        u_short buf[256];
        cisparse_t parse;
        cistpl_cftable_entry_t *cf = &parse.cftable_entry;
-       int i, j, try, last_ret, last_fn;
-
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleOffset = 0;
-       tuple.TupleDataMax = 255;
-       tuple.Attributes = 0;
-
-       /* Get configuration register information */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       last_ret = first_tuple(link, &tuple, &parse);
-       if (last_ret != CS_SUCCESS) {
-               last_fn = ParseTuple;
-               goto cs_failed;
-       }
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
+       int i, j, try;
 
        /* First pass: look for a config entry that looks normal. */
        tuple.TupleData = (cisdata_t *)buf;
@@ -802,9 +787,6 @@ found_port:
 
        return 0;
 
-cs_failed:
-       cs_error(link, last_fn, last_ret);
-
 failed:
        bt3c_release(link);
        return -ENODEV;
index 3b29086b7c3ff72d88defc8cc454f8c304b54435..92648ef2f5d02bf37e15dee1f64cd9bbd4319f89 100644 (file)
@@ -644,22 +644,7 @@ static int btuart_config(struct pcmcia_device *link)
        u_short buf[256];
        cisparse_t parse;
        cistpl_cftable_entry_t *cf = &parse.cftable_entry;
-       int i, j, try, last_ret, last_fn;
-
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleOffset = 0;
-       tuple.TupleDataMax = 255;
-       tuple.Attributes = 0;
-
-       /* Get configuration register information */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       last_ret = first_tuple(link, &tuple, &parse);
-       if (last_ret != CS_SUCCESS) {
-               last_fn = ParseTuple;
-               goto cs_failed;
-       }
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
+       int i, j, try;
 
        /* First pass: look for a config entry that looks normal. */
        tuple.TupleData = (cisdata_t *) buf;
@@ -734,9 +719,6 @@ found_port:
 
        return 0;
 
-cs_failed:
-       cs_error(link, last_fn, last_ret);
-
 failed:
        btuart_release(link);
        return -ENODEV;
index 07eafbc5dc3a7571ee42b8dfc48c6a75973fd137..77b99eecbc49cd9d0a81472f19f3932d0a6bc122 100644 (file)
@@ -626,22 +626,7 @@ static int dtl1_config(struct pcmcia_device *link)
        u_short buf[256];
        cisparse_t parse;
        cistpl_cftable_entry_t *cf = &parse.cftable_entry;
-       int i, last_ret, last_fn;
-
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleOffset = 0;
-       tuple.TupleDataMax = 255;
-       tuple.Attributes = 0;
-
-       /* Get configuration register information */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       last_ret = first_tuple(link, &tuple, &parse);
-       if (last_ret != CS_SUCCESS) {
-               last_fn = ParseTuple;
-               goto cs_failed;
-       }
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
+       int i;
 
        tuple.TupleData = (cisdata_t *)buf;
        tuple.TupleOffset = 0;
@@ -690,9 +675,6 @@ static int dtl1_config(struct pcmcia_device *link)
 
        return 0;
 
-cs_failed:
-       cs_error(link, last_fn, last_ret);
-
 failed:
        dtl1_release(link);
        return -ENODEV;
index d0cface535fb33e851aeab4548c2bcd6097340dd..5e2c31882003e785f630597d0ba8451d0e195e3a 100644 (file)
@@ -330,7 +330,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
           reliable packet if the number of packets sent but not yet ack'ed
           is < than the winsize */
 
-       spin_lock_irqsave(&bcsp->unack.lock, flags);
+       spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
        if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
                struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
@@ -696,7 +696,7 @@ static void bcsp_timed_event(unsigned long arg)
 
        BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
 
-       spin_lock_irqsave(&bcsp->unack.lock, flags);
+       spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
        while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
                bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07;
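
The bcsp hunks above annotate a lock acquisition with spin_lock_irqsave_nested() and SINGLE_DEPTH_NESTING so that lockdep accepts taking a second lock of the same lock class one level inside the first, rather than reporting it as recursive locking. A small sketch of the annotation alone (the surrounding queue handling is omitted):

#include <linux/spinlock.h>

static void example_requeue(spinlock_t *inner_lock)
{
	unsigned long flags;

	/* the caller already holds another lock of the same lock class;
	 * tell lockdep this one nests one level below it */
	spin_lock_irqsave_nested(inner_lock, flags, SINGLE_DEPTH_NESTING);
	/* ... move packets between the two protected queues ... */
	spin_unlock_irqrestore(inner_lock, flags);
}
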
index 25032d7edc55e19a3f63a8e3dfd92a70ddca767c..3541690a77d443932de3b2123ba677d491f729bb 100644 (file)
@@ -101,7 +101,7 @@ static void debug(int debug_this, const char* fmt, ...)
                return;
 
        va_start(args, fmt);
-       vsprintf(s, fmt, args);
+       vsnprintf(s, sizeof(s), fmt, args);
        printk(KERN_DEBUG "optcd: %s\n", s);
        va_end(args);
 }
index ba50e5a712f23fe2fe68511274fb7f3cc1d8dbec..a1283b1ef9890cdadae175ed9049900d26dfacca 100644 (file)
@@ -770,11 +770,10 @@ static void msg(int level, const char *fmt, ...)
        
        msgnum++;
        if (msgnum>99) msgnum=0;
-       sprintf(buf, MSG_LEVEL "%s-%d [%02d]:  ", major_name, current_drive - D_S, msgnum);
        va_start(args, fmt);
-       vsprintf(&buf[18], fmt, args);
+       vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
-       printk(buf);
+       printk(MSG_LEVEL "%s-%d [%02d]:  %s", major_name, current_drive - D_S, msgnum, buf);
 #if KLOGD_PAUSE
        sbp_sleep(KLOGD_PAUSE); /* else messages get lost */
 #endif /* KLOGD_PAUSE */ 
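
Both message helpers above switch from vsprintf() to vsnprintf() so an overlong format expansion can no longer overrun the fixed-size buffer, and the sbpcd variant builds the prefix in printk() rather than formatting at a hard-coded offset. The bounded-formatting part of that change, sketched with hypothetical names:

#include <linux/kernel.h>
#include <stdarg.h>

static void example_msg(const char *fmt, ...)
{
	static char buf[256];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);	/* never writes past buf[255] */
	va_end(args);
	printk(KERN_DEBUG "example: %s\n", buf);
}
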
index 00b17ae39736cbbb4132f85d91c0b1c80f415bd1..2f2c4efff8a31d3c5e53f80d017f8ecd88435984 100644 (file)
@@ -459,7 +459,7 @@ static const struct aper_size_info_32 nforce3_sizes[5] =
 
 /* Handle shadow device of the Nvidia NForce3 */
 /* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
-static int __devinit nforce3_agp_init(struct pci_dev *pdev)
+static int nforce3_agp_init(struct pci_dev *pdev)
 {
        u32 tmp, apbase, apbar, aplimit;
        struct pci_dev *dev1;
index e608dadece2fa0fa75878213c4012b99b1e1c28d..acb2de5e3a985db72fa6c94c05017ca162a9d7b1 100644 (file)
@@ -926,9 +926,10 @@ cy_sched_event(struct cyclades_port *info, int event)
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+       struct cyclades_port *info =
+               container_of(work, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
@@ -5328,7 +5329,7 @@ cy_init(void)
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-                   INIT_WORK(&info->tqueue, do_softint, info);
+                   INIT_WORK(&info->tqueue, do_softint);
                    init_waitqueue_head(&info->open_wait);
                    init_waitqueue_head(&info->close_wait);
                    init_waitqueue_head(&info->shutdown_wait);
@@ -5403,7 +5404,7 @@ cy_init(void)
                     info->blocked_open = 0;
                     info->default_threshold = 0;
                     info->default_timeout = 0;
-                   INIT_WORK(&info->tqueue, do_softint, info);
+                   INIT_WORK(&info->tqueue, do_softint);
                    init_waitqueue_head(&info->open_wait);
                    init_waitqueue_head(&info->close_wait);
                    init_waitqueue_head(&info->shutdown_wait);
index 85f404e25c7364d2a176b2ba2d75aa9d735da57e..8ea2bea2b1830feadbeb24f1f8d6d80e943a3ad6 100644 (file)
 extern int zs_init(void);
 #endif
 
-#ifdef CONFIG_DZ
-extern int dz_init(void);
-#endif
-
 #ifdef CONFIG_SERIAL_CONSOLE
 
 #ifdef CONFIG_ZS
 extern void zs_serial_console_init(void);
 #endif
 
-#ifdef CONFIG_DZ
-extern void dz_serial_console_init(void);
-#endif
-
 #endif
 
 /* rs_init - starts up the serial interface -
@@ -46,23 +38,11 @@ extern void dz_serial_console_init(void);
 
 int __init rs_init(void)
 {
-
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
     if (IOASIC)
        return zs_init();
-    else
-       return dz_init();
-#else
-
-#ifdef CONFIG_ZS
-    return zs_init();
-#endif
-
-#ifdef CONFIG_DZ
-    return dz_init();
-#endif
-
 #endif
+    return -ENXIO;
 }
 
 __initcall(rs_init);
@@ -76,21 +56,9 @@ __initcall(rs_init);
  */
 static int __init decserial_console_init(void)
 {
-#if defined(CONFIG_ZS) && defined(CONFIG_DZ)
+#ifdef CONFIG_ZS
     if (IOASIC)
        zs_serial_console_init();
-    else
-       dz_serial_console_init();
-#else
-
-#ifdef CONFIG_ZS
-    zs_serial_console_init();
-#endif
-
-#ifdef CONFIG_DZ
-    dz_serial_console_init();
-#endif
-
 #endif
     return 0;
 }
index 425c82336ee0310149a7c6461eeed7c5327d8d6a..19c81d2e13d01f2cb9cea0be7ecd936294dbed2a 100644 (file)
@@ -162,6 +162,7 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
 
        return 0;
 }
+EXPORT_SYMBOL(drm_sman_set_manager);
 
 static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
                                                 unsigned long owner)
index b40ae438f5315343248930f32df8f53f10b38845..ae2691942ddb16b2ffdd3a5f4703a7ddb95b694f 100644 (file)
@@ -147,14 +147,14 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
-               return NOPAGE_OOM;      /* Nothing allocated */
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
 
        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = (map->type == _DRM_CONSISTENT) ?
                virt_to_page((void *)i) : vmalloc_to_page((void *)i);
        if (!page)
-               return NOPAGE_OOM;
+               return NOPAGE_SIGBUS;
        get_page(page);
 
        DRM_DEBUG("shm_nopage 0x%lx\n", address);
@@ -272,7 +272,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
-               return NOPAGE_OOM;      /* Nothing allocated */
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
 
        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
@@ -310,7 +310,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
-               return NOPAGE_OOM;      /* Nothing allocated */
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
 
        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
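
The drm_vm hunks above return NOPAGE_SIGBUS rather than NOPAGE_OOM when a fault hits a region with nothing mapped behind it, so the faulting task gets SIGBUS instead of triggering out-of-memory handling for what is really an invalid access. A sketch of a nopage handler following that convention; example_lookup_page() is a hypothetical stand-in for the driver's own address-to-page translation:

#include <linux/mm.h>

static struct page *example_lookup_page(struct vm_area_struct *vma,
					unsigned long address);

static struct page *example_vm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;		/* disallow mremap past the mapping */

	page = example_lookup_page(vma, address);
	if (!page)
		return NOPAGE_SIGBUS;		/* nothing allocated: SIGBUS, not OOM */

	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
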
index 60c1695db3000e3611af463374ee0fd388439b07..806f9ce5f47b6a16dfb5c5ddc9943879b25239cb 100644 (file)
@@ -500,9 +500,9 @@ via_dmablit_timer(unsigned long data)
 
 
 static void 
-via_dmablit_workqueue(void *data)
+via_dmablit_workqueue(struct work_struct *work)
 {
-       drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+       drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
        drm_device_t *dev = blitq->dev;
        unsigned long irqsave;
        drm_via_sg_info_t *cur_sg;
@@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev)
                        DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
                }
                DRM_INIT_WAITQUEUE(&blitq->busy_queue);
-               INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+               INIT_WORK(&blitq->wq, via_dmablit_workqueue);
                init_timer(&blitq->poll_timer);
                blitq->poll_timer.function = &via_dmablit_timer;
                blitq->poll_timer.data = (unsigned long) blitq;
index 706733c0b36a7e7af833f97580fb978454dc0ed1..7c71eb779802a9f9ed1a8dde50b3943b86c25b96 100644 (file)
@@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *,
 static int info_ioctl(struct tty_struct *, struct file *,
                     unsigned int, unsigned long);
 static void pc_set_termios(struct tty_struct *, struct termios *);
-static void do_softint(void *);
+static void do_softint(struct work_struct *work);
 static void pc_stop(struct tty_struct *);
 static void pc_start(struct tty_struct *);
 static void pc_throttle(struct tty_struct * tty);
@@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd)
 
                ch->brdchan        = bc;
                ch->mailbox        = gd; 
-               INIT_WORK(&ch->tqueue, do_softint, ch);
+               INIT_WORK(&ch->tqueue, do_softint);
                ch->board          = &boards[crd];
 
                spin_lock_irqsave(&epca_lock, flags);
@@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
 
 /* --------------------- Begin do_softint  ----------------------- */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 { /* Begin do_softint */
-       struct channel *ch = (struct channel *) private_;
+       struct channel *ch = container_of(work, struct channel, tqueue);
        /* Called in response to a modem change event */
        if (ch && ch->magic == EPCA_MAGIC)  { /* Begin EPCA_MAGIC */
                struct tty_struct *tty = ch->tty;
index 15a4ea896328cc23ad6be070357b76baf41c59bd..93b5519625130362874ed36d48fc2f0fafe630f4 100644 (file)
@@ -723,9 +723,10 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
  * -------------------------------------------------------------------
  */
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-       struct esp_struct       *info = (struct esp_struct *) private_;
+       struct esp_struct       *info =
+               container_of(work, struct esp_struct, tqueue);
        struct tty_struct       *tty;
        
        tty = info->tty;
@@ -746,9 +747,10 @@ static void do_softint(void *private_)
  *     do_serial_hangup() -> tty->hangup() -> esp_hangup()
  * 
  */
-static void do_serial_hangup(void *private_)
+static void do_serial_hangup(struct work_struct *work)
 {
-       struct esp_struct       *info = (struct esp_struct *) private_;
+       struct esp_struct       *info =
+               container_of(work, struct esp_struct, tqueue_hangup);
        struct tty_struct       *tty;
        
        tty = info->tty;
@@ -2501,8 +2503,8 @@ static int __init espserial_init(void)
                info->magic = ESP_MAGIC;
                info->close_delay = 5*HZ/10;
                info->closing_wait = 30*HZ;
-               INIT_WORK(&info->tqueue, do_softint, info);
-               INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info);
+               INIT_WORK(&info->tqueue, do_softint);
+               INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
                info->config.rx_timeout = rx_timeout;
                info->config.flow_on = flow_on;
                info->config.flow_off = flow_off;
index 817dc409ac20a054c00219b4b8860b0c4d2ddff8..23b25ada65ea3c5b02405b1cf955afd17051d5f1 100644 (file)
@@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg);
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void genrtc_troutine(void *data)
+static void genrtc_troutine(struct work_struct *work)
 {
        unsigned int tmp = get_rtc_ss();
        
@@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit)
                irq_active = 1;
                stop_rtc_timers = 0;
                lostint = 0;
-               INIT_WORK(&genrtc_task, genrtc_troutine, NULL);
+               INIT_WORK(&genrtc_task, genrtc_troutine);
                oldsecs = get_rtc_ss();
                init_timer(&timer_task);
 
index 9902ffad3b12ddb26e8d064f3ea0b5f3d1a9bb66..cc2cd46bedc6ac6dac8dd7d256a3605c784128cf 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/delay.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
index 8728255c9463743046da3b69fde2b198d3fd1329..d090622f1dea2ec599040fd6aa0d967941d775ce 100644 (file)
@@ -337,11 +337,6 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp);
 static void hvcs_close(struct tty_struct *tty, struct file *filp);
 static void hvcs_hangup(struct tty_struct * tty);
 
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd);
-static void hvcs_remove_device_attrs(struct vio_dev *vdev);
-static void hvcs_create_driver_attrs(void);
-static void hvcs_remove_driver_attrs(void);
-
 static int __devinit hvcs_probe(struct vio_dev *dev,
                const struct vio_device_id *id);
 static int __devexit hvcs_remove(struct vio_dev *dev);
@@ -353,6 +348,172 @@ static void __exit hvcs_module_exit(void);
 #define HVCS_TRY_WRITE 0x00000004
 #define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ)
 
+static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
+{
+       return viod->dev.driver_data;
+}
+/* The sysfs interface for the driver and devices */
+
+static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct vio_dev *viod = to_vio_dev(dev);
+       struct hvcs_struct *hvcsd = from_vio_dev(viod);
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+       return retval;
+}
+static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
+
+static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct vio_dev *viod = to_vio_dev(dev);
+       struct hvcs_struct *hvcsd = from_vio_dev(viod);
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+       return retval;
+}
+static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
+
+static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
+               size_t count)
+{
+       /*
+        * Don't need this feature at the present time because firmware doesn't
+        * yet support multiple partners.
+        */
+       printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
+       return -EPERM;
+}
+
+static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct vio_dev *viod = to_vio_dev(dev);
+       struct hvcs_struct *hvcsd = from_vio_dev(viod);
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+       return retval;
+}
+
+static DEVICE_ATTR(current_vty,
+       S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
+
+static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
+               size_t count)
+{
+       struct vio_dev *viod = to_vio_dev(dev);
+       struct hvcs_struct *hvcsd = from_vio_dev(viod);
+       unsigned long flags;
+
+       /* writing a '0' to this sysfs entry will result in the disconnect. */
+       if (simple_strtol(buf, NULL, 0) != 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&hvcsd->lock, flags);
+
+       if (hvcsd->open_count > 0) {
+               spin_unlock_irqrestore(&hvcsd->lock, flags);
+               printk(KERN_INFO "HVCS: vterm state unchanged.  "
+                               "The hvcs device node is still in use.\n");
+               return -EPERM;
+       }
+
+       if (hvcsd->connected == 0) {
+               spin_unlock_irqrestore(&hvcsd->lock, flags);
+               printk(KERN_INFO "HVCS: vterm state unchanged. The"
+                               " vty-server is not connected to a vty.\n");
+               return -EPERM;
+       }
+
+       hvcs_partner_free(hvcsd);
+       printk(KERN_INFO "HVCS: Closed vty-server@%X and"
+                       " partner vty@%X:%d connection.\n",
+                       hvcsd->vdev->unit_address,
+                       hvcsd->p_unit_address,
+                       (uint32_t)hvcsd->p_partition_ID);
+
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+       return count;
+}
+
+static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct vio_dev *viod = to_vio_dev(dev);
+       struct hvcs_struct *hvcsd = from_vio_dev(viod);
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       retval = sprintf(buf, "%d\n", hvcsd->connected);
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+       return retval;
+}
+static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
+               hvcs_vterm_state_show, hvcs_vterm_state_store);
+
+static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct vio_dev *viod = to_vio_dev(dev);
+       struct hvcs_struct *hvcsd = from_vio_dev(viod);
+       unsigned long flags;
+       int retval;
+
+       spin_lock_irqsave(&hvcsd->lock, flags);
+       retval = sprintf(buf, "%d\n", hvcsd->index);
+       spin_unlock_irqrestore(&hvcsd->lock, flags);
+       return retval;
+}
+
+static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
+
+static struct attribute *hvcs_attrs[] = {
+       &dev_attr_partner_vtys.attr,
+       &dev_attr_partner_clcs.attr,
+       &dev_attr_current_vty.attr,
+       &dev_attr_vterm_state.attr,
+       &dev_attr_index.attr,
+       NULL,
+};
+
+static struct attribute_group hvcs_attr_group = {
+       .attrs = hvcs_attrs,
+};
+
+static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
+{
+       /* A 1 means it is updating, a 0 means it is done updating */
+       return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
+}
+
+static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
+               size_t count)
+{
+       if ((simple_strtol(buf, NULL, 0) != 1)
+               && (hvcs_rescan_status != 0))
+               return -EINVAL;
+
+       hvcs_rescan_status = 1;
+       printk(KERN_INFO "HVCS: rescanning partner info for all"
+               " vty-servers.\n");
+       hvcs_rescan_devices_list();
+       hvcs_rescan_status = 0;
+       return count;
+}
+
+static DRIVER_ATTR(rescan,
+       S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
+
 static void hvcs_kick(void)
 {
        hvcs_kicked = 1;
@@ -575,7 +736,7 @@ static void destroy_hvcs_struct(struct kobject *kobj)
        spin_unlock_irqrestore(&hvcsd->lock, flags);
        spin_unlock(&hvcs_structs_lock);
 
-       hvcs_remove_device_attrs(vdev);
+       sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
 
        kfree(hvcsd);
 }
@@ -608,6 +769,7 @@ static int __devinit hvcs_probe(
 {
        struct hvcs_struct *hvcsd;
        int index;
+       int retval;
 
        if (!dev || !id) {
                printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
@@ -658,14 +820,16 @@ static int __devinit hvcs_probe(
         * the hvcs_struct has been added to the devices list then the user app
         * will get -ENODEV.
         */
-
        spin_lock(&hvcs_structs_lock);
-
        list_add_tail(&(hvcsd->next), &hvcs_structs);
-
        spin_unlock(&hvcs_structs_lock);
 
-       hvcs_create_device_attrs(hvcsd);
+       retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
+       if (retval) {
+               printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
+                      hvcsd->vdev->unit_address);
+               return retval;
+       }
 
        printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
 
@@ -1354,8 +1518,10 @@ static int __init hvcs_module_init(void)
        if (!hvcs_tty_driver)
                return -ENOMEM;
 
-       if (hvcs_alloc_index_list(num_ttys_to_alloc))
-               return -ENOMEM;
+       if (hvcs_alloc_index_list(num_ttys_to_alloc)) {
+               rc = -ENOMEM;
+               goto index_fail;
+       }
 
        hvcs_tty_driver->owner = THIS_MODULE;
 
@@ -1385,41 +1551,57 @@ static int __init hvcs_module_init(void)
         * dynamically assigned major and minor numbers for our devices.
         */
        if (tty_register_driver(hvcs_tty_driver)) {
-               printk(KERN_ERR "HVCS: registration "
-                       " as a tty driver failed.\n");
-               hvcs_free_index_list();
-               put_tty_driver(hvcs_tty_driver);
-               return -EIO;
+               printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+               rc = -EIO;
+               goto register_fail;
        }
 
        hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!hvcs_pi_buff) {
-               tty_unregister_driver(hvcs_tty_driver);
-               hvcs_free_index_list();
-               put_tty_driver(hvcs_tty_driver);
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto buff_alloc_fail;
        }
 
        hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
        if (IS_ERR(hvcs_task)) {
                printk(KERN_ERR "HVCS: khvcsd creation failed.  Driver not loaded.\n");
-               kfree(hvcs_pi_buff);
-               tty_unregister_driver(hvcs_tty_driver);
-               hvcs_free_index_list();
-               put_tty_driver(hvcs_tty_driver);
-               return -EIO;
+               rc = -EIO;
+               goto kthread_fail;
        }
 
        rc = vio_register_driver(&hvcs_vio_driver);
+       if (rc) {
+               printk(KERN_ERR "HVCS: can't register vio driver\n");
+               goto vio_fail;
+       }
 
        /*
         * This needs to be done AFTER the vio_register_driver() call or else
         * the kobjects won't be initialized properly.
         */
-       hvcs_create_driver_attrs();
+       rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
+       if (rc) {
+               printk(KERN_ERR "HVCS: sysfs attr create failed\n");
+               goto attr_fail;
+       }
 
        printk(KERN_INFO "HVCS: driver module inserted.\n");
 
+       return 0;
+
+attr_fail:
+       vio_unregister_driver(&hvcs_vio_driver);
+vio_fail:
+       kthread_stop(hvcs_task);
+kthread_fail:
+       kfree(hvcs_pi_buff);
+buff_alloc_fail:
+       tty_unregister_driver(hvcs_tty_driver);
+register_fail:
+       hvcs_free_index_list();
+index_fail:
+       put_tty_driver(hvcs_tty_driver);
+       hvcs_tty_driver = NULL;
        return rc;
 }
 
@@ -1441,7 +1623,7 @@ static void __exit hvcs_module_exit(void)
        hvcs_pi_buff = NULL;
        spin_unlock(&hvcs_pi_lock);
 
-       hvcs_remove_driver_attrs();
+       driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
 
        vio_unregister_driver(&hvcs_vio_driver);
 
@@ -1456,191 +1638,3 @@ static void __exit hvcs_module_exit(void)
 
 module_init(hvcs_module_init);
 module_exit(hvcs_module_exit);
-
-static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
-{
-       return viod->dev.driver_data;
-}
-/* The sysfs interface for the driver and devices */
-
-static ssize_t hvcs_partner_vtys_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct vio_dev *viod = to_vio_dev(dev);
-       struct hvcs_struct *hvcsd = from_vio_dev(viod);
-       unsigned long flags;
-       int retval;
-
-       spin_lock_irqsave(&hvcsd->lock, flags);
-       retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
-       spin_unlock_irqrestore(&hvcsd->lock, flags);
-       return retval;
-}
-static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
-
-static ssize_t hvcs_partner_clcs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct vio_dev *viod = to_vio_dev(dev);
-       struct hvcs_struct *hvcsd = from_vio_dev(viod);
-       unsigned long flags;
-       int retval;
-
-       spin_lock_irqsave(&hvcsd->lock, flags);
-       retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
-       spin_unlock_irqrestore(&hvcsd->lock, flags);
-       return retval;
-}
-static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
-
-static ssize_t hvcs_current_vty_store(struct device *dev, struct device_attribute *attr, const char * buf,
-               size_t count)
-{
-       /*
-        * Don't need this feature at the present time because firmware doesn't
-        * yet support multiple partners.
-        */
-       printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n");
-       return -EPERM;
-}
-
-static ssize_t hvcs_current_vty_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct vio_dev *viod = to_vio_dev(dev);
-       struct hvcs_struct *hvcsd = from_vio_dev(viod);
-       unsigned long flags;
-       int retval;
-
-       spin_lock_irqsave(&hvcsd->lock, flags);
-       retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
-       spin_unlock_irqrestore(&hvcsd->lock, flags);
-       return retval;
-}
-
-static DEVICE_ATTR(current_vty,
-       S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store);
-
-static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribute *attr, const char *buf,
-               size_t count)
-{
-       struct vio_dev *viod = to_vio_dev(dev);
-       struct hvcs_struct *hvcsd = from_vio_dev(viod);
-       unsigned long flags;
-
-       /* writing a '0' to this sysfs entry will result in the disconnect. */
-       if (simple_strtol(buf, NULL, 0) != 0)
-               return -EINVAL;
-
-       spin_lock_irqsave(&hvcsd->lock, flags);
-
-       if (hvcsd->open_count > 0) {
-               spin_unlock_irqrestore(&hvcsd->lock, flags);
-               printk(KERN_INFO "HVCS: vterm state unchanged.  "
-                               "The hvcs device node is still in use.\n");
-               return -EPERM;
-       }
-
-       if (hvcsd->connected == 0) {
-               spin_unlock_irqrestore(&hvcsd->lock, flags);
-               printk(KERN_INFO "HVCS: vterm state unchanged. The"
-                               " vty-server is not connected to a vty.\n");
-               return -EPERM;
-       }
-
-       hvcs_partner_free(hvcsd);
-       printk(KERN_INFO "HVCS: Closed vty-server@%X and"
-                       " partner vty@%X:%d connection.\n",
-                       hvcsd->vdev->unit_address,
-                       hvcsd->p_unit_address,
-                       (uint32_t)hvcsd->p_partition_ID);
-
-       spin_unlock_irqrestore(&hvcsd->lock, flags);
-       return count;
-}
-
-static ssize_t hvcs_vterm_state_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct vio_dev *viod = to_vio_dev(dev);
-       struct hvcs_struct *hvcsd = from_vio_dev(viod);
-       unsigned long flags;
-       int retval;
-
-       spin_lock_irqsave(&hvcsd->lock, flags);
-       retval = sprintf(buf, "%d\n", hvcsd->connected);
-       spin_unlock_irqrestore(&hvcsd->lock, flags);
-       return retval;
-}
-static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR,
-               hvcs_vterm_state_show, hvcs_vterm_state_store);
-
-static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct vio_dev *viod = to_vio_dev(dev);
-       struct hvcs_struct *hvcsd = from_vio_dev(viod);
-       unsigned long flags;
-       int retval;
-
-       spin_lock_irqsave(&hvcsd->lock, flags);
-       retval = sprintf(buf, "%d\n", hvcsd->index);
-       spin_unlock_irqrestore(&hvcsd->lock, flags);
-       return retval;
-}
-
-static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
-
-static struct attribute *hvcs_attrs[] = {
-       &dev_attr_partner_vtys.attr,
-       &dev_attr_partner_clcs.attr,
-       &dev_attr_current_vty.attr,
-       &dev_attr_vterm_state.attr,
-       &dev_attr_index.attr,
-       NULL,
-};
-
-static struct attribute_group hvcs_attr_group = {
-       .attrs = hvcs_attrs,
-};
-
-static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd)
-{
-       struct vio_dev *vdev = hvcsd->vdev;
-       sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static void hvcs_remove_device_attrs(struct vio_dev *vdev)
-{
-       sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-}
-
-static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf)
-{
-       /* A 1 means it is updating, a 0 means it is done updating */
-       return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status);
-}
-
-static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf,
-               size_t count)
-{
-       if ((simple_strtol(buf, NULL, 0) != 1)
-               && (hvcs_rescan_status != 0))
-               return -EINVAL;
-
-       hvcs_rescan_status = 1;
-       printk(KERN_INFO "HVCS: rescanning partner info for all"
-               " vty-servers.\n");
-       hvcs_rescan_devices_list();
-       hvcs_rescan_status = 0;
-       return count;
-}
-static DRIVER_ATTR(rescan,
-       S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store);
-
-static void hvcs_create_driver_attrs(void)
-{
-       struct device_driver *driverfs = &(hvcs_vio_driver.driver);
-       driver_create_file(driverfs, &driver_attr_rescan);
-}
-
-static void hvcs_remove_driver_attrs(void)
-{
-       struct device_driver *driverfs = &(hvcs_vio_driver.driver);
-       driver_remove_file(driverfs, &driver_attr_rescan);
-}
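
The hvcs rework above moves the sysfs attribute code ahead of the functions that use it, creates the attributes as a single attribute_group whose sysfs_create_group() return value is checked, and converts hvcs_module_init() to goto-based unwinding. The grouped-attribute part of the pattern, reduced to a hypothetical single attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

static struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static int example_add_attrs(struct device *dev)
{
	/* create the whole group at once and let the caller see a failure */
	return sysfs_create_group(&dev->kobj, &example_attr_group);
}

static void example_del_attrs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_attr_group);
}
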
index 2cf63e7305a3b067896df8a4c7bd35d77d1980c0..82a41d5b4ed0b595d8056b114673a8fd36fcd6f6 100644 (file)
@@ -69,7 +69,7 @@
 #define __ALIGNED__    __attribute__((__aligned__(sizeof(long))))
 
 struct hvsi_struct {
-       struct work_struct writer;
+       struct delayed_work writer;
        struct work_struct handshaker;
        wait_queue_head_t emptyq; /* woken when outbuf is emptied */
        wait_queue_head_t stateq; /* woken when HVSI state changes */
@@ -744,9 +744,10 @@ static int hvsi_handshake(struct hvsi_struct *hp)
        return 0;
 }
 
-static void hvsi_handshaker(void *arg)
+static void hvsi_handshaker(struct work_struct *work)
 {
-       struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+       struct hvsi_struct *hp =
+               container_of(work, struct hvsi_struct, handshaker);
 
        if (hvsi_handshake(hp) >= 0)
                return;
@@ -951,9 +952,10 @@ static void hvsi_push(struct hvsi_struct *hp)
 }
 
 /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */
-static void hvsi_write_worker(void *arg)
+static void hvsi_write_worker(struct work_struct *work)
 {
-       struct hvsi_struct *hp = (struct hvsi_struct *)arg;
+       struct hvsi_struct *hp =
+               container_of(work, struct hvsi_struct, writer.work);
        unsigned long flags;
 #ifdef DEBUG
        static long start_j = 0;
@@ -1287,8 +1289,8 @@ static int __init hvsi_console_init(void)
                }
 
                hp = &hvsi_ports[hvsi_count];
-               INIT_WORK(&hp->writer, hvsi_write_worker, hp);
-               INIT_WORK(&hp->handshaker, hvsi_handshaker, hp);
+               INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker);
+               INIT_WORK(&hp->handshaker, hvsi_handshaker);
                init_waitqueue_head(&hp->emptyq);
                init_waitqueue_head(&hp->stateq);
                spin_lock_init(&hp->lock);
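
When a work item that used to be a plain work_struct becomes a struct delayed_work embedded in a larger object, as in the hvsi hunks above, the handler reaches the owner through the .work member of the delayed_work. A reduced sketch with a hypothetical port structure:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_port {
	struct delayed_work writer;
	/* ... output buffer, locks ... */
};

static void example_write_worker(struct work_struct *work)
{
	struct example_port *port =
		container_of(work, struct example_port, writer.work);

	/* ... push data out, rescheduling &port->writer if needed ... */
}

static void example_port_init(struct example_port *port)
{
	INIT_DELAYED_WORK(&port->writer, example_write_worker);
}
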
index 9f7635f7517827123f3427408ab9123d60799105..5f3acd8e64b86d9e6ac8e685b61329322aada8c6 100644 (file)
@@ -3,17 +3,20 @@
 #
 
 config HW_RANDOM
-       bool "Hardware Random Number Generator Core support"
-       default y
+       tristate "Hardware Random Number Generator Core support"
+       default m
        ---help---
          Hardware Random Number Generator Core infrastructure.
 
+         To compile this driver as a module, choose M here: the
+         module will be called rng-core.
+
          If unsure, say Y.
 
 config HW_RANDOM_INTEL
        tristate "Intel HW Random Number Generator support"
        depends on HW_RANDOM && (X86 || IA64) && PCI
-       default y
+       default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
          Generator hardware found on Intel i8xx-based motherboards.
@@ -26,7 +29,7 @@ config HW_RANDOM_INTEL
 config HW_RANDOM_AMD
        tristate "AMD HW Random Number Generator support"
        depends on HW_RANDOM && X86 && PCI
-       default y
+       default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
          Generator hardware found on AMD 76x-based motherboards.
@@ -39,7 +42,7 @@ config HW_RANDOM_AMD
 config HW_RANDOM_GEODE
        tristate "AMD Geode HW Random Number Generator support"
        depends on HW_RANDOM && X86 && PCI
-       default y
+       default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
          Generator hardware found on the AMD Geode LX.
@@ -52,7 +55,7 @@ config HW_RANDOM_GEODE
 config HW_RANDOM_VIA
        tristate "VIA HW Random Number Generator support"
        depends on HW_RANDOM && X86_32
-       default y
+       default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
          Generator hardware found on VIA based motherboards.
@@ -65,7 +68,7 @@ config HW_RANDOM_VIA
 config HW_RANDOM_IXP4XX
        tristate "Intel IXP4xx NPU HW Random Number Generator support"
        depends on HW_RANDOM && ARCH_IXP4XX
-       default y
+       default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random
          Number Generator hardware found on the Intel IXP4xx NPU.
@@ -78,7 +81,7 @@ config HW_RANDOM_IXP4XX
 config HW_RANDOM_OMAP
        tristate "OMAP Random Number Generator support"
        depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
-       default y
+       default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
          Generator hardware found on OMAP16xx and OMAP24xx multimedia
index e263ae96f94052dfd2ef718ea566117b73e5778a..c41fa19454e3910dfce0a1bbe4d7d33eddca77fe 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for HW Random Number Generator (RNG) device drivers.
 #
 
-obj-$(CONFIG_HW_RANDOM) += core.o
+obj-$(CONFIG_HW_RANDOM) += rng-core.o
+rng-core-y := core.o
 obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
 obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
 obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
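
Two related changes here: HW_RANDOM becomes a tristate defaulting to m, and the Makefile renames the object so the module is called rng-core.ko, presumably because a module named just core.ko would be too generic; the per-vendor entries switch from default y to default HW_RANDOM so they track however the core is configured. With the core and one backend loaded, the framework exposes a character device for raw hardware random bytes. A small userspace read, assuming the commonly used /dev/hwrng node (older setups exposed it as /dev/hw_random):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd, i;

	fd = open("/dev/hwrng", O_RDONLY);	/* node name varies by kernel/udev */
	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));		/* blocks until the RNG delivers */
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}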
index baa4e721b758d84b5b5d59c7dc943cb33b1fe2d5..29277ec6b8ed837056c888806fbc994dcf49f2e7 100644 (file)
@@ -367,11 +367,6 @@ static UCHAR cc02[];
 #define CSE_NULL  3  // Replace with a null
 #define CSE_MARK  4  // Replace with a 3-character sequence (as Unix)
 
-#define  CMD_SET_REPLACEMENT(arg,ch)   \
-                       (((cmdSyntaxPtr)(ct36a))->cmd[1] = (arg), \
-                       (((cmdSyntaxPtr)(ct36a))->cmd[2] = (ch),  \
-                       (cmdSyntaxPtr)(ct36a))
-
 #define CSE_REPLACE  0x8       // Replace the errored character with the
                                                        // replacement character defined here
 
index 54d93f0345e8565ae2459ce236bd9a8be49cc8f3..78045767ec33691d7b8b89770c00f5f39b935f48 100644 (file)
@@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr);
 static void serviceOutgoingFifo(i2eBordStrPtr);
 
 // Functions defined in ip2.c as part of interrupt handling
-static void do_input(void *);
-static void do_status(void *);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 //***************
 //* Debug  Data *
@@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
                pCh->ClosingWaitTime  = 30*HZ;
 
                // Initialize task queue objects
-               INIT_WORK(&pCh->tqueue_input, do_input, pCh);
-               INIT_WORK(&pCh->tqueue_status, do_status, pCh);
+               INIT_WORK(&pCh->tqueue_input, do_input);
+               INIT_WORK(&pCh->tqueue_status, do_status);
 
 #ifdef IP2DEBUG_TRACE
                pCh->trace = ip2trace;
@@ -1016,7 +1016,6 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
        unsigned short channel;
        unsigned short stuffIndex;
        unsigned long flags;
-       int rc = 0;
 
        int bailout = 10;
 
@@ -1573,7 +1572,7 @@ i2StripFifo(i2eBordStrPtr pB)
 #ifdef USE_IQ
                        schedule_work(&pCh->tqueue_input);
 #else
-                       do_input(pCh);
+                       do_input(&pCh->tqueue_input);
 #endif
 
                        // Note we do not need to maintain any flow-control credits at this
@@ -1810,7 +1809,7 @@ i2StripFifo(i2eBordStrPtr pB)
 #ifdef USE_IQ
                                                schedule_work(&pCh->tqueue_status);
 #else
-                                               do_status(pCh);
+                                               do_status(&pCh->tqueue_status);
 #endif
                                        }
                                }
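
Note the !USE_IQ branches: the synchronous path now calls do_input(&pCh->tqueue_input) and do_status(&pCh->tqueue_status) rather than passing pCh itself, so the handlers (converted to take a struct work_struct * in ip2main.c, below) can use container_of() to get back to the channel whether they were scheduled as work or called directly. container_of() is plain pointer arithmetic over offsetof(); a self-contained userspace illustration with stand-in types (not driver code):

#include <stddef.h>
#include <stdio.h>

/* simplified: the kernel version adds type checking */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };			/* stand-in for work_struct */

struct channel {
	int index;
	struct work tqueue_input;		/* embedded work item */
};

static void do_input(struct work *work)
{
	struct channel *ch = container_of(work, struct channel, tqueue_input);

	printf("servicing channel %d\n", ch->index);
}

int main(void)
{
	struct channel ch = { .index = 3 };

	do_input(&ch.tqueue_input);	/* direct call, like the !USE_IQ path */
	return 0;
}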
index a3f32d46d2f80300bb1dfc1a3e28a39655bd3b0e..cda2459c1d602293a5f80dc8bbb4c7430b46929e 100644 (file)
@@ -189,12 +189,12 @@ static int  ip2_tiocmset(struct tty_struct *tty, struct file *file,
                         unsigned int set, unsigned int clear);
 
 static void set_irq(int, int);
-static void ip2_interrupt_bh(i2eBordStrPtr pB);
+static void ip2_interrupt_bh(struct work_struct *work);
 static irqreturn_t ip2_interrupt(int irq, void *dev_id);
 static void ip2_poll(unsigned long arg);
 static inline void service_all_boards(void);
-static void do_input(void *p);
-static void do_status(void *p);
+static void do_input(struct work_struct *);
+static void do_status(struct work_struct *);
 
 static void ip2_wait_until_sent(PTTY,int);
 
@@ -918,7 +918,7 @@ ip2_init_board( int boardnum )
                pCh++;
        }
 ex_exit:
-       INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB);
+       INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
        return;
 
 err_release_region:
@@ -1125,8 +1125,8 @@ service_all_boards(void)
 
 
 /******************************************************************************/
-/* Function:   ip2_interrupt_bh(pB)                                           */
-/* Parameters: pB - pointer to the board structure                            */
+/* Function:   ip2_interrupt_bh(work)                                         */
+/* Parameters: work - pointer to the board structure                          */
 /* Returns:    Nothing                                                        */
 /*                                                                            */
 /* Description:                                                               */
@@ -1135,8 +1135,9 @@ service_all_boards(void)
 /*                                                                            */
 /******************************************************************************/
 static void
-ip2_interrupt_bh(i2eBordStrPtr pB)
+ip2_interrupt_bh(struct work_struct *work)
 {
+       i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
 //     pB better well be set or we have a problem!  We can only get
 //     here from the IMMEDIATE queue.  Here, we process the boards.
 //     Checking pB doesn't cost much and it saves us from the sanity checkers.
@@ -1245,9 +1246,9 @@ ip2_poll(unsigned long arg)
        ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
 }
 
-static void do_input(void *p)
+static void do_input(struct work_struct *work)
 {
-       i2ChanStrPtr pCh = p;
+       i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
        unsigned long flags;
 
        ip2trace(CHANN, ITRC_INPUT, 21, 0 );
@@ -1279,9 +1280,9 @@ static inline void  isig(int sig, struct tty_struct *tty, int flush)
        }
 }
 
-static void do_status(void *p)
+static void do_status(struct work_struct *work)
 {
-       i2ChanStrPtr pCh = p;
+       i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
        int status;
 
        status =  i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
index 0030cd8e2e956cb436ae781c854ba41df23058a9..6c59baa887a8c7d1fc1352034303b07ba85eec05 100644 (file)
 #include <linux/ipmi_msgdefs.h>                /* for completion codes */
 #include "ipmi_si_sm.h"
 
-static int bt_debug = 0x00;    /* Production value 0, see following flags */
+#define BT_DEBUG_OFF   0       /* Used in production */
+#define BT_DEBUG_ENABLE        1       /* Generic messages */
+#define BT_DEBUG_MSG   2       /* Prints all request/response buffers */
+#define BT_DEBUG_STATES        4       /* Verbose look at state changes */
+
+static int bt_debug = BT_DEBUG_OFF;
 
-#define        BT_DEBUG_ENABLE 1
-#define BT_DEBUG_MSG   2
-#define BT_DEBUG_STATES        4
 module_param(bt_debug, int, 0644);
 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
@@ -47,38 +49,54 @@ MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
    Since the Open IPMI architecture is single-message oriented at this
    stage, the queue depth of BT is of no concern. */
 
-#define BT_NORMAL_TIMEOUT      5000000 /* seconds in microseconds */
-#define BT_RETRY_LIMIT         2
-#define BT_RESET_DELAY         6000000 /* 6 seconds after warm reset */
+#define BT_NORMAL_TIMEOUT      5       /* seconds */
+#define BT_NORMAL_RETRY_LIMIT  2
+#define BT_RESET_DELAY         6       /* seconds after warm reset */
+
+/* States are written in chronological order and usually cover
+   multiple rows of the state table discussion in the IPMI spec. */
 
 enum bt_states {
-       BT_STATE_IDLE,
+       BT_STATE_IDLE = 0,      /* Order is critical in this list */
        BT_STATE_XACTION_START,
        BT_STATE_WRITE_BYTES,
-       BT_STATE_WRITE_END,
        BT_STATE_WRITE_CONSUME,
-       BT_STATE_B2H_WAIT,
-       BT_STATE_READ_END,
-       BT_STATE_RESET1,                /* These must come last */
+       BT_STATE_READ_WAIT,
+       BT_STATE_CLEAR_B2H,
+       BT_STATE_READ_BYTES,
+       BT_STATE_RESET1,        /* These must come last */
        BT_STATE_RESET2,
        BT_STATE_RESET3,
        BT_STATE_RESTART,
-       BT_STATE_HOSED
+       BT_STATE_PRINTME,
+       BT_STATE_CAPABILITIES_BEGIN,
+       BT_STATE_CAPABILITIES_END,
+       BT_STATE_LONG_BUSY      /* BT doesn't get hosed :-) */
 };
 
+/* Macros seen at the end of state "case" blocks.  They help with legibility
+   and debugging. */
+
+#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+
+#define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
+
 struct si_sm_data {
        enum bt_states  state;
-       enum bt_states  last_state;     /* assist printing and resets */
        unsigned char   seq;            /* BT sequence number */
        struct si_sm_io *io;
-        unsigned char  write_data[IPMI_MAX_MSG_LENGTH];
-        int            write_count;
-        unsigned char  read_data[IPMI_MAX_MSG_LENGTH];
-        int            read_count;
-        int            truncated;
-        long           timeout;
-        unsigned int   error_retries;  /* end of "common" fields */
+       unsigned char   write_data[IPMI_MAX_MSG_LENGTH];
+       int             write_count;
+       unsigned char   read_data[IPMI_MAX_MSG_LENGTH];
+       int             read_count;
+       int             truncated;
+       long            timeout;        /* microseconds countdown */
+       int             error_retries;  /* end of "common" fields */
        int             nonzero_status; /* hung BMCs stay all 0 */
+       enum bt_states  complete;       /* to divert the state machine */
+       int             BT_CAP_outreqs;
+       long            BT_CAP_req2rsp;
+       int             BT_CAP_retries; /* Recommended retries */
 };
 
 #define BT_CLR_WR_PTR  0x01    /* See IPMI 1.5 table 11.6.4 */
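
The reworked enum and helper macros set up the rest of the file: the states are kept in a deliberate order because later code compares them with relational operators (for example bt->state < BT_STATE_WRITE_BYTES to decide whether a stale B2H response can be drained), and BT_STATE_CHANGE()/BT_SI_SM_RETURN() make every switch arm end with an explicit state assignment plus return value. A toy state machine in the same style, with hypothetical names, shows how the macro keeps the case blocks short:

#include <stdio.h>

enum st { ST_IDLE, ST_RUN, ST_DONE };		/* order can matter, as above */
enum res { AGAIN, FINISHED };

/* same shape as BT_STATE_CHANGE: set the next state, then return */
#define STATE_CHANGE(m, s, r) { (m)->state = (s); return (r); }

struct machine { enum st state; int steps; };

static enum res step(struct machine *m)
{
	switch (m->state) {
	case ST_IDLE:
		STATE_CHANGE(m, ST_RUN, AGAIN);
	case ST_RUN:
		if (++m->steps < 3)
			return AGAIN;		/* stay in this state */
		STATE_CHANGE(m, ST_DONE, AGAIN);
	case ST_DONE:
		return FINISHED;
	}
	return FINISHED;
}

int main(void)
{
	struct machine m = { ST_IDLE, 0 };

	while (step(&m) != FINISHED)
		;	/* a real caller would decide whether to delay here */
	printf("done after %d steps\n", m.steps);
	return 0;
}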
@@ -111,86 +129,118 @@ struct si_sm_data {
 static char *state2txt(unsigned char state)
 {
        switch (state) {
-               case BT_STATE_IDLE:             return("IDLE");
-               case BT_STATE_XACTION_START:    return("XACTION");
-               case BT_STATE_WRITE_BYTES:      return("WR_BYTES");
-               case BT_STATE_WRITE_END:        return("WR_END");
-               case BT_STATE_WRITE_CONSUME:    return("WR_CONSUME");
-               case BT_STATE_B2H_WAIT:         return("B2H_WAIT");
-               case BT_STATE_READ_END:         return("RD_END");
-               case BT_STATE_RESET1:           return("RESET1");
-               case BT_STATE_RESET2:           return("RESET2");
-               case BT_STATE_RESET3:           return("RESET3");
-               case BT_STATE_RESTART:          return("RESTART");
-               case BT_STATE_HOSED:            return("HOSED");
+       case BT_STATE_IDLE:             return("IDLE");
+       case BT_STATE_XACTION_START:    return("XACTION");
+       case BT_STATE_WRITE_BYTES:      return("WR_BYTES");
+       case BT_STATE_WRITE_CONSUME:    return("WR_CONSUME");
+       case BT_STATE_READ_WAIT:        return("RD_WAIT");
+       case BT_STATE_CLEAR_B2H:        return("CLEAR_B2H");
+       case BT_STATE_READ_BYTES:       return("RD_BYTES");
+       case BT_STATE_RESET1:           return("RESET1");
+       case BT_STATE_RESET2:           return("RESET2");
+       case BT_STATE_RESET3:           return("RESET3");
+       case BT_STATE_RESTART:          return("RESTART");
+       case BT_STATE_LONG_BUSY:        return("LONG_BUSY");
+       case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
+       case BT_STATE_CAPABILITIES_END: return("CAP_END");
        }
        return("BAD STATE");
 }
 #define STATE2TXT state2txt(bt->state)
 
-static char *status2txt(unsigned char status, char *buf)
+static char *status2txt(unsigned char status)
 {
+       /*
+        * This cannot be called by two threads at the same time and
+        * the buffer is always consumed immediately, so the static is
+        * safe to use.
+        */
+       static char buf[40];
+
        strcpy(buf, "[ ");
-       if (status & BT_B_BUSY) strcat(buf, "B_BUSY ");
-       if (status & BT_H_BUSY) strcat(buf, "H_BUSY ");
-       if (status & BT_OEM0) strcat(buf, "OEM0 ");
-       if (status & BT_SMS_ATN) strcat(buf, "SMS ");
-       if (status & BT_B2H_ATN) strcat(buf, "B2H ");
-       if (status & BT_H2B_ATN) strcat(buf, "H2B ");
+       if (status & BT_B_BUSY)
+               strcat(buf, "B_BUSY ");
+       if (status & BT_H_BUSY)
+               strcat(buf, "H_BUSY ");
+       if (status & BT_OEM0)
+               strcat(buf, "OEM0 ");
+       if (status & BT_SMS_ATN)
+               strcat(buf, "SMS ");
+       if (status & BT_B2H_ATN)
+               strcat(buf, "B2H ");
+       if (status & BT_H2B_ATN)
+               strcat(buf, "H2B ");
        strcat(buf, "]");
        return buf;
 }
-#define STATUS2TXT(buf) status2txt(status, buf)
+#define STATUS2TXT status2txt(status)
+
+/* called externally at insmod time, and internally on cleanup */
 
-/* This will be called from within this module on a hosed condition */
-#define FIRST_SEQ      0
 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
 {
-       bt->state = BT_STATE_IDLE;
-       bt->last_state = BT_STATE_IDLE;
-       bt->seq = FIRST_SEQ;
-       bt->io = io;
-       bt->write_count = 0;
-       bt->read_count = 0;
-       bt->error_retries = 0;
-       bt->nonzero_status = 0;
-       bt->truncated = 0;
-       bt->timeout = BT_NORMAL_TIMEOUT;
+       memset(bt, 0, sizeof(struct si_sm_data));
+       if (bt->io != io) {             /* external: one-time only things */
+               bt->io = io;
+               bt->seq = 0;
+       }
+       bt->state = BT_STATE_IDLE;      /* start here */
+       bt->complete = BT_STATE_IDLE;   /* end here */
+       bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+       bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
+       /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
        return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
+/* Jam a completion code (probably an error) into a response */
+
+static void force_result(struct si_sm_data *bt, unsigned char completion_code)
+{
+       bt->read_data[0] = 4;                           /* # following bytes */
+       bt->read_data[1] = bt->write_data[1] | 4;       /* Odd NetFn/LUN */
+       bt->read_data[2] = bt->write_data[2];           /* seq (ignored) */
+       bt->read_data[3] = bt->write_data[3];           /* Command */
+       bt->read_data[4] = completion_code;
+       bt->read_count = 5;
+}
+
+/* The upper state machine starts here */
+
 static int bt_start_transaction(struct si_sm_data *bt,
                                unsigned char *data,
                                unsigned int size)
 {
        unsigned int i;
 
-       if ((size < 2) || (size > (IPMI_MAX_MSG_LENGTH - 2)))
-              return -1;
+       if (size < 2)
+               return IPMI_REQ_LEN_INVALID_ERR;
+       if (size > IPMI_MAX_MSG_LENGTH)
+               return IPMI_REQ_LEN_EXCEEDED_ERR;
 
-       if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
-               return -2;
+       if (bt->state == BT_STATE_LONG_BUSY)
+               return IPMI_NODE_BUSY_ERR;
+
+       if (bt->state != BT_STATE_IDLE)
+               return IPMI_NOT_IN_MY_STATE_ERR;
 
        if (bt_debug & BT_DEBUG_MSG) {
-               printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
-               printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
+               printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
+               printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
                for (i = 0; i < size; i ++)
-                      printk (" %02x", data[i]);
+                       printk (" %02x", data[i]);
                printk("\n");
        }
        bt->write_data[0] = size + 1;   /* all data plus seq byte */
        bt->write_data[1] = *data;      /* NetFn/LUN */
-       bt->write_data[2] = bt->seq;
+       bt->write_data[2] = bt->seq++;
        memcpy(bt->write_data + 3, data + 1, size - 1);
        bt->write_count = size + 2;
-
        bt->error_retries = 0;
        bt->nonzero_status = 0;
-       bt->read_count = 0;
        bt->truncated = 0;
        bt->state = BT_STATE_XACTION_START;
-       bt->last_state = BT_STATE_IDLE;
-       bt->timeout = BT_NORMAL_TIMEOUT;
+       bt->timeout = bt->BT_CAP_req2rsp;
+       force_result(bt, IPMI_ERR_UNSPECIFIED);
        return 0;
 }
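
bt_start_transaction() and force_result() together pin down the buffer layout the rest of the state machine relies on: a request is {length = size + 1, NetFn/LUN, seq, cmd, data...} and a synthesized (error) response is {4, NetFn|4 to mark a response, seq, cmd, completion code}. A standalone sketch that packs both the same way (hypothetical helper names, using the Get BT Capabilities request seen later in this patch):

#include <stdio.h>
#include <string.h>

#define IPMI_ERR_UNSPECIFIED	0xff	/* generic completion code */

/* Pack {NetFn/LUN, cmd, data...} with the framing used by write_data[] */
static int pack_request(unsigned char *out, unsigned char seq,
			const unsigned char *msg, unsigned int size)
{
	out[0] = size + 1;		/* all data plus the seq byte */
	out[1] = msg[0];		/* NetFn/LUN */
	out[2] = seq;
	memcpy(out + 3, msg + 1, size - 1);
	return size + 2;		/* bytes that go to the interface */
}

/* Jam a completion code into a response, as force_result() does */
static int pack_error(unsigned char *rsp, const unsigned char *req,
		      unsigned char cc)
{
	rsp[0] = 4;			/* # following bytes */
	rsp[1] = req[1] | 4;		/* odd NetFn/LUN marks a response */
	rsp[2] = req[2];		/* seq (ignored upstream) */
	rsp[3] = req[3];		/* command */
	rsp[4] = cc;
	return 5;
}

int main(void)
{
	unsigned char msg[] = { 0x18, 0x36 };	/* Get BT Capabilities */
	unsigned char req[32], rsp[8];
	int i, n = pack_request(req, 1, msg, sizeof(msg));

	for (i = 0; i < n; i++)
		printf("%02x ", req[i]);
	printf("\n");
	n = pack_error(rsp, req, IPMI_ERR_UNSPECIFIED);
	for (i = 0; i < n; i++)
		printf("%02x ", rsp[i]);
	printf("\n");
	return 0;
}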
 
@@ -198,38 +248,30 @@ static int bt_start_transaction(struct si_sm_data *bt,
    it calls this.  Strip out the length and seq bytes. */
 
 static int bt_get_result(struct si_sm_data *bt,
-                          unsigned char *data,
-                          unsigned int length)
+                        unsigned char *data,
+                        unsigned int length)
 {
        int i, msg_len;
 
        msg_len = bt->read_count - 2;           /* account for length & seq */
-       /* Always NetFn, Cmd, cCode */
        if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
-               printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len);
-               data[0] = bt->write_data[1] | 0x4;      /* Kludge a response */
-               data[1] = bt->write_data[3];
-               data[2] = IPMI_ERR_UNSPECIFIED;
+               force_result(bt, IPMI_ERR_UNSPECIFIED);
                msg_len = 3;
-       } else {
-               data[0] = bt->read_data[1];
-               data[1] = bt->read_data[3];
-               if (length < msg_len)
-                      bt->truncated = 1;
-               if (bt->truncated) {    /* can be set in read_all_bytes() */
-                       data[2] = IPMI_ERR_MSG_TRUNCATED;
-                       msg_len = 3;
-               } else
-                      memcpy(data + 2, bt->read_data + 4, msg_len - 2);
+       }
+       data[0] = bt->read_data[1];
+       data[1] = bt->read_data[3];
+       if (length < msg_len || bt->truncated) {
+               data[2] = IPMI_ERR_MSG_TRUNCATED;
+               msg_len = 3;
+       } else
+               memcpy(data + 2, bt->read_data + 4, msg_len - 2);
 
-               if (bt_debug & BT_DEBUG_MSG) {
-                       printk (KERN_WARNING "BT: res (raw)");
-                       for (i = 0; i < msg_len; i++)
-                              printk(" %02x", data[i]);
-                       printk ("\n");
-               }
+       if (bt_debug & BT_DEBUG_MSG) {
+               printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+               for (i = 0; i < msg_len; i++)
+                       printk(" %02x", data[i]);
+               printk ("\n");
        }
-       bt->read_count = 0;     /* paranoia */
        return msg_len;
 }
 
@@ -238,22 +280,40 @@ static int bt_get_result(struct si_sm_data *bt,
 
 static void reset_flags(struct si_sm_data *bt)
 {
+       if (bt_debug)
+               printk(KERN_WARNING "IPMI BT: flag reset %s\n",
+                                       status2txt(BT_STATUS));
        if (BT_STATUS & BT_H_BUSY)
-              BT_CONTROL(BT_H_BUSY);
-       if (BT_STATUS & BT_B_BUSY)
-              BT_CONTROL(BT_B_BUSY);
-       BT_CONTROL(BT_CLR_WR_PTR);
-       BT_CONTROL(BT_SMS_ATN);
-
-       if (BT_STATUS & BT_B2H_ATN) {
-               int i;
-               BT_CONTROL(BT_H_BUSY);
-               BT_CONTROL(BT_B2H_ATN);
-               BT_CONTROL(BT_CLR_RD_PTR);
-               for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++)
-                      BMC2HOST;
-               BT_CONTROL(BT_H_BUSY);
-       }
+               BT_CONTROL(BT_H_BUSY);  /* force clear */
+       BT_CONTROL(BT_CLR_WR_PTR);      /* always reset */
+       BT_CONTROL(BT_SMS_ATN);         /* always clear */
+       BT_INTMASK_W(BT_BMC_HWRST);
+}
+
+/* Get rid of an unwanted/stale response.  This should only be needed for
+   BMCs that support multiple outstanding requests. */
+
+static void drain_BMC2HOST(struct si_sm_data *bt)
+{
+       int i, size;
+
+       if (!(BT_STATUS & BT_B2H_ATN))  /* Not signalling a response */
+               return;
+
+       BT_CONTROL(BT_H_BUSY);          /* now set */
+       BT_CONTROL(BT_B2H_ATN);         /* always clear */
+       BT_STATUS;                      /* pause */
+       BT_CONTROL(BT_B2H_ATN);         /* some BMCs are stubborn */
+       BT_CONTROL(BT_CLR_RD_PTR);      /* always reset */
+       if (bt_debug)
+               printk(KERN_WARNING "IPMI BT: stale response %s; ",
+                       status2txt(BT_STATUS));
+       size = BMC2HOST;
+       for (i = 0; i < size ; i++)
+               BMC2HOST;
+       BT_CONTROL(BT_H_BUSY);          /* now clear */
+       if (bt_debug)
+               printk("drained %d bytes\n", size + 1);
 }
 
 static inline void write_all_bytes(struct si_sm_data *bt)
@@ -261,201 +321,256 @@ static inline void write_all_bytes(struct si_sm_data *bt)
        int i;
 
        if (bt_debug & BT_DEBUG_MSG) {
-               printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
+               printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
                        bt->write_count, bt->seq);
                for (i = 0; i < bt->write_count; i++)
                        printk (" %02x", bt->write_data[i]);
                printk ("\n");
        }
        for (i = 0; i < bt->write_count; i++)
-              HOST2BMC(bt->write_data[i]);
+               HOST2BMC(bt->write_data[i]);
 }
 
 static inline int read_all_bytes(struct si_sm_data *bt)
 {
        unsigned char i;
 
+       /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+          Keep layout of first four bytes aligned with write_data[] */
+
        bt->read_data[0] = BMC2HOST;
        bt->read_count = bt->read_data[0];
-       if (bt_debug & BT_DEBUG_MSG)
-               printk(KERN_WARNING "BT: read %d bytes:", bt->read_count);
 
-       /* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more
-          following the length byte. */
        if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
                if (bt_debug & BT_DEBUG_MSG)
-                       printk("bad length %d\n", bt->read_count);
+                       printk(KERN_WARNING "BT: bad raw rsp len=%d\n",
+                               bt->read_count);
                bt->truncated = 1;
                return 1;       /* let next XACTION START clean it up */
        }
        for (i = 1; i <= bt->read_count; i++)
-              bt->read_data[i] = BMC2HOST;
-       bt->read_count++;       /* account for the length byte */
+               bt->read_data[i] = BMC2HOST;
+       bt->read_count++;       /* Account internally for length byte */
 
        if (bt_debug & BT_DEBUG_MSG) {
-               for (i = 0; i < bt->read_count; i++)
+               int max = bt->read_count;
+
+               printk(KERN_WARNING "BT: got %d bytes seq=0x%02X",
+                       max, bt->read_data[2]);
+               if (max > 16)
+                       max = 16;
+               for (i = 0; i < max; i++)
                        printk (" %02x", bt->read_data[i]);
-               printk ("\n");
+               printk ("%s\n", bt->read_count == max ? "" : " ...");
        }
-       if (bt->seq != bt->write_data[2])       /* idiot check */
-               printk(KERN_DEBUG "BT: internal error: sequence mismatch\n");
 
-       /* per the spec, the (NetFn, Seq, Cmd) tuples should match */
-       if ((bt->read_data[3] == bt->write_data[3]) &&          /* Cmd */
-               (bt->read_data[2] == bt->write_data[2]) &&      /* Sequence */
-               ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
+       /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
+       if ((bt->read_data[3] == bt->write_data[3]) &&
+           (bt->read_data[2] == bt->write_data[2]) &&
+           ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
                        return 1;
 
        if (bt_debug & BT_DEBUG_MSG)
-              printk(KERN_WARNING "BT: bad packet: "
+               printk(KERN_WARNING "IPMI BT: bad packet: "
                "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
-               bt->write_data[1], bt->write_data[2], bt->write_data[3],
+               bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3],
                bt->read_data[1],  bt->read_data[2],  bt->read_data[3]);
        return 0;
 }
 
-/* Modifies bt->state appropriately, need to get into the bt_event() switch */
+/* Restart if retries are left, or return an error completion code */
 
-static void error_recovery(struct si_sm_data *bt, char *reason)
+static enum si_sm_result error_recovery(struct si_sm_data *bt,
+                                       unsigned char status,
+                                       unsigned char cCode)
 {
-       unsigned char status;
-       char buf[40]; /* For getting status */
+       char *reason;
 
-       bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */
+       bt->timeout = bt->BT_CAP_req2rsp;
 
-       status = BT_STATUS;
-       printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT,
-              STATUS2TXT(buf));
+       switch (cCode) {
+       case IPMI_TIMEOUT_ERR:
+               reason = "timeout";
+               break;
+       default:
+               reason = "internal error";
+               break;
+       }
+
+       printk(KERN_WARNING "IPMI BT: %s in %s %s ",    /* open-ended line */
+               reason, STATE2TXT, STATUS2TXT);
 
+       /* Per the IPMI spec, retries are based on the sequence number
+          known only to this module, so manage a restart here. */
        (bt->error_retries)++;
-       if (bt->error_retries > BT_RETRY_LIMIT) {
-               printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
-               bt->state = BT_STATE_HOSED;
-               if (!bt->nonzero_status)
-                       printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
-               else if (bt->error_retries <= BT_RETRY_LIMIT + 1) {
-                       printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n");
-                       bt->state = BT_STATE_RESET1;
-               }
-       return;
+       if (bt->error_retries < bt->BT_CAP_retries) {
+               printk("%d retries left\n",
+                       bt->BT_CAP_retries - bt->error_retries);
+               bt->state = BT_STATE_RESTART;
+               return SI_SM_CALL_WITHOUT_DELAY;
        }
 
-       /* Sometimes the BMC queues get in an "off-by-one" state...*/
-       if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
-               printk(KERN_DEBUG "retry B2H_WAIT\n");
-               return;
+       printk("failed %d retries, sending error response\n",
+               bt->BT_CAP_retries);
+       if (!bt->nonzero_status)
+               printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
+
+       /* this is most likely during insmod */
+       else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) {
+               printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+               bt->state = BT_STATE_RESET1;
+               return SI_SM_CALL_WITHOUT_DELAY;
        }
 
-       printk(KERN_DEBUG "restart command\n");
-       bt->state = BT_STATE_RESTART;
+       /* Concoct a useful error message, set up the next state, and
+          be done with this sequence. */
+
+       bt->state = BT_STATE_IDLE;
+       switch (cCode) {
+       case IPMI_TIMEOUT_ERR:
+               if (status & BT_B_BUSY) {
+                       cCode = IPMI_NODE_BUSY_ERR;
+                       bt->state = BT_STATE_LONG_BUSY;
+               }
+               break;
+       default:
+               break;
+       }
+       force_result(bt, cCode);
+       return SI_SM_TRANSACTION_COMPLETE;
 }
 
-/* Check the status and (possibly) advance the BT state machine.  The
-   default return is SI_SM_CALL_WITH_DELAY. */
+/* Check status and (usually) take action and change this state machine. */
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-       unsigned char status;
-       char buf[40]; /* For getting status */
+       unsigned char status, BT_CAP[8];
+       static enum bt_states last_printed = BT_STATE_PRINTME;
        int i;
 
        status = BT_STATUS;
        bt->nonzero_status |= status;
-
-       if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
+       if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
                printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
                        STATE2TXT,
-                       STATUS2TXT(buf),
+                       STATUS2TXT,
                        bt->timeout,
                        time);
-       bt->last_state = bt->state;
+               last_printed = bt->state;
+       }
 
-       if (bt->state == BT_STATE_HOSED)
-              return SI_SM_HOSED;
+       /* Commands that time out may still (eventually) provide a response.
+          This stale response will get in the way of a new response so remove
+          it if possible (hopefully during IDLE).  Even if it comes up later
+          it will be rejected by its (now-forgotten) seq number. */
+
+       if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
+               drain_BMC2HOST(bt);
+               BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+       }
 
-       if (bt->state != BT_STATE_IDLE) {       /* do timeout test */
+       if ((bt->state != BT_STATE_IDLE) &&
+           (bt->state <  BT_STATE_PRINTME)) {          /* check timeout */
                bt->timeout -= time;
-               if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
-                       error_recovery(bt, "timed out");
-                       return SI_SM_CALL_WITHOUT_DELAY;
-               }
+               if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
+                       return error_recovery(bt,
+                                             status,
+                                             IPMI_TIMEOUT_ERR);
        }
 
        switch (bt->state) {
 
-       case BT_STATE_IDLE:     /* check for asynchronous messages */
+       /* Idle state first checks for asynchronous messages from another
+          channel, then does some opportunistic housekeeping. */
+
+       case BT_STATE_IDLE:
                if (status & BT_SMS_ATN) {
                        BT_CONTROL(BT_SMS_ATN); /* clear it */
                        return SI_SM_ATTN;
                }
-               return SI_SM_IDLE;
 
-       case BT_STATE_XACTION_START:
-               if (status & BT_H_BUSY) {
+               if (status & BT_H_BUSY)         /* clear a leftover H_BUSY */
                        BT_CONTROL(BT_H_BUSY);
-                       break;
-               }
-               if (status & BT_B2H_ATN)
-                      break;
-               bt->state = BT_STATE_WRITE_BYTES;
-               return SI_SM_CALL_WITHOUT_DELAY;        /* for logging */
 
-       case BT_STATE_WRITE_BYTES:
+               /* Read BT capabilities if it hasn't been done yet */
+               if (!bt->BT_CAP_outreqs)
+                       BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+                                       SI_SM_CALL_WITHOUT_DELAY);
+               bt->timeout = bt->BT_CAP_req2rsp;
+               BT_SI_SM_RETURN(SI_SM_IDLE);
+
+       case BT_STATE_XACTION_START:
                if (status & (BT_B_BUSY | BT_H2B_ATN))
-                      break;
+                       BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+               if (BT_STATUS & BT_H_BUSY)
+                       BT_CONTROL(BT_H_BUSY);  /* force clear */
+               BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
+                               SI_SM_CALL_WITHOUT_DELAY);
+
+       case BT_STATE_WRITE_BYTES:
+               if (status & BT_H_BUSY)
+                       BT_CONTROL(BT_H_BUSY);  /* clear */
                BT_CONTROL(BT_CLR_WR_PTR);
                write_all_bytes(bt);
-               BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */
-               bt->state = BT_STATE_WRITE_CONSUME;
-               return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
-
-       case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
-               if (status & (BT_H2B_ATN | BT_B_BUSY))
-                      break;
-               bt->state = BT_STATE_B2H_WAIT;
-               /* fall through with status */
-
-       /* Stay in BT_STATE_B2H_WAIT until a packet matches.  However, spinning
-          hard here, constantly reading status, seems to hold off the
-          generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
-
-       case BT_STATE_B2H_WAIT:
-               if (!(status & BT_B2H_ATN))
-                      break;
-
-               /* Assume ordered, uncached writes: no need to wait */
-               if (!(status & BT_H_BUSY))
-                      BT_CONTROL(BT_H_BUSY); /* set */
-               BT_CONTROL(BT_B2H_ATN);         /* clear it, ACK to the BMC */
-               BT_CONTROL(BT_CLR_RD_PTR);      /* reset the queue */
-               i = read_all_bytes(bt);
-               BT_CONTROL(BT_H_BUSY);          /* clear */
-               if (!i)                         /* Try this state again */
-                      break;
-               bt->state = BT_STATE_READ_END;
-               return SI_SM_CALL_WITHOUT_DELAY;        /* for logging */
-
-       case BT_STATE_READ_END:
-
-               /* I could wait on BT_H_BUSY to go clear for a truly clean
-                  exit.  However, this is already done in XACTION_START
-                  and the (possible) extra loop/status/possible wait affects
-                  performance.  So, as long as it works, just ignore H_BUSY */
-
-#ifdef MAKE_THIS_TRUE_IF_NECESSARY
+               BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */
+               BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
+                               SI_SM_CALL_WITHOUT_DELAY);
 
-               if (status & BT_H_BUSY)
-                      break;
-#endif
-               bt->seq++;
-               bt->state = BT_STATE_IDLE;
-               return SI_SM_TRANSACTION_COMPLETE;
+       case BT_STATE_WRITE_CONSUME:
+               if (status & (BT_B_BUSY | BT_H2B_ATN))
+                       BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+               BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+                               SI_SM_CALL_WITHOUT_DELAY);
+
+       /* Spinning hard can suppress B2H_ATN and force a timeout */
+
+       case BT_STATE_READ_WAIT:
+               if (!(status & BT_B2H_ATN))
+                       BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+               BT_CONTROL(BT_H_BUSY);          /* set */
+
+               /* Uncached, ordered writes should just proceed serially but
+                  some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+                  workaround without too much penalty to the general case. */
+
+               BT_CONTROL(BT_B2H_ATN);         /* clear it to ACK the BMC */
+               BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
+                               SI_SM_CALL_WITHOUT_DELAY);
+
+       case BT_STATE_CLEAR_B2H:
+               if (status & BT_B2H_ATN) {      /* keep hitting it */
+                       BT_CONTROL(BT_B2H_ATN);
+                       BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
+               }
+               BT_STATE_CHANGE(BT_STATE_READ_BYTES,
+                               SI_SM_CALL_WITHOUT_DELAY);
+
+       case BT_STATE_READ_BYTES:
+               if (!(status & BT_H_BUSY))      /* check in case of retry */
+                       BT_CONTROL(BT_H_BUSY);
+               BT_CONTROL(BT_CLR_RD_PTR);      /* start of BMC2HOST buffer */
+               i = read_all_bytes(bt);         /* true == packet seq match */
+               BT_CONTROL(BT_H_BUSY);          /* NOW clear */
+               if (!i)                         /* Not my message */
+                       BT_STATE_CHANGE(BT_STATE_READ_WAIT,
+                                       SI_SM_CALL_WITHOUT_DELAY);
+               bt->state = bt->complete;
+               return bt->state == BT_STATE_IDLE ?     /* where to next? */
+                       SI_SM_TRANSACTION_COMPLETE :    /* normal */
+                       SI_SM_CALL_WITHOUT_DELAY;       /* Startup magic */
+
+       case BT_STATE_LONG_BUSY:        /* For example: after FW update */
+               if (!(status & BT_B_BUSY)) {
+                       reset_flags(bt);        /* next state is now IDLE */
+                       bt_init_data(bt, bt->io);
+               }
+               return SI_SM_CALL_WITH_DELAY;   /* No repeat printing */
 
        case BT_STATE_RESET1:
-               reset_flags(bt);
-               bt->timeout = BT_RESET_DELAY;
-               bt->state = BT_STATE_RESET2;
-               break;
+               reset_flags(bt);
+               drain_BMC2HOST(bt);
+               BT_STATE_CHANGE(BT_STATE_RESET2,
+                               SI_SM_CALL_WITH_DELAY);
 
        case BT_STATE_RESET2:           /* Send a soft reset */
                BT_CONTROL(BT_CLR_WR_PTR);
@@ -464,29 +579,59 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                HOST2BMC(42);           /* Sequence number */
                HOST2BMC(3);            /* Cmd == Soft reset */
                BT_CONTROL(BT_H2B_ATN);
-               bt->state = BT_STATE_RESET3;
-               break;
+               bt->timeout = BT_RESET_DELAY * 1000000;
+               BT_STATE_CHANGE(BT_STATE_RESET3,
+                               SI_SM_CALL_WITH_DELAY);
 
-       case BT_STATE_RESET3:
+       case BT_STATE_RESET3:           /* Hold off everything for a bit */
                if (bt->timeout > 0)
-                      return SI_SM_CALL_WITH_DELAY;
-               bt->state = BT_STATE_RESTART;   /* printk in debug modes */
-               break;
+                       return SI_SM_CALL_WITH_DELAY;
+               drain_BMC2HOST(bt);
+               BT_STATE_CHANGE(BT_STATE_RESTART,
+                               SI_SM_CALL_WITH_DELAY);
 
-       case BT_STATE_RESTART:          /* don't reset retries! */
-               reset_flags(bt);
-               bt->write_data[2] = ++bt->seq;
+       case BT_STATE_RESTART:          /* don't reset retries or seq! */
                bt->read_count = 0;
                bt->nonzero_status = 0;
-               bt->timeout = BT_NORMAL_TIMEOUT;
-               bt->state = BT_STATE_XACTION_START;
-               break;
-
-       default:        /* HOSED is supposed to be caught much earlier */
-               error_recovery(bt, "internal logic error");
-               break;
-       }
-       return SI_SM_CALL_WITH_DELAY;
+               bt->timeout = bt->BT_CAP_req2rsp;
+               BT_STATE_CHANGE(BT_STATE_XACTION_START,
+                               SI_SM_CALL_WITH_DELAY);
+
+       /* Get BT Capabilities, using timing of upper level state machine.
+          Set outreqs to prevent infinite loop on timeout. */
+       case BT_STATE_CAPABILITIES_BEGIN:
+               bt->BT_CAP_outreqs = 1;
+               {
+                       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+                       bt->state = BT_STATE_IDLE;
+                       bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+               }
+               bt->complete = BT_STATE_CAPABILITIES_END;
+               BT_STATE_CHANGE(BT_STATE_XACTION_START,
+                               SI_SM_CALL_WITH_DELAY);
+
+       case BT_STATE_CAPABILITIES_END:
+               i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+               bt_init_data(bt, bt->io);
+               if ((i == 8) && !BT_CAP[2]) {
+                       bt->BT_CAP_outreqs = BT_CAP[3];
+                       bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+                       bt->BT_CAP_retries = BT_CAP[7];
+               } else
+                       printk(KERN_WARNING "IPMI BT: using default values\n");
+               if (!bt->BT_CAP_outreqs)
+                       bt->BT_CAP_outreqs = 1;
+               printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
+                       bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+               bt->timeout = bt->BT_CAP_req2rsp;
+               return SI_SM_CALL_WITHOUT_DELAY;
+
+       default:        /* should never occur */
+               return error_recovery(bt,
+                                     status,
+                                     IPMI_ERR_UNSPECIFIED);
+       }
+       return SI_SM_CALL_WITH_DELAY;
 }
 
 static int bt_detect(struct si_sm_data *bt)
@@ -497,7 +642,7 @@ static int bt_detect(struct si_sm_data *bt)
           test that first.  The calling routine uses negative logic. */
 
        if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
-              return 1;
+               return 1;
        reset_flags(bt);
        return 0;
 }
@@ -513,11 +658,11 @@ static int bt_size(void)
 
 struct si_sm_handlers bt_smi_handlers =
 {
-       .init_data         = bt_init_data,
-       .start_transaction = bt_start_transaction,
-       .get_result        = bt_get_result,
-       .event             = bt_event,
-       .detect            = bt_detect,
-       .cleanup           = bt_cleanup,
-       .size              = bt_size,
+       .init_data              = bt_init_data,
+       .start_transaction      = bt_start_transaction,
+       .get_result             = bt_get_result,
+       .event                  = bt_event,
+       .detect                 = bt_detect,
+       .cleanup                = bt_cleanup,
+       .size                   = bt_size,
 };
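
bt_smi_handlers is the table the ipmi_si driver layer programs against: init_data() once, start_transaction() to queue a request (it now returns real IPMI completion codes instead of -1/-2), then event() repeatedly with the elapsed microseconds until the result says the transaction finished, and finally get_result(). The real loop lives in ipmi_si_intf.c and also copes with interrupts, SI_SM_ATTN and idle polling; a stripped-down, hedged sketch of the calling convention:

/* sketch only: error handling and delay policy are made up */
static int run_one_transaction(struct si_sm_handlers *h, struct si_sm_data *sm,
			       unsigned char *msg, int len,
			       unsigned char *rsp, int rsp_len)
{
	enum si_sm_result res;
	int rv = h->start_transaction(sm, msg, len);

	if (rv)
		return rv;		/* e.g. IPMI_NODE_BUSY_ERR when LONG_BUSY */

	do {
		res = h->event(sm, 100);	/* "100 microseconds passed" */
		if (res == SI_SM_CALL_WITH_DELAY)
			udelay(100);		/* real code sleeps or reschedules */
	} while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED);

	if (res == SI_SM_HOSED)
		return -EIO;
	return h->get_result(sm, rsp, rsp_len);	/* number of bytes copied out */
}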
index 81fcf0ce21d1c91cf95f478a960a20d2459a6772..375d3378eecd3c7bc16833027dd67597b53f1f9b 100644 (file)
@@ -596,6 +596,31 @@ static int ipmi_ioctl(struct inode  *inode,
                rv = 0;
                break;
        }
+
+       case IPMICTL_GET_MAINTENANCE_MODE_CMD:
+       {
+               int mode;
+
+               mode = ipmi_get_maintenance_mode(priv->user);
+               if (copy_to_user(arg, &mode, sizeof(mode))) {
+                       rv = -EFAULT;
+                       break;
+               }
+               rv = 0;
+               break;
+       }
+
+       case IPMICTL_SET_MAINTENANCE_MODE_CMD:
+       {
+               int mode;
+
+               if (copy_from_user(&mode, arg, sizeof(mode))) {
+                       rv = -EFAULT;
+                       break;
+               }
+               rv = ipmi_set_maintenance_mode(priv->user, mode);
+               break;
+       }
        }
   
        return rv;
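
The two new ioctls hand maintenance-mode control to userspace; both pass a plain int through the pointer argument, matching the copy_from_user()/copy_to_user() calls above. A minimal user-side sketch, assuming the usual /dev/ipmi0 node and the IPMICTL_*_MAINTENANCE_MODE_CMD and IPMI_MAINTENANCE_MODE_* constants from linux/ipmi.h (added alongside these handlers):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ipmi.h>

int main(void)
{
	int mode, fd = open("/dev/ipmi0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ipmi0");
		return 1;
	}
	if (ioctl(fd, IPMICTL_GET_MAINTENANCE_MODE_CMD, &mode) == 0)
		printf("current maintenance mode: %d\n", mode);

	mode = IPMI_MAINTENANCE_MODE_ON;	/* or _OFF / _AUTO */
	if (ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode) != 0)
		perror("set maintenance mode");
	close(fd);
	return 0;
}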
index 2062675f9e998d1e8f3420f6a1b7af747624f283..c1b8228cb7b685c619d437fac98f01ef68169437 100644 (file)
@@ -93,8 +93,8 @@ enum kcs_states {
                                   state machine. */
 };
 
-#define MAX_KCS_READ_SIZE 80
-#define MAX_KCS_WRITE_SIZE 80
+#define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
+#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
 
 /* Timeouts in microseconds. */
 #define IBF_RETRY_TIMEOUT 1000000
@@ -261,12 +261,14 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
 {
        unsigned int i;
 
-       if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
-               return -1;
-       }
-       if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
-               return -2;
-       }
+       if (size < 2)
+               return IPMI_REQ_LEN_INVALID_ERR;
+       if (size > MAX_KCS_WRITE_SIZE)
+               return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+       if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED))
+               return IPMI_NOT_IN_MY_STATE_ERR;
+
        if (kcs_debug & KCS_DEBUG_MSG) {
                printk(KERN_DEBUG "start_kcs_transaction -");
                for (i = 0; i < size; i ++) {
index c47add8e47df3009aaf25d6dffe4ded0b64f0773..5703ee28e1cc0b97adb7488db3b843b2715290ca 100644 (file)
@@ -48,7 +48,7 @@
 
 #define PFX "IPMI message handler: "
 
-#define IPMI_DRIVER_VERSION "39.0"
+#define IPMI_DRIVER_VERSION "39.1"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
@@ -59,6 +59,9 @@ static int initialized = 0;
 static struct proc_dir_entry *proc_ipmi_root = NULL;
 #endif /* CONFIG_PROC_FS */
 
+/* Remain in auto-maintenance mode for this amount of time (in ms). */
+#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
+
 #define MAX_EVENTS_IN_QUEUE    25
 
 /* Don't let a message sit in a queue forever, always time it with at least
@@ -193,17 +196,28 @@ struct ipmi_smi
 
        struct kref refcount;
 
+       /* Used for a list of interfaces. */
+       struct list_head link;
+
        /* The list of upper layers that are using me.  seq_lock
         * protects this. */
        struct list_head users;
 
+       /* Information to supply to users. */
+       unsigned char ipmi_version_major;
+       unsigned char ipmi_version_minor;
+
        /* Used for wake ups at startup. */
        wait_queue_head_t waitq;
 
        struct bmc_device *bmc;
        char *my_dev_name;
+       char *sysfs_name;
 
-       /* This is the lower-layer's sender routine. */
+       /* This is the lower-layer's sender routine.  Note that you
+        * must either be holding the ipmi_interfaces_mutex or be in
+        * an unpreemptible region to use this.  You must fetch the
+        * value into a local variable and make sure it is not NULL. */
        struct ipmi_smi_handlers *handlers;
        void                     *send_info;
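
The new comment on handlers states the locking rule the rest of this patch enforces: the pointer may become NULL while an interface is being unregistered, so a caller must either hold ipmi_interfaces_mutex or sit in an unpreemptible (RCU) section, snapshot the pointer into a local, and check it before use; the NULL checks added to ipmi_destroy_user(), ipmi_user_set_run_to_completion() and i_ipmi_request() below follow exactly that pattern. A sketch of the expected call shape, assuming the era's sender(send_info, msg, priority) callback:

/* sketch: how a sender is expected to use intf->handlers after this change */
static int send_through_handlers(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
{
	struct ipmi_smi_handlers *handlers;

	rcu_read_lock();		/* or hold ipmi_interfaces_mutex instead */
	handlers = intf->handlers;	/* snapshot once */
	if (!handlers) {
		rcu_read_unlock();
		return -ENODEV;		/* the interface is going away */
	}
	handlers->sender(intf->send_info, msg, 0);	/* 0 = normal priority */
	rcu_read_unlock();
	return 0;
}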
 
@@ -242,6 +256,7 @@ struct ipmi_smi
        spinlock_t       events_lock; /* For dealing with event stuff. */
        struct list_head waiting_events;
        unsigned int     waiting_events_count; /* How many events in queue? */
+       int              delivering_events;
 
        /* The event receiver for my BMC, only really used at panic
           shutdown as a place to store this. */
@@ -250,6 +265,12 @@ struct ipmi_smi
        unsigned char local_sel_device;
        unsigned char local_event_generator;
 
+       /* For handling of maintenance mode. */
+       int maintenance_mode;
+       int maintenance_mode_enable;
+       int auto_maintenance_timeout;
+       spinlock_t maintenance_mode_lock; /* Used in a timer... */
+
        /* A cheap hack, if this is non-null and a message to an
           interface comes in with a NULL user, call this routine with
           it.  Note that the message will still be freed by the
@@ -338,13 +359,6 @@ struct ipmi_smi
 };
 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 
-/* Used to mark an interface entry that cannot be used but is not a
- * free entry, either, primarily used at creation and deletion time so
- * a slot doesn't get reused too quickly. */
-#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
-#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
-                                  || (i == IPMI_INVALID_INTERFACE_ENTRY))
-
 /**
  * The driver model view of the IPMI messaging driver.
  */
@@ -354,16 +368,13 @@ static struct device_driver ipmidriver = {
 };
 static DEFINE_MUTEX(ipmidriver_mutex);
 
-#define MAX_IPMI_INTERFACES 4
-static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
-
-/* Directly protects the ipmi_interfaces data structure. */
-static DEFINE_SPINLOCK(interfaces_lock);
+static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
+static DEFINE_MUTEX(ipmi_interfaces_mutex);
 
 /* List of watchers that want to know when smi's are added and
    deleted. */
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
-static DECLARE_RWSEM(smi_watchers_sem);
+static DEFINE_MUTEX(smi_watchers_mutex);
 
 
 static void free_recv_msg_list(struct list_head *q)
@@ -423,48 +434,84 @@ static void intf_free(struct kref *ref)
        kfree(intf);
 }
 
+struct watcher_entry {
+       int              intf_num;
+       ipmi_smi_t       intf;
+       struct list_head link;
+};
+
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
-       int           i;
-       unsigned long flags;
+       ipmi_smi_t intf;
+       struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
+       struct watcher_entry *e, *e2;
+
+       mutex_lock(&smi_watchers_mutex);
+
+       mutex_lock(&ipmi_interfaces_mutex);
 
-       down_write(&smi_watchers_sem);
-       list_add(&(watcher->link), &smi_watchers);
-       up_write(&smi_watchers_sem);
-       spin_lock_irqsave(&interfaces_lock, flags);
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               ipmi_smi_t intf = ipmi_interfaces[i];
-               if (IPMI_INVALID_INTERFACE(intf))
+       /* Build a list of things to deliver. */
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               if (intf->intf_num == -1)
                        continue;
-               spin_unlock_irqrestore(&interfaces_lock, flags);
-               watcher->new_smi(i, intf->si_dev);
-               spin_lock_irqsave(&interfaces_lock, flags);
+               e = kmalloc(sizeof(*e), GFP_KERNEL);
+               if (!e)
+                       goto out_err;
+               kref_get(&intf->refcount);
+               e->intf = intf;
+               e->intf_num = intf->intf_num;
+               list_add_tail(&e->link, &to_deliver);
        }
-       spin_unlock_irqrestore(&interfaces_lock, flags);
+
+       /* We will succeed, so add it to the list. */
+       list_add(&watcher->link, &smi_watchers);
+
+       mutex_unlock(&ipmi_interfaces_mutex);
+
+       list_for_each_entry_safe(e, e2, &to_deliver, link) {
+               list_del(&e->link);
+               watcher->new_smi(e->intf_num, e->intf->si_dev);
+               kref_put(&e->intf->refcount, intf_free);
+               kfree(e);
+       }
+
+       mutex_unlock(&smi_watchers_mutex);
+
        return 0;
+
+ out_err:
+       mutex_unlock(&ipmi_interfaces_mutex);
+       mutex_unlock(&smi_watchers_mutex);
+       list_for_each_entry_safe(e, e2, &to_deliver, link) {
+               list_del(&e->link);
+               kref_put(&e->intf->refcount, intf_free);
+               kfree(e);
+       }
+       return -ENOMEM;
 }
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
-       down_write(&smi_watchers_sem);
+       mutex_lock(&smi_watchers_mutex);
        list_del(&(watcher->link));
-       up_write(&smi_watchers_sem);
+       mutex_unlock(&smi_watchers_mutex);
        return 0;
 }
 
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
 static void
 call_smi_watchers(int i, struct device *dev)
 {
        struct ipmi_smi_watcher *w;
 
-       down_read(&smi_watchers_sem);
        list_for_each_entry(w, &smi_watchers, link) {
                if (try_module_get(w->owner)) {
                        w->new_smi(i, dev);
                        module_put(w->owner);
                }
        }
-       up_read(&smi_watchers_sem);
 }
 
 static int
@@ -590,6 +637,17 @@ static void deliver_response(struct ipmi_recv_msg *msg)
        }
 }
 
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+       msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+       msg->msg_data[0] = err;
+       msg->msg.netfn |= 1; /* Convert to a response. */
+       msg->msg.data_len = 1;
+       msg->msg.data = msg->msg_data;
+       deliver_response(msg);
+}
+
 /* Find the next sequence number not being used and add the given
    message with the given timeout to the sequence table.  This must be
    called with the interface's seq_lock held. */
@@ -727,14 +785,8 @@ static int intf_err_seq(ipmi_smi_t   intf,
        }
        spin_unlock_irqrestore(&(intf->seq_lock), flags);
 
-       if (msg) {
-               msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-               msg->msg_data[0] = err;
-               msg->msg.netfn |= 1; /* Convert to a response. */
-               msg->msg.data_len = 1;
-               msg->msg.data = msg->msg_data;
-               deliver_response(msg);
-       }
+       if (msg)
+               deliver_err_response(msg, err);
 
        return rv;
 }
@@ -776,17 +828,18 @@ int ipmi_create_user(unsigned int          if_num,
        if (!new_user)
                return -ENOMEM;
 
-       spin_lock_irqsave(&interfaces_lock, flags);
-       intf = ipmi_interfaces[if_num];
-       if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
-               spin_unlock_irqrestore(&interfaces_lock, flags);
-               rv = -EINVAL;
-               goto out_kfree;
+       mutex_lock(&ipmi_interfaces_mutex);
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               if (intf->intf_num == if_num)
+                       goto found;
        }
+       /* Not found, return an error */
+       rv = -EINVAL;
+       goto out_kfree;
 
+ found:
        /* Note that each existing user holds a refcount to the interface. */
        kref_get(&intf->refcount);
-       spin_unlock_irqrestore(&interfaces_lock, flags);
 
        kref_init(&new_user->refcount);
        new_user->handler = handler;
@@ -807,6 +860,10 @@ int ipmi_create_user(unsigned int          if_num,
                }
        }
 
+       /* Hold the lock so intf->handlers is guaranteed to be good
+        * until now */
+       mutex_unlock(&ipmi_interfaces_mutex);
+
        new_user->valid = 1;
        spin_lock_irqsave(&intf->seq_lock, flags);
        list_add_rcu(&new_user->link, &intf->users);
@@ -817,6 +874,7 @@ int ipmi_create_user(unsigned int          if_num,
 out_kref:
        kref_put(&intf->refcount, intf_free);
 out_kfree:
+       mutex_unlock(&ipmi_interfaces_mutex);
        kfree(new_user);
        return rv;
 }
@@ -846,6 +904,7 @@ int ipmi_destroy_user(ipmi_user_t user)
                    && (intf->seq_table[i].recv_msg->user == user))
                {
                        intf->seq_table[i].inuse = 0;
+                       ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
                }
        }
        spin_unlock_irqrestore(&intf->seq_lock, flags);
@@ -872,9 +931,13 @@ int ipmi_destroy_user(ipmi_user_t user)
                kfree(rcvr);
        }
 
-       module_put(intf->handlers->owner);
-       if (intf->handlers->dec_usecount)
-               intf->handlers->dec_usecount(intf->send_info);
+       mutex_lock(&ipmi_interfaces_mutex);
+       if (intf->handlers) {
+               module_put(intf->handlers->owner);
+               if (intf->handlers->dec_usecount)
+                       intf->handlers->dec_usecount(intf->send_info);
+       }
+       mutex_unlock(&ipmi_interfaces_mutex);
 
        kref_put(&intf->refcount, intf_free);
 
@@ -887,8 +950,8 @@ void ipmi_get_version(ipmi_user_t   user,
                      unsigned char *major,
                      unsigned char *minor)
 {
-       *major = ipmi_version_major(&user->intf->bmc->id);
-       *minor = ipmi_version_minor(&user->intf->bmc->id);
+       *major = user->intf->ipmi_version_major;
+       *minor = user->intf->ipmi_version_minor;
 }
 
 int ipmi_set_my_address(ipmi_user_t   user,
@@ -931,6 +994,65 @@ int ipmi_get_my_LUN(ipmi_user_t   user,
        return 0;
 }
 
+int ipmi_get_maintenance_mode(ipmi_user_t user)
+{
+       int           mode;
+       unsigned long flags;
+
+       spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
+       mode = user->intf->maintenance_mode;
+       spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
+
+       return mode;
+}
+EXPORT_SYMBOL(ipmi_get_maintenance_mode);
+
+static void maintenance_mode_update(ipmi_smi_t intf)
+{
+       if (intf->handlers->set_maintenance_mode)
+               intf->handlers->set_maintenance_mode(
+                       intf->send_info, intf->maintenance_mode_enable);
+}
+
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
+{
+       int           rv = 0;
+       unsigned long flags;
+       ipmi_smi_t    intf = user->intf;
+
+       spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+       if (intf->maintenance_mode != mode) {
+               switch (mode) {
+               case IPMI_MAINTENANCE_MODE_AUTO:
+                       intf->maintenance_mode = mode;
+                       intf->maintenance_mode_enable
+                               = (intf->auto_maintenance_timeout > 0);
+                       break;
+
+               case IPMI_MAINTENANCE_MODE_OFF:
+                       intf->maintenance_mode = mode;
+                       intf->maintenance_mode_enable = 0;
+                       break;
+
+               case IPMI_MAINTENANCE_MODE_ON:
+                       intf->maintenance_mode = mode;
+                       intf->maintenance_mode_enable = 1;
+                       break;
+
+               default:
+                       rv = -EINVAL;
+                       goto out_unlock;
+               }
+
+               maintenance_mode_update(intf);
+       }
+ out_unlock:
+       spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
+
+       return rv;
+}
+EXPORT_SYMBOL(ipmi_set_maintenance_mode);
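
/*
 * A minimal usage sketch, assuming a valid ipmi_user_t handle obtained
 * from ipmi_create_user(), for a kernel user that wants to wrap a
 * firmware transfer in maintenance mode:
 *
 *      rv = ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_ON);
 *      if (rv)
 *              return rv;
 *      ... send the IPMI_NETFN_FIRMWARE_REQUEST messages ...
 *      ipmi_set_maintenance_mode(user, IPMI_MAINTENANCE_MODE_AUTO);
 *
 * In IPMI_MAINTENANCE_MODE_AUTO the mode is entered automatically when
 * a cold/warm reset or firmware-netfn request is sent, and is dropped
 * again once auto_maintenance_timeout runs out (see the timeout
 * handling later in this patch).
 */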
+
 int ipmi_set_gets_events(ipmi_user_t user, int val)
 {
        unsigned long        flags;
@@ -943,20 +1065,33 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
        spin_lock_irqsave(&intf->events_lock, flags);
        user->gets_events = val;
 
-       if (val) {
-               /* Deliver any queued events. */
+       if (intf->delivering_events)
+               /*
+                * Another thread is delivering events for this, so
+                * let it handle any new events.
+                */
+               goto out;
+
+       /* Deliver any queued events. */
+       while (user->gets_events && !list_empty(&intf->waiting_events)) {
                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
                        list_move_tail(&msg->link, &msgs);
                intf->waiting_events_count = 0;
-       }
 
-       /* Hold the events lock while doing this to preserve order. */
-       list_for_each_entry_safe(msg, msg2, &msgs, link) {
-               msg->user = user;
-               kref_get(&user->refcount);
-               deliver_response(msg);
+               intf->delivering_events = 1;
+               spin_unlock_irqrestore(&intf->events_lock, flags);
+
+               list_for_each_entry_safe(msg, msg2, &msgs, link) {
+                       msg->user = user;
+                       kref_get(&user->refcount);
+                       deliver_response(msg);
+               }
+
+               spin_lock_irqsave(&intf->events_lock, flags);
+               intf->delivering_events = 0;
        }
 
+ out:
        spin_unlock_irqrestore(&intf->events_lock, flags);
 
        return 0;
@@ -1067,7 +1202,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t   user,
 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
        ipmi_smi_t intf = user->intf;
-       intf->handlers->set_run_to_completion(intf->send_info, val);
+       if (intf->handlers)
+               intf->handlers->set_run_to_completion(intf->send_info, val);
 }
 
 static unsigned char
@@ -1178,10 +1314,11 @@ static int i_ipmi_request(ipmi_user_t          user,
                          int                  retries,
                          unsigned int         retry_time_ms)
 {
-       int                  rv = 0;
-       struct ipmi_smi_msg  *smi_msg;
-       struct ipmi_recv_msg *recv_msg;
-       unsigned long        flags;
+       int                      rv = 0;
+       struct ipmi_smi_msg      *smi_msg;
+       struct ipmi_recv_msg     *recv_msg;
+       unsigned long            flags;
+       struct ipmi_smi_handlers *handlers;
 
 
        if (supplied_recv) {
@@ -1204,6 +1341,13 @@ static int i_ipmi_request(ipmi_user_t          user,
                }
        }
 
+       rcu_read_lock();
+       handlers = intf->handlers;
+       if (!handlers) {
+               rv = -ENODEV;
+               goto out_err;
+       }
+
        recv_msg->user = user;
        if (user)
                kref_get(&user->refcount);
@@ -1246,6 +1390,24 @@ static int i_ipmi_request(ipmi_user_t          user,
                        goto out_err;
                }
 
+               if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
+                     && ((msg->cmd == IPMI_COLD_RESET_CMD)
+                         || (msg->cmd == IPMI_WARM_RESET_CMD)))
+                    || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
+               {
+                       spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+                       intf->auto_maintenance_timeout
+                               = IPMI_MAINTENANCE_MODE_TIMEOUT;
+                       if (!intf->maintenance_mode
+                           && !intf->maintenance_mode_enable)
+                       {
+                               intf->maintenance_mode_enable = 1;
+                               maintenance_mode_update(intf);
+                       }
+                       spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+                                              flags);
+               }
+
                if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
                        spin_lock_irqsave(&intf->counter_lock, flags);
                        intf->sent_invalid_commands++;
@@ -1520,11 +1682,14 @@ static int i_ipmi_request(ipmi_user_t          user,
                printk("\n");
        }
 #endif
-       intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+       handlers->sender(intf->send_info, smi_msg, priority);
+       rcu_read_unlock();
 
        return 0;
 
  out_err:
+       rcu_read_unlock();
        ipmi_free_smi_msg(smi_msg);
        ipmi_free_recv_msg(recv_msg);
        return rv;
@@ -1604,6 +1769,7 @@ int ipmi_request_supply_msgs(ipmi_user_t          user,
                              -1, 0);
 }
 
+#ifdef CONFIG_PROC_FS
 static int ipmb_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
 {
@@ -1692,6 +1858,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
 
        return (out - ((char *) page));
 }
+#endif /* CONFIG_PROC_FS */
 
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
                            read_proc_t *read_proc, write_proc_t *write_proc,
@@ -1817,13 +1984,12 @@ static int __find_bmc_prod_dev_id(struct device *dev, void *data)
        struct bmc_device *bmc = dev_get_drvdata(dev);
 
        return (bmc->id.product_id == id->product_id
-               && bmc->id.product_id == id->product_id
                && bmc->id.device_id == id->device_id);
 }
 
 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
        struct device_driver *drv,
-       unsigned char product_id, unsigned char device_id)
+       unsigned int product_id, unsigned char device_id)
 {
        struct prod_dev_id id = {
                .product_id = product_id,
@@ -1940,6 +2106,9 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
 
 static void remove_files(struct bmc_device *bmc)
 {
+       if (!bmc->dev)
+               return;
+
        device_remove_file(&bmc->dev->dev,
                           &bmc->device_id_attr);
        device_remove_file(&bmc->dev->dev,
@@ -1973,7 +2142,8 @@ cleanup_bmc_device(struct kref *ref)
        bmc = container_of(ref, struct bmc_device, refcount);
 
        remove_files(bmc);
-       platform_device_unregister(bmc->dev);
+       if (bmc->dev)
+               platform_device_unregister(bmc->dev);
        kfree(bmc);
 }
 
@@ -1981,7 +2151,11 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
 {
        struct bmc_device *bmc = intf->bmc;
 
-       sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+       if (intf->sysfs_name) {
+               sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
+               kfree(intf->sysfs_name);
+               intf->sysfs_name = NULL;
+       }
        if (intf->my_dev_name) {
                sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
                kfree(intf->my_dev_name);
@@ -1990,6 +2164,7 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
 
        mutex_lock(&ipmidriver_mutex);
        kref_put(&bmc->refcount, cleanup_bmc_device);
+       intf->bmc = NULL;
        mutex_unlock(&ipmidriver_mutex);
 }
 
@@ -1997,6 +2172,56 @@ static int create_files(struct bmc_device *bmc)
 {
        int err;
 
+       bmc->device_id_attr.attr.name = "device_id";
+       bmc->device_id_attr.attr.owner = THIS_MODULE;
+       bmc->device_id_attr.attr.mode = S_IRUGO;
+       bmc->device_id_attr.show = device_id_show;
+
+       bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
+       bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
+       bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
+       bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+
+       bmc->revision_attr.attr.name = "revision";
+       bmc->revision_attr.attr.owner = THIS_MODULE;
+       bmc->revision_attr.attr.mode = S_IRUGO;
+       bmc->revision_attr.show = revision_show;
+
+       bmc->firmware_rev_attr.attr.name = "firmware_revision";
+       bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
+       bmc->firmware_rev_attr.attr.mode = S_IRUGO;
+       bmc->firmware_rev_attr.show = firmware_rev_show;
+
+       bmc->version_attr.attr.name = "ipmi_version";
+       bmc->version_attr.attr.owner = THIS_MODULE;
+       bmc->version_attr.attr.mode = S_IRUGO;
+       bmc->version_attr.show = ipmi_version_show;
+
+       bmc->add_dev_support_attr.attr.name = "additional_device_support";
+       bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
+       bmc->add_dev_support_attr.attr.mode = S_IRUGO;
+       bmc->add_dev_support_attr.show = add_dev_support_show;
+
+       bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
+       bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
+       bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
+       bmc->manufacturer_id_attr.show = manufacturer_id_show;
+
+       bmc->product_id_attr.attr.name = "product_id";
+       bmc->product_id_attr.attr.owner = THIS_MODULE;
+       bmc->product_id_attr.attr.mode = S_IRUGO;
+       bmc->product_id_attr.show = product_id_show;
+
+       bmc->guid_attr.attr.name = "guid";
+       bmc->guid_attr.attr.owner = THIS_MODULE;
+       bmc->guid_attr.attr.mode = S_IRUGO;
+       bmc->guid_attr.show = guid_show;
+
+       bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
+       bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
+       bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
+       bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+
        err = device_create_file(&bmc->dev->dev,
                           &bmc->device_id_attr);
        if (err) goto out;
@@ -2066,7 +2291,8 @@ out:
        return err;
 }
 
-static int ipmi_bmc_register(ipmi_smi_t intf)
+static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
+                            const char *sysfs_name)
 {
        int               rv;
        struct bmc_device *bmc = intf->bmc;
@@ -2106,9 +2332,39 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
                       bmc->id.product_id,
                       bmc->id.device_id);
        } else {
-               bmc->dev = platform_device_alloc("ipmi_bmc",
-                                                bmc->id.device_id);
+               char name[14];
+               unsigned char orig_dev_id = bmc->id.device_id;
+               int warn_printed = 0;
+
+               snprintf(name, sizeof(name),
+                        "ipmi_bmc.%4.4x", bmc->id.product_id);
+
+               while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
+                                                bmc->id.product_id,
+                                                bmc->id.device_id))
+               {
+                       if (!warn_printed) {
+                               printk(KERN_WARNING PFX
+                                      "This machine has two different BMCs"
+                                      " with the same product id and device"
+                                      " id.  This is a firmware error;"
+                                      " the device id is being incremented"
+                                      " to work around the problem."
+                                      " Prod ID = 0x%x, Dev ID = 0x%x\n",
+                                      bmc->id.product_id, bmc->id.device_id);
+                               warn_printed = 1;
+                       }
+                       bmc->id.device_id++; /* Wraps at 255 */
+                       if (bmc->id.device_id == orig_dev_id) {
+                               printk(KERN_ERR PFX
+                                      "Out of device ids!\n");
+                               break;
+                       }
+               }
+
+               bmc->dev = platform_device_alloc(name, bmc->id.device_id);
                if (!bmc->dev) {
+                       mutex_unlock(&ipmidriver_mutex);
                        printk(KERN_ERR
                               "ipmi_msghandler:"
                               " Unable to allocate platform device\n");
@@ -2121,6 +2377,8 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
                rv = platform_device_add(bmc->dev);
                mutex_unlock(&ipmidriver_mutex);
                if (rv) {
+                       platform_device_put(bmc->dev);
+                       bmc->dev = NULL;
                        printk(KERN_ERR
                               "ipmi_msghandler:"
                               " Unable to register bmc device: %d\n",
@@ -2130,57 +2388,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
                        return rv;
                }
 
-               bmc->device_id_attr.attr.name = "device_id";
-               bmc->device_id_attr.attr.owner = THIS_MODULE;
-               bmc->device_id_attr.attr.mode = S_IRUGO;
-               bmc->device_id_attr.show = device_id_show;
-
-               bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
-               bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
-               bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
-               bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
-
-               bmc->revision_attr.attr.name = "revision";
-               bmc->revision_attr.attr.owner = THIS_MODULE;
-               bmc->revision_attr.attr.mode = S_IRUGO;
-               bmc->revision_attr.show = revision_show;
-
-               bmc->firmware_rev_attr.attr.name = "firmware_revision";
-               bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
-               bmc->firmware_rev_attr.attr.mode = S_IRUGO;
-               bmc->firmware_rev_attr.show = firmware_rev_show;
-
-               bmc->version_attr.attr.name = "ipmi_version";
-               bmc->version_attr.attr.owner = THIS_MODULE;
-               bmc->version_attr.attr.mode = S_IRUGO;
-               bmc->version_attr.show = ipmi_version_show;
-
-               bmc->add_dev_support_attr.attr.name
-                       = "additional_device_support";
-               bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
-               bmc->add_dev_support_attr.attr.mode = S_IRUGO;
-               bmc->add_dev_support_attr.show = add_dev_support_show;
-
-               bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
-               bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
-               bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
-               bmc->manufacturer_id_attr.show = manufacturer_id_show;
-
-               bmc->product_id_attr.attr.name = "product_id";
-               bmc->product_id_attr.attr.owner = THIS_MODULE;
-               bmc->product_id_attr.attr.mode = S_IRUGO;
-               bmc->product_id_attr.show = product_id_show;
-
-               bmc->guid_attr.attr.name = "guid";
-               bmc->guid_attr.attr.owner = THIS_MODULE;
-               bmc->guid_attr.attr.mode = S_IRUGO;
-               bmc->guid_attr.show = guid_show;
-
-               bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
-               bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
-               bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
-               bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
-
                rv = create_files(bmc);
                if (rv) {
                        mutex_lock(&ipmidriver_mutex);
@@ -2202,29 +2409,44 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
         * create symlink from system interface device to bmc device
         * and back.
         */
+       intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
+       if (!intf->sysfs_name) {
+               rv = -ENOMEM;
+               printk(KERN_ERR
+                      "ipmi_msghandler: allocate link to BMC: %d\n",
+                      rv);
+               goto out_err;
+       }
+
        rv = sysfs_create_link(&intf->si_dev->kobj,
-                              &bmc->dev->dev.kobj, "bmc");
+                              &bmc->dev->dev.kobj, intf->sysfs_name);
        if (rv) {
+               kfree(intf->sysfs_name);
+               intf->sysfs_name = NULL;
                printk(KERN_ERR
                       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
                       rv);
                goto out_err;
        }
 
-       size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
+       size = snprintf(dummy, 0, "ipmi%d", ifnum);
        intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
        if (!intf->my_dev_name) {
+               kfree(intf->sysfs_name);
+               intf->sysfs_name = NULL;
                rv = -ENOMEM;
                printk(KERN_ERR
                       "ipmi_msghandler: allocate link from BMC: %d\n",
                       rv);
                goto out_err;
        }
-       snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
+       snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
 
        rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
                               intf->my_dev_name);
        if (rv) {
+               kfree(intf->sysfs_name);
+               intf->sysfs_name = NULL;
                kfree(intf->my_dev_name);
                intf->my_dev_name = NULL;
                printk(KERN_ERR
@@ -2409,17 +2631,14 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
                      void                     *send_info,
                      struct ipmi_device_id    *device_id,
                      struct device            *si_dev,
+                     const char               *sysfs_name,
                      unsigned char            slave_addr)
 {
        int              i, j;
        int              rv;
        ipmi_smi_t       intf;
-       unsigned long    flags;
-       int              version_major;
-       int              version_minor;
-
-       version_major = ipmi_version_major(device_id);
-       version_minor = ipmi_version_minor(device_id);
+       ipmi_smi_t       tintf;
+       struct list_head *link;
 
        /* Make sure the driver is actually initialized, this handles
           problems with initialization order. */
@@ -2437,12 +2656,16 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        if (!intf)
                return -ENOMEM;
        memset(intf, 0, sizeof(*intf));
+
+       intf->ipmi_version_major = ipmi_version_major(device_id);
+       intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
        intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
        if (!intf->bmc) {
                kfree(intf);
                return -ENOMEM;
        }
-       intf->intf_num = -1;
+       intf->intf_num = -1; /* Mark it invalid for now. */
        kref_init(&intf->refcount);
        intf->bmc->id = *device_id;
        intf->si_dev = si_dev;
@@ -2470,26 +2693,30 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        INIT_LIST_HEAD(&intf->waiting_events);
        intf->waiting_events_count = 0;
        mutex_init(&intf->cmd_rcvrs_mutex);
+       spin_lock_init(&intf->maintenance_mode_lock);
        INIT_LIST_HEAD(&intf->cmd_rcvrs);
        init_waitqueue_head(&intf->waitq);
 
        spin_lock_init(&intf->counter_lock);
        intf->proc_dir = NULL;
 
-       rv = -ENOMEM;
-       spin_lock_irqsave(&interfaces_lock, flags);
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               if (ipmi_interfaces[i] == NULL) {
-                       intf->intf_num = i;
-                       /* Reserve the entry till we are done. */
-                       ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
-                       rv = 0;
+       mutex_lock(&smi_watchers_mutex);
+       mutex_lock(&ipmi_interfaces_mutex);
+       /* Look for a hole in the numbers. */
+       i = 0;
+       link = &ipmi_interfaces;
+       list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+               if (tintf->intf_num != i) {
+                       link = &tintf->link;
                        break;
                }
+               i++;
        }
-       spin_unlock_irqrestore(&interfaces_lock, flags);
-       if (rv)
-               goto out;
+       /* Add the new interface in numeric order. */
+       if (i == 0)
+               list_add_rcu(&intf->link, &ipmi_interfaces);
+       else
+               list_add_tail_rcu(&intf->link, link);
 
        rv = handlers->start_processing(send_info, intf);
        if (rv)
@@ -2497,8 +2724,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 
        get_guid(intf);
 
-       if ((version_major > 1)
-           || ((version_major == 1) && (version_minor >= 5)))
+       if ((intf->ipmi_version_major > 1)
+           || ((intf->ipmi_version_major == 1)
+               && (intf->ipmi_version_minor >= 5)))
        {
                /* Start scanning the channels to see what is
                   available. */
@@ -2521,64 +2749,67 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        if (rv == 0)
                rv = add_proc_entries(intf, i);
 
-       rv = ipmi_bmc_register(intf);
+       rv = ipmi_bmc_register(intf, i, sysfs_name);
 
  out:
        if (rv) {
                if (intf->proc_dir)
                        remove_proc_entries(intf);
+               intf->handlers = NULL;
+               list_del_rcu(&intf->link);
+               mutex_unlock(&ipmi_interfaces_mutex);
+               mutex_unlock(&smi_watchers_mutex);
+               synchronize_rcu();
                kref_put(&intf->refcount, intf_free);
-               if (i < MAX_IPMI_INTERFACES) {
-                       spin_lock_irqsave(&interfaces_lock, flags);
-                       ipmi_interfaces[i] = NULL;
-                       spin_unlock_irqrestore(&interfaces_lock, flags);
-               }
        } else {
-               spin_lock_irqsave(&interfaces_lock, flags);
-               ipmi_interfaces[i] = intf;
-               spin_unlock_irqrestore(&interfaces_lock, flags);
+               /* After this point the interface is legal to use. */
+               intf->intf_num = i;
+               mutex_unlock(&ipmi_interfaces_mutex);
                call_smi_watchers(i, intf->si_dev);
+               mutex_unlock(&smi_watchers_mutex);
        }
 
        return rv;
 }
 
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+       int              i;
+       struct seq_table *ent;
+
+       /* No need for locks, the interface is down. */
+       for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+               ent = &(intf->seq_table[i]);
+               if (!ent->inuse)
+                       continue;
+               deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+       }
+}
+
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
-       int                     i;
        struct ipmi_smi_watcher *w;
-       unsigned long           flags;
+       int    intf_num = intf->intf_num;
 
        ipmi_bmc_unregister(intf);
 
-       spin_lock_irqsave(&interfaces_lock, flags);
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               if (ipmi_interfaces[i] == intf) {
-                       /* Set the interface number reserved until we
-                        * are done. */
-                       ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
-                       intf->intf_num = -1;
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&interfaces_lock,flags);
+       mutex_lock(&smi_watchers_mutex);
+       mutex_lock(&ipmi_interfaces_mutex);
+       intf->intf_num = -1;
+       intf->handlers = NULL;
+       list_del_rcu(&intf->link);
+       mutex_unlock(&ipmi_interfaces_mutex);
+       synchronize_rcu();
 
-       if (i == MAX_IPMI_INTERFACES)
-               return -ENODEV;
+       cleanup_smi_msgs(intf);
 
        remove_proc_entries(intf);
 
        /* Call all the watcher interfaces to tell them that
           an interface is gone. */
-       down_read(&smi_watchers_sem);
        list_for_each_entry(w, &smi_watchers, link)
-               w->smi_gone(i);
-       up_read(&smi_watchers_sem);
-
-       /* Allow the entry to be reused now. */
-       spin_lock_irqsave(&interfaces_lock, flags);
-       ipmi_interfaces[i] = NULL;
-       spin_unlock_irqrestore(&interfaces_lock,flags);
+               w->smi_gone(intf_num);
+       mutex_unlock(&smi_watchers_mutex);
 
        kref_put(&intf->refcount, intf_free);
        return 0;
@@ -2660,6 +2891,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
        struct ipmi_ipmb_addr    *ipmb_addr;
        struct ipmi_recv_msg     *recv_msg;
        unsigned long            flags;
+       struct ipmi_smi_handlers *handlers;
 
        if (msg->rsp_size < 10) {
                /* Message not big enough, just ignore it. */
@@ -2716,10 +2948,16 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
                printk("\n");
        }
 #endif
-               intf->handlers->sender(intf->send_info, msg, 0);
-
-               rv = -1; /* We used the message, so return the value that
-                           causes it to not be freed or queued. */
+               rcu_read_lock();
+               handlers = intf->handlers;
+               if (handlers) {
+                       handlers->sender(intf->send_info, msg, 0);
+                       /* We used the message, so return the value
+                          that causes it to not be freed or
+                          queued. */
+                       rv = -1;
+               }
+               rcu_read_unlock();
        } else {
                /* Deliver the message to the user. */
                spin_lock_irqsave(&intf->counter_lock, flags);
@@ -3309,16 +3547,6 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
        rcu_read_unlock();
 }
 
-static void
-handle_msg_timeout(struct ipmi_recv_msg *msg)
-{
-       msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-       msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
-       msg->msg.netfn |= 1; /* Convert to a response. */
-       msg->msg.data_len = 1;
-       msg->msg.data = msg->msg_data;
-       deliver_response(msg);
-}
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3350,7 +3578,11 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
                              struct list_head *timeouts, long timeout_period,
                              int slot, unsigned long *flags)
 {
-       struct ipmi_recv_msg *msg;
+       struct ipmi_recv_msg     *msg;
+       struct ipmi_smi_handlers *handlers;
+
+       if (intf->intf_num == -1)
+               return;
 
        if (!ent->inuse)
                return;
@@ -3393,13 +3625,19 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
                        return;
 
                spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
                /* Send the new message.  We send with a zero
                 * priority.  It timed out, I doubt time is
                 * that critical now, and high priority
                 * messages are really only for messages to the
                 * local MC, which don't get resent. */
-               intf->handlers->sender(intf->send_info,
-                                      smi_msg, 0);
+               handlers = intf->handlers;
+               if (handlers)
+                       handlers->sender(intf->send_info, smi_msg, 0);
+               else
+                       ipmi_free_smi_msg(smi_msg);
+
                spin_lock_irqsave(&intf->seq_lock, *flags);
        }
 }
@@ -3411,18 +3649,12 @@ static void ipmi_timeout_handler(long timeout_period)
        struct ipmi_recv_msg *msg, *msg2;
        struct ipmi_smi_msg  *smi_msg, *smi_msg2;
        unsigned long        flags;
-       int                  i, j;
+       int                  i;
 
        INIT_LIST_HEAD(&timeouts);
 
-       spin_lock(&interfaces_lock);
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               intf = ipmi_interfaces[i];
-               if (IPMI_INVALID_INTERFACE(intf))
-                       continue;
-               kref_get(&intf->refcount);
-               spin_unlock(&interfaces_lock);
-
+       rcu_read_lock();
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
                /* See if any waiting messages need to be processed. */
                spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
                list_for_each_entry_safe(smi_msg, smi_msg2,
@@ -3442,35 +3674,60 @@ static void ipmi_timeout_handler(long timeout_period)
                   have timed out, putting them in the timeouts
                   list. */
                spin_lock_irqsave(&intf->seq_lock, flags);
-               for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
-                       check_msg_timeout(intf, &(intf->seq_table[j]),
-                                         &timeouts, timeout_period, j,
+               for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+                       check_msg_timeout(intf, &(intf->seq_table[i]),
+                                         &timeouts, timeout_period, i,
                                          &flags);
                spin_unlock_irqrestore(&intf->seq_lock, flags);
 
                list_for_each_entry_safe(msg, msg2, &timeouts, link)
-                       handle_msg_timeout(msg);
-
-               kref_put(&intf->refcount, intf_free);
-               spin_lock(&interfaces_lock);
+                       deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+
+               /*
+                * Maintenance mode handling.  Check the timeout
+                * optimistically before we claim the lock.  It may
+                * mean a timeout gets missed occasionally, but that
+                * only means the timeout gets extended by one period
+                * in that case.  No big deal, and it avoids the lock
+                * most of the time.
+                */
+               if (intf->auto_maintenance_timeout > 0) {
+                       spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
+                       if (intf->auto_maintenance_timeout > 0) {
+                               intf->auto_maintenance_timeout
+                                       -= timeout_period;
+                               if (!intf->maintenance_mode
+                                   && (intf->auto_maintenance_timeout <= 0))
+                               {
+                                       intf->maintenance_mode_enable = 0;
+                                       maintenance_mode_update(intf);
+                               }
+                       }
+                       spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+                                              flags);
+               }
        }
-       spin_unlock(&interfaces_lock);
+       rcu_read_unlock();
 }
 
 static void ipmi_request_event(void)
 {
-       ipmi_smi_t intf;
-       int        i;
+       ipmi_smi_t               intf;
+       struct ipmi_smi_handlers *handlers;
 
-       spin_lock(&interfaces_lock);
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               intf = ipmi_interfaces[i];
-               if (IPMI_INVALID_INTERFACE(intf))
+       rcu_read_lock();
+       /* Called from the timer, no need to check if handlers is
+        * valid. */
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               /* No event requests when in maintenance mode. */
+               if (intf->maintenance_mode_enable)
                        continue;
 
-               intf->handlers->request_events(intf->send_info);
+               handlers = intf->handlers;
+               if (handlers)
+                       handlers->request_events(intf->send_info);
        }
-       spin_unlock(&interfaces_lock);
+       rcu_read_unlock();
 }
 
 static struct timer_list ipmi_timer;
@@ -3599,7 +3856,6 @@ static void send_panic_events(char *str)
        struct kernel_ipmi_msg            msg;
        ipmi_smi_t                        intf;
        unsigned char                     data[16];
-       int                               i;
        struct ipmi_system_interface_addr *si;
        struct ipmi_addr                  addr;
        struct ipmi_smi_msg               smi_msg;
@@ -3633,9 +3889,9 @@ static void send_panic_events(char *str)
        recv_msg.done = dummy_recv_done_handler;
 
        /* For every registered interface, send the event. */
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               intf = ipmi_interfaces[i];
-               if (IPMI_INVALID_INTERFACE(intf))
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               if (!intf->handlers)
+                       /* Interface is not ready. */
                        continue;
 
                /* Send the event announcing the panic. */
@@ -3660,13 +3916,14 @@ static void send_panic_events(char *str)
        if (!str) 
                return;
 
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
+       /* For every registered interface, send the event. */
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
                char                  *p = str;
                struct ipmi_ipmb_addr *ipmb;
                int                   j;
 
-               intf = ipmi_interfaces[i];
-               if (IPMI_INVALID_INTERFACE(intf))
+               if (intf->intf_num == -1)
+                       /* Interface was not ready yet. */
                        continue;
 
                /* First job here is to figure out where to send the
@@ -3792,7 +4049,6 @@ static int panic_event(struct notifier_block *this,
                       unsigned long         event,
                        void                  *ptr)
 {
-       int        i;
        ipmi_smi_t intf;
 
        if (has_panicked)
@@ -3800,9 +4056,9 @@ static int panic_event(struct notifier_block *this,
        has_panicked = 1;
 
        /* For every registered interface, set it to run to completion. */
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-               intf = ipmi_interfaces[i];
-               if (IPMI_INVALID_INTERFACE(intf))
+       list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+               if (!intf->handlers)
+                       /* Interface is not ready. */
                        continue;
 
                intf->handlers->set_run_to_completion(intf->send_info, 1);
@@ -3823,7 +4079,6 @@ static struct notifier_block panic_block = {
 
 static int ipmi_init_msghandler(void)
 {
-       int i;
        int rv;
 
        if (initialized)
@@ -3838,9 +4093,6 @@ static int ipmi_init_msghandler(void)
        printk(KERN_INFO "ipmi message handler version "
               IPMI_DRIVER_VERSION "\n");
 
-       for (i = 0; i < MAX_IPMI_INTERFACES; i++)
-               ipmi_interfaces[i] = NULL;
-
 #ifdef CONFIG_PROC_FS
        proc_ipmi_root = proc_mkdir("ipmi", NULL);
        if (!proc_ipmi_root) {
index 8d941db834570a98893af2a28ad5fc1ea01e11ec..597eb4f88b845b2c69f77b7d2e4508aaea0cd289 100644 (file)
@@ -43,6 +43,9 @@
 
 #define PFX "IPMI poweroff: "
 
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
 /* Definitions for controlling power off (if the system supports it).  It
  * conveniently matches the IPMI chassis control values. */
 #define IPMI_CHASSIS_POWER_DOWN                0       /* power down, the default. */
 /* the IPMI data command */
 static int poweroff_powercycle;
 
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready = 0;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, struct kernel_param *kp)
+{
+       int rv = param_set_int(val, kp);
+       if (rv)
+               return rv;
+       if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+               return 0;
+
+       ipmi_po_smi_gone(ipmi_ifnum);
+       ipmi_po_new_smi(ifnum_to_use, NULL);
+       return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+                 &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the poweroff "
+                "operation.  Setting to -1 defaults to the first registered "
+                "interface");
+
 /* parameter definition to allow user to flag power cycle */
 module_param(poweroff_powercycle, int, 0644);
 MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
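
/*
 * A usage sketch, assuming the standard module parameter paths for a
 * module named ipmi_poweroff (values are illustrative):
 *
 *   modprobe ipmi_poweroff poweroff_powercycle=1 ifnum_to_use=0
 *   echo 1 > /sys/module/ipmi_poweroff/parameters/ifnum_to_use
 *
 * Writing ifnum_to_use at runtime goes through set_param_ifnum() above,
 * which drops the currently bound interface and attaches to the new one.
 */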
@@ -142,6 +176,42 @@ static int ipmi_request_in_rc_mode(ipmi_user_t            user,
 #define IPMI_ATCA_GET_ADDR_INFO_CMD    0x01
 #define IPMI_PICMG_ID                  0
 
+#define IPMI_NETFN_OEM                         0x2e
+#define IPMI_ATCA_PPS_GRACEFUL_RESTART         0x11
+#define IPMI_ATCA_PPS_IANA                     "\x00\x40\x0A"
+#define IPMI_MOTOROLA_MANUFACTURER_ID          0x0000A1
+#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID      0x0051
+
+static void (*atca_oem_poweroff_hook)(ipmi_user_t user) = NULL;
+
+static void pps_poweroff_atca (ipmi_user_t user)
+{
+        struct ipmi_system_interface_addr smi_addr;
+        struct kernel_ipmi_msg            send_msg;
+        int                               rv;
+        /*
+         * Configure IPMI address for local access
+         */
+        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+        smi_addr.channel = IPMI_BMC_CHANNEL;
+        smi_addr.lun = 0;
+
+        printk(KERN_INFO PFX "PPS powerdown hook used\n");
+
+        send_msg.netfn = IPMI_NETFN_OEM;
+        send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+        send_msg.data = IPMI_ATCA_PPS_IANA;
+        send_msg.data_len = 3;
+        rv = ipmi_request_in_rc_mode(user,
+                                  (struct ipmi_addr *) &smi_addr,
+                                   &send_msg);
+        if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+                printk(KERN_ERR PFX "Unable to send ATCA PPS powerdown message,"
+                       " IPMI error 0x%x\n", rv);
+        }
+       return;
+}
+
 static int ipmi_atca_detect (ipmi_user_t user)
 {
        struct ipmi_system_interface_addr smi_addr;
@@ -167,6 +237,13 @@ static int ipmi_atca_detect (ipmi_user_t user)
        rv = ipmi_request_wait_for_response(user,
                                            (struct ipmi_addr *) &smi_addr,
                                            &send_msg);
+
+        printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
+       if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+           && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+               printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n");
+               atca_oem_poweroff_hook = pps_poweroff_atca;
+       }
        return !rv;
 }
 
@@ -200,12 +277,19 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
        rv = ipmi_request_in_rc_mode(user,
                                     (struct ipmi_addr *) &smi_addr,
                                     &send_msg);
-       if (rv) {
+       /*
+        * At this point the system may be shutting down, and most
+        * serial drivers (if used) will have interrupts turned off;
+        * it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
+        * return code.
+        */
+       if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
                printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
                       " IPMI error 0x%x\n", rv);
                goto out;
        }
 
+       if (atca_oem_poweroff_hook)
+               return atca_oem_poweroff_hook(user);
  out:
        return;
 }
@@ -440,15 +524,6 @@ static struct poweroff_function poweroff_functions[] = {
                      / sizeof(struct poweroff_function))
 
 
-/* Our local state. */
-static int ready = 0;
-static ipmi_user_t ipmi_user;
-static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
-
-/* Holds the old poweroff function so we can restore it on removal. */
-static void (*old_poweroff_func)(void);
-
-
 /* Called on a powerdown request. */
 static void ipmi_poweroff_function (void)
 {
@@ -473,6 +548,9 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
        if (ready)
                return;
 
+       if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+               return;
+
        rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
                              &ipmi_user);
        if (rv) {
@@ -481,6 +559,8 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
                return;
        }
 
+       ipmi_ifnum = if_num;
+
         /*
          * Do a get device id and store some results, since this is
         * used by several functions.
@@ -541,9 +621,15 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
 
 static void ipmi_po_smi_gone(int if_num)
 {
-       /* This can never be called, because once poweroff driver is
-          registered, the interface can't go away until the power
-          driver is unregistered. */
+       if (!ready)
+               return;
+
+       if (ipmi_ifnum != if_num)
+               return;
+
+       ready = 0;
+       ipmi_destroy_user(ipmi_user);
+       pm_power_off = old_poweroff_func;
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -616,9 +702,9 @@ static int ipmi_poweroff_init (void)
                printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
                goto out_err;
        }
-#endif
 
  out_err:
+#endif
        return rv;
 }
 
index bb1fac104fda63470bae62d5c068263c00a5f8e6..81a0c89598e790bc767b6552642da1dc69b3968f 100644 (file)
 #include "ipmi_si_sm.h"
 #include <linux/init.h>
 #include <linux/dmi.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+#define PFX "ipmi_si: "
 
 /* Measure times between events in the driver. */
 #undef DEBUG_TIMING
@@ -92,7 +96,7 @@ enum si_intf_state {
 enum si_type {
     SI_KCS, SI_SMIC, SI_BT
 };
-static char *si_to_str[] = { "KCS", "SMIC", "BT" };
+static char *si_to_str[] = { "kcs", "smic", "bt" };
 
 #define DEVICE_NAME "ipmi_si"
 
@@ -222,7 +226,10 @@ struct smi_info
 static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 
+static int unload_when_empty = 1;
+
 static int try_smi_init(struct smi_info *smi);
+static void cleanup_one_si(struct smi_info *to_clean);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block * nb)
@@ -240,14 +247,18 @@ static void deliver_recv_msg(struct smi_info *smi_info,
        spin_lock(&(smi_info->si_lock));
 }
 
-static void return_hosed_msg(struct smi_info *smi_info)
+static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 {
        struct ipmi_smi_msg *msg = smi_info->curr_msg;
 
+       if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
+               cCode = IPMI_ERR_UNSPECIFIED;
+       /* else use it as is */
+
        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
-       msg->rsp[2] = 0xFF; /* Unknown error. */
+       msg->rsp[2] = cCode;
        msg->rsp_size = 3;
 
        smi_info->curr_msg = NULL;
@@ -298,7 +309,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err) {
-                       return_hosed_msg(smi_info);
+                       return_hosed_msg(smi_info, err);
                }
 
                rv = SI_SM_CALL_WITHOUT_DELAY;
@@ -640,7 +651,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                        /* If we were handling a user message, format
                            a response to send to the upper layer to
                            tell it about the error. */
-                       return_hosed_msg(smi_info);
+                       return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
@@ -684,22 +695,24 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
        {
                /* We are idle and the upper layer requested that I fetch
                   events, so do so. */
-               unsigned char msg[2];
+               atomic_set(&smi_info->req_events, 0);
 
-               spin_lock(&smi_info->count_lock);
-               smi_info->flag_fetches++;
-               spin_unlock(&smi_info->count_lock);
+               smi_info->curr_msg = ipmi_alloc_smi_msg();
+               if (!smi_info->curr_msg)
+                       goto out;
 
-               atomic_set(&smi_info->req_events, 0);
-               msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-               msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+               smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
+               smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+               smi_info->curr_msg->data_size = 2;
 
                smi_info->handlers->start_transaction(
-                       smi_info->si_sm, msg, 2);
-               smi_info->si_state = SI_GETTING_FLAGS;
+                       smi_info->si_sm,
+                       smi_info->curr_msg->data,
+                       smi_info->curr_msg->data_size);
+               smi_info->si_state = SI_GETTING_EVENTS;
                goto restart;
        }
-
+ out:
        return si_sm_result;
 }
 
@@ -714,6 +727,15 @@ static void sender(void                *send_info,
        struct timeval    t;
 #endif
 
+       if (atomic_read(&smi_info->stop_operation)) {
+               msg->rsp[0] = msg->data[0] | 4;
+               msg->rsp[1] = msg->data[1];
+               msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+               msg->rsp_size = 3;
+               deliver_recv_msg(smi_info, msg);
+               return;
+       }
+
        spin_lock_irqsave(&(smi_info->msg_lock), flags);
 #ifdef DEBUG_TIMING
        do_gettimeofday(&t);
@@ -805,13 +827,21 @@ static void poll(void *send_info)
 {
        struct smi_info *smi_info = send_info;
 
-       smi_event_handler(smi_info, 0);
+       /*
+        * Make sure there is some delay in the poll loop so we can
+        * drive time forward and timeout things.
+        */
+       udelay(10);
+       smi_event_handler(smi_info, 10);
 }
 
 static void request_events(void *send_info)
 {
        struct smi_info *smi_info = send_info;
 
+       if (atomic_read(&smi_info->stop_operation))
+               return;
+
        atomic_set(&smi_info->req_events, 1);
 }
 
@@ -949,12 +979,21 @@ static int smi_start_processing(void       *send_info,
        return 0;
 }
 
+static void set_maintenance_mode(void *send_info, int enable)
+{
+       struct smi_info   *smi_info = send_info;
+
+       if (!enable)
+               atomic_set(&smi_info->req_events, 0);
+}
+
 static struct ipmi_smi_handlers handlers =
 {
        .owner                  = THIS_MODULE,
        .start_processing       = smi_start_processing,
        .sender                 = sender,
        .request_events         = request_events,
+       .set_maintenance_mode   = set_maintenance_mode,
        .set_run_to_completion  = set_run_to_completion,
        .poll                   = poll,
 };
@@ -987,6 +1026,16 @@ static int num_regshifts = 0;
 static int slave_addrs[SI_MAX_PARMS];
 static int num_slave_addrs = 0;
 
+#define IPMI_IO_ADDR_SPACE  0
+#define IPMI_MEM_ADDR_SPACE 1
+static char *addr_space_to_str[] = { "I/O", "mem" };
+
+static int hotmod_handler(const char *val, struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
+                " Documentation/IPMI.txt in the kernel sources for the"
+                " gory details.");
 
 module_param_named(trydefaults, si_trydefaults, bool, 0);
 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
@@ -1038,12 +1087,12 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
                 " disabled(0).  Normally the IPMI driver auto-detects"
                 " this, but the value may be overridden by this parm.");
+module_param(unload_when_empty, int, 0);
+MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
+                " specified or found, default is 1.  Setting to 0"
+                " is useful for hot add of devices using hotmod.");
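
/*
 * For example (illustrative), a system whose interfaces will only be
 * added later through the hotmod parameter can keep the driver loaded
 * even though nothing is found at load time:
 *
 *   modprobe ipmi_si unload_when_empty=0
 */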
 
 
-#define IPMI_IO_ADDR_SPACE  0
-#define IPMI_MEM_ADDR_SPACE 1
-static char *addr_space_to_str[] = { "I/O", "memory" };
-
 static void std_irq_cleanup(struct smi_info *info)
 {
        if (info->si_type == SI_BT)
@@ -1317,6 +1366,234 @@ static int mem_setup(struct smi_info *info)
        return 0;
 }
 
+/*
+ * Parms come in as <op1>[:op2[:op3...]].  ops are:
+ *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ *   rsp=<regspacing>
+ *   rsi=<regsize>
+ *   rsh=<regshift>
+ *   irq=<irq>
+ *   ipmb=<ipmb addr>
+ */
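
/*
 * For example (illustrative values; the path assumes the module is
 * named ipmi_si, and the full syntax is described in
 * Documentation/IPMI.txt), a KCS interface at I/O port 0xca2 could be
 * added and later removed at runtime with:
 *
 *   echo "add,kcs,i/o,0xca2,ipmb=0x20" > /sys/module/ipmi_si/parameters/hotmod
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */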
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+       char *name;
+       int  val;
+};
+static struct hotmod_vals hotmod_ops[] = {
+       { "add",        HM_ADD },
+       { "remove",     HM_REMOVE },
+       { NULL }
+};
+static struct hotmod_vals hotmod_si[] = {
+       { "kcs",        SI_KCS },
+       { "smic",       SI_SMIC },
+       { "bt",         SI_BT },
+       { NULL }
+};
+static struct hotmod_vals hotmod_as[] = {
+       { "mem",        IPMI_MEM_ADDR_SPACE },
+       { "i/o",        IPMI_IO_ADDR_SPACE },
+       { NULL }
+};
+static int ipmi_strcasecmp(const char *s1, const char *s2)
+{
+       while (*s1 || *s2) {
+               if (!*s1)
+                       return -1;
+               if (!*s2)
+                       return 1;
+               if (tolower(*s1) != tolower(*s2))
+                       return tolower(*s1) - tolower(*s2);
+               s1++;
+               s2++;
+       }
+       return 0;
+}
+static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
+{
+       char *s;
+       int  i;
+
+       s = strchr(*curr, ',');
+       if (!s) {
+               printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+               return -EINVAL;
+       }
+       *s = '\0';
+       s++;
+       for (i = 0; v[i].name; i++) {
+               if (ipmi_strcasecmp(*curr, v[i].name) == 0) {
+                       *val = v[i].val;
+                       *curr = s;
+                       return 0;
+               }
+       }
+
+       printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+       return -EINVAL;
+}
+
+static int hotmod_handler(const char *val, struct kernel_param *kp)
+{
+       char *str = kstrdup(val, GFP_KERNEL);
+       int  rv = -EINVAL;
+       char *next, *curr, *s, *n, *o;
+       enum hotmod_op op;
+       enum si_type si_type;
+       int  addr_space;
+       unsigned long addr;
+       int regspacing;
+       int regsize;
+       int regshift;
+       int irq;
+       int ipmb;
+       int ival;
+       struct smi_info *info;
+
+       if (!str)
+               return -ENOMEM;
+
+       /* Kill any trailing spaces, as we can get a "\n" from echo. */
+       ival = strlen(str) - 1;
+       while ((ival >= 0) && isspace(str[ival])) {
+               str[ival] = '\0';
+               ival--;
+       }
+
+       for (curr = str; curr; curr = next) {
+               regspacing = 1;
+               regsize = 1;
+               regshift = 0;
+               irq = 0;
+               ipmb = 0x20;
+
+               next = strchr(curr, ':');
+               if (next) {
+                       *next = '\0';
+                       next++;
+               }
+
+               rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+               if (rv)
+                       break;
+               op = ival;
+
+               rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+               if (rv)
+                       break;
+               si_type = ival;
+
+               rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+               if (rv)
+                       break;
+
+               s = strchr(curr, ',');
+               if (s) {
+                       *s = '\0';
+                       s++;
+               }
+               addr = simple_strtoul(curr, &n, 0);
+               if ((*n != '\0') || (*curr == '\0')) {
+                       printk(KERN_WARNING PFX "Invalid hotmod address"
+                              " '%s'\n", curr);
+                       break;
+               }
+
+               while (s) {
+                       curr = s;
+                       s = strchr(curr, ',');
+                       if (s) {
+                               *s = '\0';
+                               s++;
+                       }
+                       o = strchr(curr, '=');
+                       if (o) {
+                               *o = '\0';
+                               o++;
+                       }
+#define HOTMOD_INT_OPT(name, val) \
+                       if (ipmi_strcasecmp(curr, name) == 0) {         \
+                               if (!o) {                               \
+                                       printk(KERN_WARNING PFX         \
+                                              "No option given for '%s'\n", \
+                                               curr);                  \
+                                       goto out;                       \
+                               }                                       \
+                               val = simple_strtoul(o, &n, 0);         \
+                               if ((*n != '\0') || (*o == '\0')) {     \
+                                       printk(KERN_WARNING PFX         \
+                                              "Bad option given for '%s'\n", \
+                                              curr);                   \
+                                       goto out;                       \
+                               }                                       \
+                       }
+
+                       HOTMOD_INT_OPT("rsp", regspacing)
+                       else HOTMOD_INT_OPT("rsi", regsize)
+                       else HOTMOD_INT_OPT("rsh", regshift)
+                       else HOTMOD_INT_OPT("irq", irq)
+                       else HOTMOD_INT_OPT("ipmb", ipmb)
+                       else {
+                               printk(KERN_WARNING PFX
+                                      "Invalid hotmod option '%s'\n",
+                                      curr);
+                               goto out;
+                       }
+#undef HOTMOD_INT_OPT
+               }
+
+               if (op == HM_ADD) {
+                       info = kzalloc(sizeof(*info), GFP_KERNEL);
+                       if (!info) {
+                               rv = -ENOMEM;
+                               goto out;
+                       }
+
+                       info->addr_source = "hotmod";
+                       info->si_type = si_type;
+                       info->io.addr_data = addr;
+                       info->io.addr_type = addr_space;
+                       if (addr_space == IPMI_MEM_ADDR_SPACE)
+                               info->io_setup = mem_setup;
+                       else
+                               info->io_setup = port_setup;
+
+                       info->io.addr = NULL;
+                       info->io.regspacing = regspacing;
+                       if (!info->io.regspacing)
+                               info->io.regspacing = DEFAULT_REGSPACING;
+                       info->io.regsize = regsize;
+                       if (!info->io.regsize)
+                               info->io.regsize = DEFAULT_REGSPACING;
+                       info->io.regshift = regshift;
+                       info->irq = irq;
+                       if (info->irq)
+                               info->irq_setup = std_irq_setup;
+                       info->slave_addr = ipmb;
+
+                       try_smi_init(info);
+               } else {
+                       /* remove */
+                       struct smi_info *e, *tmp_e;
+
+                       mutex_lock(&smi_infos_lock);
+                       list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+                               if (e->io.addr_type != addr_space)
+                                       continue;
+                               if (e->si_type != si_type)
+                                       continue;
+                               if (e->io.addr_data == addr)
+                                       cleanup_one_si(e);
+                       }
+                       mutex_unlock(&smi_infos_lock);
+               }
+       }
+ out:
+       kfree(str);
+       return rv;
+}
 
 static __devinit void hardcode_find_bmc(void)
 {
@@ -1333,11 +1610,11 @@ static __devinit void hardcode_find_bmc(void)
 
                info->addr_source = "hardcoded";
 
-               if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+               if (!si_type[i] || ipmi_strcasecmp(si_type[i], "kcs") == 0) {
                        info->si_type = SI_KCS;
-               } else if (strcmp(si_type[i], "smic") == 0) {
+               } else if (ipmi_strcasecmp(si_type[i], "smic") == 0) {
                        info->si_type = SI_SMIC;
-               } else if (strcmp(si_type[i], "bt") == 0) {
+               } else if (ipmi_strcasecmp(si_type[i], "bt") == 0) {
                        info->si_type = SI_BT;
                } else {
                        printk(KERN_WARNING
@@ -1952,19 +2229,9 @@ static int try_get_dev_id(struct smi_info *smi_info)
 static int type_file_read_proc(char *page, char **start, off_t off,
                               int count, int *eof, void *data)
 {
-       char            *out = (char *) page;
        struct smi_info *smi = data;
 
-       switch (smi->si_type) {
-           case SI_KCS:
-               return sprintf(out, "kcs\n");
-           case SI_SMIC:
-               return sprintf(out, "smic\n");
-           case SI_BT:
-               return sprintf(out, "bt\n");
-           default:
-               return 0;
-       }
+       return sprintf(page, "%s\n", si_to_str[smi->si_type]);
 }
 
 static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -2000,7 +2267,24 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
        out += sprintf(out, "incoming_messages:     %ld\n",
                       smi->incoming_messages);
 
-       return (out - ((char *) page));
+       return out - page;
+}
+
+static int param_read_proc(char *page, char **start, off_t off,
+                          int count, int *eof, void *data)
+{
+       struct smi_info *smi = data;
+
+       return sprintf(page,
+                      "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+                      si_to_str[smi->si_type],
+                      addr_space_to_str[smi->io.addr_type],
+                      smi->io.addr_data,
+                      smi->io.regspacing,
+                      smi->io.regsize,
+                      smi->io.regshift,
+                      smi->irq,
+                      smi->slave_addr);
 }
 
 /*
@@ -2362,6 +2646,7 @@ static int try_smi_init(struct smi_info *new_smi)
                               new_smi,
                               &new_smi->device_id,
                               new_smi->dev,
+                              "bmc",
                               new_smi->slave_addr);
        if (rv) {
                printk(KERN_ERR
@@ -2390,6 +2675,16 @@ static int try_smi_init(struct smi_info *new_smi)
                goto out_err_stop_timer;
        }
 
+       rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
+                                    param_read_proc, NULL,
+                                    new_smi, THIS_MODULE);
+       if (rv) {
+               printk(KERN_ERR
+                      "ipmi_si: Unable to create proc entry: %d\n",
+                      rv);
+               goto out_err_stop_timer;
+       }
+
        list_add_tail(&new_smi->link, &smi_infos);
 
        mutex_unlock(&smi_infos_lock);
@@ -2483,7 +2778,12 @@ static __devinit int init_ipmi_si(void)
 #endif
 
 #ifdef CONFIG_PCI
-       pci_module_init(&ipmi_pci_driver);
+       rv = pci_register_driver(&ipmi_pci_driver);
+       if (rv){
+               printk(KERN_ERR
+                      "init_ipmi_si: Unable to register PCI driver: %d\n",
+                      rv);
+       }
 #endif
 
        if (si_trydefaults) {
@@ -2498,7 +2798,7 @@ static __devinit int init_ipmi_si(void)
        }
 
        mutex_lock(&smi_infos_lock);
-       if (list_empty(&smi_infos)) {
+       if (unload_when_empty && list_empty(&smi_infos)) {
                mutex_unlock(&smi_infos_lock);
 #ifdef CONFIG_PCI
                pci_unregister_driver(&ipmi_pci_driver);
@@ -2513,7 +2813,7 @@ static __devinit int init_ipmi_si(void)
 }
 module_init(init_ipmi_si);
 
-static void __devexit cleanup_one_si(struct smi_info *to_clean)
+static void cleanup_one_si(struct smi_info *to_clean)
 {
        int           rv;
        unsigned long flags;
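
A note on the HOTMOD_INT_OPT block added above: the macro body is a bare if-block, so the call sites can be chained with `else` exactly like hand-written if / else if tests, which is why the uses appear without trailing semicolons. A rough sketch of what the chain expands to, shortened to two of the five options, assuming the locals (curr, o, n, regspacing, regsize) declared earlier in hotmod_handler and with the printk diagnostics reduced to comments:

	if (ipmi_strcasecmp(curr, "rsp") == 0) {
		if (!o)
			goto out;	/* "No option given for 'rsp'" */
		regspacing = simple_strtoul(o, &n, 0);
		if ((*n != '\0') || (*o == '\0'))
			goto out;	/* "Bad option given for 'rsp'" */
	}
	else if (ipmi_strcasecmp(curr, "rsi") == 0) {
		if (!o)
			goto out;	/* "No option given for 'rsi'" */
		regsize = simple_strtoul(o, &n, 0);
		if ((*n != '\0') || (*o == '\0'))
			goto out;	/* "Bad option given for 'rsi'" */
	}
	else {
		/* "Invalid hotmod option" */
		goto out;
	}

The params proc entry added further down prints each interface back in the same comma-separated key=value vocabulary (rsp, rsi, rsh, irq, ipmb), presumably so that a running configuration can be read out and later fed back in through the hotmod parameter.
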
index 39d7e5ef1a2ba82ff6d9b3526f397b0e2184a550..e64ea7d25d241d858d7a3022e070de8952a4fc00 100644 (file)
@@ -141,12 +141,14 @@ static int start_smic_transaction(struct si_sm_data *smic,
 {
        unsigned int i;
 
-       if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) {
-               return -1;
-       }
-       if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
-               return -2;
-       }
+       if (size < 2)
+               return IPMI_REQ_LEN_INVALID_ERR;
+       if (size > MAX_SMIC_WRITE_SIZE)
+               return IPMI_REQ_LEN_EXCEEDED_ERR;
+
+       if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED))
+               return IPMI_NOT_IN_MY_STATE_ERR;
+
        if (smic_debug & SMIC_DEBUG_MSG) {
                printk(KERN_INFO "start_smic_transaction -");
                for (i = 0; i < size; i ++) {
index 73f759eaa5a6a2a777e733cf9da9bf8b4c7fa9e9..90fb2a541916e2efba863662d4e3cffa35698af4 100644 (file)
 static int nowayout = WATCHDOG_NOWAYOUT;
 
 static ipmi_user_t watchdog_user = NULL;
+static int watchdog_ifnum;
 
 /* Default the timeout to 10 seconds. */
 static int timeout = 10;
@@ -161,6 +162,8 @@ static struct fasync_struct *fasync_q = NULL;
 static char pretimeout_since_last_heartbeat = 0;
 static char expect_close;
 
+static int ifnum_to_use = -1;
+
 static DECLARE_RWSEM(register_sem);
 
 /* Parameters to ipmi_set_timeout */
@@ -169,6 +172,8 @@ static DECLARE_RWSEM(register_sem);
 #define IPMI_SET_TIMEOUT_FORCE_HB              2
 
 static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
 
 /* If true, the driver will start running as soon as it is configured
    and ready. */
@@ -245,6 +250,26 @@ static int get_param_str(char *buffer, struct kernel_param *kp)
        return strlen(buffer);
 }
 
+
+static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+{
+       int rv = param_set_int(val, kp);
+       if (rv)
+               return rv;
+       if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+               return 0;
+
+       ipmi_unregister_watchdog(watchdog_ifnum);
+       ipmi_register_watchdog(ifnum_to_use);
+       return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
+                 &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+                "timer.  Setting to -1 defaults to the first registered "
+                "interface");
+
 module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
 MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 
@@ -263,12 +288,13 @@ module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
 MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
                 "preop_none, preop_panic, preop_give_data.");
 
-module_param(start_now, int, 0);
+module_param(start_now, int, 0444);
 MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
                 "soon as the driver is loaded.");
 
 module_param(nowayout, int, 0644);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+                "(default=CONFIG_WATCHDOG_NOWAYOUT)");
 
 /* Default state of the timer. */
 static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
@@ -872,6 +898,11 @@ static void ipmi_register_watchdog(int ipmi_intf)
        if (watchdog_user)
                goto out;
 
+       if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+               goto out;
+
+       watchdog_ifnum = ipmi_intf;
+
        rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
        if (rv < 0) {
                printk(KERN_CRIT PFX "Unable to register with ipmi\n");
@@ -901,6 +932,39 @@ static void ipmi_register_watchdog(int ipmi_intf)
        }
 }
 
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+       int rv;
+
+       down_write(&register_sem);
+
+       if (!watchdog_user)
+               goto out;
+
+       if (watchdog_ifnum != ipmi_intf)
+               goto out;
+
+       /* Make sure no one can call us any more. */
+       misc_deregister(&ipmi_wdog_miscdev);
+
+       /* Wait to make sure the message makes it out.  The lower layer has
+          pointers to our buffers, we want to make sure they are done before
+          we release our memory. */
+       while (atomic_read(&set_timeout_tofree))
+               schedule_timeout_uninterruptible(1);
+
+       /* Disconnect from IPMI. */
+       rv = ipmi_destroy_user(watchdog_user);
+       if (rv) {
+               printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+                      rv);
+       }
+       watchdog_user = NULL;
+
+ out:
+       up_write(&register_sem);
+}
+
 #ifdef HAVE_NMI_HANDLER
 static int
 ipmi_nmi(void *dev_id, int cpu, int handled)
@@ -1004,9 +1068,7 @@ static void ipmi_new_smi(int if_num, struct device *device)
 
 static void ipmi_smi_gone(int if_num)
 {
-       /* This can never be called, because once the watchdog is
-          registered, the interface can't go away until the watchdog
-          is unregistered. */
+       ipmi_unregister_watchdog(if_num);
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -1148,30 +1210,32 @@ static int __init ipmi_wdog_init(void)
 
        check_parms();
 
+       register_reboot_notifier(&wdog_reboot_notifier);
+       atomic_notifier_chain_register(&panic_notifier_list,
+                       &wdog_panic_notifier);
+
        rv = ipmi_smi_watcher_register(&smi_watcher);
        if (rv) {
 #ifdef HAVE_NMI_HANDLER
                if (preaction_val == WDOG_PRETIMEOUT_NMI)
                        release_nmi(&ipmi_nmi_handler);
 #endif
+               atomic_notifier_chain_unregister(&panic_notifier_list,
+                                                &wdog_panic_notifier);
+               unregister_reboot_notifier(&wdog_reboot_notifier);
                printk(KERN_WARNING PFX "can't register smi watcher\n");
                return rv;
        }
 
-       register_reboot_notifier(&wdog_reboot_notifier);
-       atomic_notifier_chain_register(&panic_notifier_list,
-                       &wdog_panic_notifier);
-
        printk(KERN_INFO PFX "driver initialized\n");
 
        return 0;
 }
 
-static __exit void ipmi_unregister_watchdog(void)
+static void __exit ipmi_wdog_exit(void)
 {
-       int rv;
-
-       down_write(&register_sem);
+       ipmi_smi_watcher_unregister(&smi_watcher);
+       ipmi_unregister_watchdog(watchdog_ifnum);
 
 #ifdef HAVE_NMI_HANDLER
        if (nmi_handler_registered)
@@ -1179,37 +1243,8 @@ static __exit void ipmi_unregister_watchdog(void)
 #endif
 
        atomic_notifier_chain_unregister(&panic_notifier_list,
-                       &wdog_panic_notifier);
+                                        &wdog_panic_notifier);
        unregister_reboot_notifier(&wdog_reboot_notifier);
-
-       if (! watchdog_user)
-               goto out;
-
-       /* Make sure no one can call us any more. */
-       misc_deregister(&ipmi_wdog_miscdev);
-
-       /* Wait to make sure the message makes it out.  The lower layer has
-          pointers to our buffers, we want to make sure they are done before
-          we release our memory. */
-       while (atomic_read(&set_timeout_tofree))
-               schedule_timeout_uninterruptible(1);
-
-       /* Disconnect from IPMI. */
-       rv = ipmi_destroy_user(watchdog_user);
-       if (rv) {
-               printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
-                      rv);
-       }
-       watchdog_user = NULL;
-
- out:
-       up_write(&register_sem);
-}
-
-static void __exit ipmi_wdog_exit(void)
-{
-       ipmi_smi_watcher_unregister(&smi_watcher);
-       ipmi_unregister_watchdog();
 }
 module_exit(ipmi_wdog_exit);
 module_init(ipmi_wdog_init);
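
The new ifnum_to_use parameter above is wired up through module_param_call() so it can be changed after the module is loaded: param_set_int() stores the value first, and only then does set_param_wdog_ifnum() move the watchdog from the old interface to the new one. A minimal sketch of that pattern with hypothetical names (example_ifnum, example_switch_to), assuming the stock param_set_int/param_get_int helpers:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_ifnum = -1;	/* -1: keep the first interface found */

static void example_switch_to(int ifnum)
{
	/* hypothetical: unbind from the old interface, rebind to ifnum */
}

static int set_example_ifnum(const char *val, struct kernel_param *kp)
{
	int rv = param_set_int(val, kp);	/* parse and store first */

	if (rv)
		return rv;
	example_switch_to(example_ifnum);	/* then act on the new value */
	return 0;
}

module_param_call(example_ifnum, set_example_ifnum, param_get_int,
		  &example_ifnum, 0644);
MODULE_PARM_DESC(example_ifnum,
		 "Hypothetical runtime-settable interface number");

With mode 0644 the parameter shows up writable under /sys/module/<module>/parameters/, which is what allows the watchdog to be retargeted without reloading the driver.
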
index 58c955e390b3cf652ab6271f1184ca0d286de3dc..1637c1d9a4baf9f801da84c1e99bd39986ac3ed3 100644 (file)
@@ -530,9 +530,9 @@ sched_again:
 /*     Interrupt handlers      */
 
 
-static void isicom_bottomhalf(void *data)
+static void isicom_bottomhalf(struct work_struct *work)
 {
-       struct isi_port *port = (struct isi_port *) data;
+       struct isi_port *port = container_of(work, struct isi_port, bh_tqueue);
        struct tty_struct *tty = port->tty;
 
        if (!tty)
@@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty)
 }
 
 /* hangup et all */
-static void do_isicom_hangup(void *data)
+static void do_isicom_hangup(struct work_struct *work)
 {
-       struct isi_port *port = data;
+       struct isi_port *port = container_of(work, struct isi_port, hangup_tq);
        struct tty_struct *tty;
 
        tty = port->tty;
@@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void)
                        port->channel = channel;
                        port->close_delay = 50 * HZ/100;
                        port->closing_wait = 3000 * HZ/100;
-                       INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
-                       INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+                       INIT_WORK(&port->hangup_tq, do_isicom_hangup);
+                       INIT_WORK(&port->bh_tqueue, isicom_bottomhalf);
                        port->status = 0;
                        init_waitqueue_head(&port->open_wait);
                        init_waitqueue_head(&port->close_wait);
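
The isicom hunks just above, and most of the driver changes that follow, are mechanical conversions to the reworked workqueue API: INIT_WORK() no longer takes a data pointer, the handler receives the work_struct itself, and the owning object is recovered with container_of(). A minimal sketch of the before/after shape, using hypothetical names (foo_port, bh_work):

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct foo_port {
	int pending_events;		/* whatever the bottom half consumes */
	struct work_struct bh_work;	/* embedded work item */
};

/* new style: the handler gets the work_struct and digs out its container */
static void foo_bottomhalf(struct work_struct *work)
{
	struct foo_port *port = container_of(work, struct foo_port, bh_work);

	port->pending_events = 0;	/* illustrative body only */
}

static void foo_port_init(struct foo_port *port)
{
	/* old style was: INIT_WORK(&port->bh_work, foo_bottomhalf, port); */
	INIT_WORK(&port->bh_work, foo_bottomhalf);
	schedule_work(&port->bh_work);
}

Because the handler prototype changes from void (*)(void *) to void (*)(struct work_struct *), almost every hunk below touches both the function definition and its INIT_WORK() call site; delayed work follows the same shape via DECLARE_DELAYED_WORK(), as in the rekey_seq_generator change further down.
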
index ffdf9df1a67a3cb92a48bd5ce53596a69bc4fde4..8f591945ebd91d747f861b14ac0c440f99f5aac7 100644 (file)
@@ -663,7 +663,7 @@ static int  stli_initopen(stlibrd_t *brdp, stliport_t *portp);
 static int     stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
 static int     stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait);
 static int     stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp);
-static void    stli_dohangup(void *arg);
+static void    stli_dohangup(struct work_struct *);
 static int     stli_setport(stliport_t *portp);
 static int     stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
 static void    stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback);
@@ -1990,9 +1990,9 @@ static void stli_start(struct tty_struct *tty)
  *     aren't that time critical).
  */
 
-static void stli_dohangup(void *arg)
+static void stli_dohangup(struct work_struct *ugly_api)
 {
-       stliport_t *portp = (stliport_t *) arg;
+       stliport_t *portp = container_of(ugly_api, stliport_t, tqhangup);
        if (portp->tty != NULL) {
                tty_hangup(portp->tty);
        }
@@ -2898,7 +2898,7 @@ static int stli_initports(stlibrd_t *brdp)
                portp->baud_base = STL_BAUDBASE;
                portp->close_delay = STL_CLOSEDELAY;
                portp->closing_wait = 30 * HZ;
-               INIT_WORK(&portp->tqhangup, stli_dohangup, portp);
+               INIT_WORK(&portp->tqhangup, stli_dohangup);
                init_waitqueue_head(&portp->open_wait);
                init_waitqueue_head(&portp->close_wait);
                init_waitqueue_head(&portp->raw_wait);
@@ -3476,6 +3476,8 @@ static int stli_initecp(stlibrd_t *brdp)
        if (sig.magic != cpu_to_le32(ECP_MAGIC))
        {
                release_region(brdp->iobase, brdp->iosize);
+               iounmap(brdp->membase);
+               brdp->membase = NULL;
                return -ENODEV;
        }
 
@@ -3632,6 +3634,8 @@ static int stli_initonb(stlibrd_t *brdp)
            sig.magic3 != cpu_to_le16(ONB_MAGIC3))
        {
                release_region(brdp->iobase, brdp->iosize);
+               iounmap(brdp->membase);
+               brdp->membase = NULL;
                return -ENODEV;
        }
 
index 7a484fc7cb9eae84d0a3b04e82f18c6e7e263955..7e975f606924e04b6d24eef36707cbc97723cebc 100644 (file)
@@ -199,6 +199,8 @@ int misc_register(struct miscdevice * misc)
        dev_t dev;
        int err = 0;
 
+       INIT_LIST_HEAD(&misc->list);
+
        down(&misc_sem);
        list_for_each_entry(c, &misc_list, list) {
                if (c->minor == misc->minor) {
index 22b9905c1e526f7521c0b70a143c766c1e0f534c..c09160383a5332c9635c4813215dabd9063bade8 100644 (file)
@@ -680,7 +680,7 @@ static int __init mmtimer_init(void)
        if (sn_rtc_cycles_per_second < 100000) {
                printk(KERN_ERR "%s: unable to determine clock frequency\n",
                       MMTIMER_NAME);
-               return -1;
+               goto out1;
        }
 
        mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
@@ -689,13 +689,13 @@ static int __init mmtimer_init(void)
        if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, IRQF_PERCPU, MMTIMER_NAME, NULL)) {
                printk(KERN_WARNING "%s: unable to allocate interrupt.",
                        MMTIMER_NAME);
-               return -1;
+               goto out1;
        }
 
        if (misc_register(&mmtimer_miscdev)) {
                printk(KERN_ERR "%s: failed to register device\n",
                       MMTIMER_NAME);
-               return -1;
+               goto out2;
        }
 
        /* Get max numbered node, calculate slots needed */
@@ -709,16 +709,18 @@ static int __init mmtimer_init(void)
        if (timers == NULL) {
                printk(KERN_ERR "%s: failed to allocate memory for device\n",
                                MMTIMER_NAME);
-               return -1;
+               goto out3;
        }
 
+       memset(timers,0,(sizeof(mmtimer_t *)*maxn));
+
        /* Allocate mmtimer_t's for each online node */
        for_each_online_node(node) {
                timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
                if (timers[node] == NULL) {
                        printk(KERN_ERR "%s: failed to allocate memory for device\n",
                                MMTIMER_NAME);
-                       return -1;
+                       goto out4;
                }
                for (i=0; i< NUM_COMPARATORS; i++) {
                        mmtimer_t * base = timers[node] + i;
@@ -739,6 +741,17 @@ static int __init mmtimer_init(void)
               sn_rtc_cycles_per_second/(unsigned long)1E6);
 
        return 0;
+
+out4:
+       for_each_online_node(node) {
+               kfree(timers[node]);
+       }
+out3:
+       misc_deregister(&mmtimer_miscdev);
+out2:
+       free_irq(SGI_MMTIMER_VECTOR, NULL);
+out1:
+       return -1;
 }
 
 module_init(mmtimer_init);
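
The mmtimer_init() hunks above replace a series of bare "return -1" statements with the usual goto-unwind idiom: each acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order (note the memset added beforehand, which lets the out4 loop kfree() per-node slots that were never allocated, since kfree(NULL) is a no-op). A compact, self-contained sketch of the idiom with hypothetical names (EXAMPLE_IRQ, example_isr, example_miscdev):

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>

#define EXAMPLE_IRQ 42			/* hypothetical interrupt line */

static void *example_state;

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static struct miscdevice example_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "example",
};

static int __init example_init(void)
{
	int rv = -ENOMEM;

	example_state = kzalloc(128, GFP_KERNEL);
	if (!example_state)
		goto out;

	rv = request_irq(EXAMPLE_IRQ, example_isr, 0, "example", NULL);
	if (rv)
		goto out_free;

	rv = misc_register(&example_miscdev);
	if (rv)
		goto out_irq;

	return 0;

out_irq:
	free_irq(EXAMPLE_IRQ, NULL);
out_free:
	kfree(example_state);
out:
	return rv;
}
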
index 96cb1f07332b68e5fb5b77dfb68a74d6356c6b64..8b316953173dd5c168b2c00985c513207ea5c3ba 100644 (file)
@@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem;
 /*
  * static functions:
  */
-static void do_moxa_softint(void *);
+static void do_moxa_softint(struct work_struct *);
 static int moxa_open(struct tty_struct *, struct file *);
 static void moxa_close(struct tty_struct *, struct file *);
 static int moxa_write(struct tty_struct *, const unsigned char *, int);
@@ -363,7 +363,7 @@ static int __init moxa_init(void)
        for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) {
                ch->type = PORT_16550A;
                ch->port = i;
-               INIT_WORK(&ch->tqueue, do_moxa_softint, ch);
+               INIT_WORK(&ch->tqueue, do_moxa_softint);
                ch->tty = NULL;
                ch->close_delay = 5 * HZ / 10;
                ch->closing_wait = 30 * HZ;
@@ -498,9 +498,12 @@ static void __exit moxa_exit(void)
                printk("Couldn't unregister MOXA Intellio family serial driver\n");
        put_tty_driver(moxaDriver);
 
-       for (i = 0; i < MAX_BOARDS; i++)
+       for (i = 0; i < MAX_BOARDS; i++) {
+               if (moxaBaseAddr[i])
+                       iounmap(moxaBaseAddr[i]);
                if (moxa_boards[i].busType == MOXA_BUS_TYPE_PCI)
                        pci_dev_put(moxa_boards[i].pciInfo.pdev);
+       }
 
        if (verbose)
                printk("Done\n");
@@ -509,9 +512,9 @@ static void __exit moxa_exit(void)
 module_init(moxa_init);
 module_exit(moxa_exit);
 
-static void do_moxa_softint(void *private_)
+static void do_moxa_softint(struct work_struct *work)
 {
-       struct moxa_str *ch = (struct moxa_str *) private_;
+       struct moxa_str *ch = container_of(work, struct moxa_str, tqueue);
        struct tty_struct *tty;
 
        if (ch && (tty = ch->tty)) {
index 048d91142c172176fa03b033527e393f263bb8b9..5ed2486b7581dc18b0ca86ee4cefe017bb00975e 100644 (file)
@@ -389,7 +389,7 @@ static int mxser_init(void);
 /* static void   mxser_poll(unsigned long); */
 static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
 static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *);
-static void mxser_do_softint(void *);
+static void mxser_do_softint(struct work_struct *);
 static int mxser_open(struct tty_struct *, struct file *);
 static void mxser_close(struct tty_struct *, struct file *);
 static int mxser_write(struct tty_struct *, const unsigned char *, int);
@@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
                info->custom_divisor = hwconf->baud_base[i] * 16;
                info->close_delay = 5 * HZ / 10;
                info->closing_wait = 30 * HZ;
-               INIT_WORK(&info->tqueue, mxser_do_softint, info);
+               INIT_WORK(&info->tqueue, mxser_do_softint);
                info->normal_termios = mxvar_sdriver->init_termios;
                init_waitqueue_head(&info->open_wait);
                init_waitqueue_head(&info->close_wait);
@@ -917,9 +917,10 @@ static int mxser_init(void)
        return 0;
 }
 
-static void mxser_do_softint(void *private_)
+static void mxser_do_softint(struct work_struct *work)
 {
-       struct mxser_struct *info = private_;
+       struct mxser_struct *info =
+               container_of(work, struct mxser_struct, tqueue);
        struct tty_struct *tty;
 
        tty = info->tty;
index 50d20aafeb186638f836c3967644370c314168bf..211c93fda6fc6c789bbe5edc55b4c0618ae98661 100644 (file)
@@ -1764,29 +1764,11 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
        int rc;
 
        /* read the config-tuples */
-       tuple.DesiredTuple = CISTPL_CONFIG;
        tuple.Attributes = 0;
        tuple.TupleData = buf;
        tuple.TupleDataMax = sizeof(buf);
        tuple.TupleOffset = 0;
 
-       if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
-               fail_fn = GetFirstTuple;
-               goto cs_failed;
-       }
-       if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
-               fail_fn = GetTupleData;
-               goto cs_failed;
-       }
-       if ((fail_rc =
-            pcmcia_parse_tuple(link, &tuple, &parse)) != CS_SUCCESS) {
-               fail_fn = ParseTuple;
-               goto cs_failed;
-       }
-
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        link->io.BasePort2 = 0;
        link->io.NumPorts2 = 0;
        link->io.Attributes2 = 0;
@@ -1841,8 +1823,6 @@ static int cm4000_config(struct pcmcia_device * link, int devno)
 
        return 0;
 
-cs_failed:
-       cs_error(link, fail_fn, fail_rc);
 cs_release:
        cm4000_release(link);
        return -ENODEV;
@@ -1973,14 +1953,14 @@ static int __init cmm_init(void)
        printk(KERN_INFO "%s\n", version);
 
        cmm_class = class_create(THIS_MODULE, "cardman_4000");
-       if (!cmm_class)
-               return -1;
+       if (IS_ERR(cmm_class))
+               return PTR_ERR(cmm_class);
 
        major = register_chrdev(0, DEVICE_NAME, &cm4000_fops);
        if (major < 0) {
                printk(KERN_WARNING MODULE_NAME
                        ": could not get major number\n");
-               return -1;
+               return major;
        }
 
        rc = pcmcia_register_driver(&cm4000_driver);
index 55cf4be42976913547663b6b15b41b196d9c5800..9b1ff7e8f896b9257163e6b32549b4cb455dc5f0 100644 (file)
@@ -523,29 +523,11 @@ static int reader_config(struct pcmcia_device *link, int devno)
        int fail_fn, fail_rc;
        int rc;
 
-       tuple.DesiredTuple = CISTPL_CONFIG;
        tuple.Attributes = 0;
        tuple.TupleData = buf;
        tuple.TupleDataMax = sizeof(buf);
        tuple.TupleOffset = 0;
 
-       if ((fail_rc = pcmcia_get_first_tuple(link, &tuple)) != CS_SUCCESS) {
-               fail_fn = GetFirstTuple;
-               goto cs_failed;
-       }
-       if ((fail_rc = pcmcia_get_tuple_data(link, &tuple)) != CS_SUCCESS) {
-               fail_fn = GetTupleData;
-               goto cs_failed;
-       }
-       if ((fail_rc = pcmcia_parse_tuple(link, &tuple, &parse))
-                                                       != CS_SUCCESS) {
-               fail_fn = ParseTuple;
-               goto cs_failed;
-       }
-
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        link->io.BasePort2 = 0;
        link->io.NumPorts2 = 0;
        link->io.Attributes2 = 0;
@@ -609,8 +591,6 @@ static int reader_config(struct pcmcia_device *link, int devno)
 
        return 0;
 
-cs_failed:
-       cs_error(link, fail_fn, fail_rc);
 cs_release:
        reader_release(link);
        return -ENODEV;
@@ -721,14 +701,14 @@ static int __init cm4040_init(void)
 
        printk(KERN_INFO "%s\n", version);
        cmx_class = class_create(THIS_MODULE, "cardman_4040");
-       if (!cmx_class)
-               return -1;
+       if (IS_ERR(cmx_class))
+               return PTR_ERR(cmx_class);
 
        major = register_chrdev(0, DEVICE_NAME, &reader_fops);
        if (major < 0) {
                printk(KERN_WARNING MODULE_NAME
                        ": could not get major number\n");
-               return -1;
+               return major;
        }
 
        rc = pcmcia_register_driver(&reader_driver);
index 1a0bc30b79d10f8e858b998378c4b6ad68a4f99e..74d21c1c104fe10b5dcb20be4a8e5d8907082e4a 100644 (file)
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ds.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -235,7 +237,7 @@ typedef struct _mgslpc_info {
        int dosyncppp;
        spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        struct net_device *netdev;
 #endif
 
@@ -392,7 +394,7 @@ static void tx_timeout(unsigned long context);
 
 static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(MGSLPC_INFO *info);
 static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size);
@@ -421,7 +423,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id);
 /*
  * Bottom half interrupt handlers
  */
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(MGSLPC_INFO *info);
 static void bh_status(MGSLPC_INFO *info);
 
@@ -547,7 +549,7 @@ static int mgslpc_probe(struct pcmcia_device *link)
 
     memset(info, 0, sizeof(MGSLPC_INFO));
     info->magic = MGSLPC_MAGIC;
-    INIT_WORK(&info->task, bh_handler, info);
+    INIT_WORK(&info->task, bh_handler);
     info->max_frame_size = 4096;
     info->close_delay = 5*HZ/10;
     info->closing_wait = 30*HZ;
@@ -604,17 +606,10 @@ static int mgslpc_config(struct pcmcia_device *link)
     if (debug_level >= DEBUG_LEVEL_INFO)
            printk("mgslpc_config(0x%p)\n", link);
 
-    /* read CONFIG tuple to find its configuration registers */
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.Attributes = 0;
     tuple.TupleData = buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
 
     /* get CIS configuration entry */
 
@@ -842,9 +837,9 @@ static int bh_action(MGSLPC_INFO *info)
        return rc;
 }
 
-static void bh_handler(void* Context)
+static void bh_handler(struct work_struct *work)
 {
-       MGSLPC_INFO *info = (MGSLPC_INFO*)Context;
+       MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task);
        int action;
 
        if (!info)
@@ -1060,7 +1055,7 @@ static void tx_done(MGSLPC_INFO *info)
                info->drop_rts_on_tx_done = 0;
        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else 
@@ -1171,7 +1166,7 @@ static void dcd_change(MGSLPC_INFO *info)
        }
        else
                info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount) {
                if (info->serial_signals & SerialSignal_DCD)
                        netif_carrier_on(info->netdev);
@@ -2960,7 +2955,7 @@ static void mgslpc_add_device(MGSLPC_INFO *info)
        printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
                info->device_name, info->io_base, info->irq_level);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        hdlcdev_init(info);
 #endif
 }
@@ -2976,7 +2971,7 @@ static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
                                last->next_device = info->next_device;
                        else
                                mgslpc_device_list = info->next_device;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        hdlcdev_exit(info);
 #endif
                        release_resources(info);
@@ -3908,7 +3903,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
                                return_frame = 1;
                }
                framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                {
                        struct net_device_stats *stats = hdlc_stats(info->netdev);
                        stats->rx_errors++;
@@ -3942,7 +3937,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
                                ++framesize;
                        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount)
                                hdlcdev_rx(info, buf->data, framesize);
                        else
@@ -4098,7 +4093,7 @@ static void tx_timeout(unsigned long context)
 
        spin_unlock_irqrestore(&info->lock,flags);
        
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else
@@ -4106,7 +4101,7 @@ static void tx_timeout(unsigned long context)
                bh_transmit(info);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
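
The SYNCLINK_GENERIC_HDLC guard introduced above replaces the old trick of defining CONFIG_HDLC whenever CONFIG_HDLC_MODULE was set. Kconfig defines CONFIG_FOO for built-in code and CONFIG_FOO_MODULE for modular builds, so the old trick also enabled the generic-HDLC paths when the HDLC core was a module but this driver was built in, a combination that cannot link. The new guard enables those paths only when the HDLC core is actually reachable, and the same pattern recurs in the later hunks with the corresponding CONFIG_SYNCLINK_MODULE, CONFIG_SYNCLINK_GT_MODULE and CONFIG_SYNCLINKMP_MODULE symbols. An annotated restatement of the guard (sketch only; the real change is in the hunk above):

/*
 * True when the generic HDLC core is usable from this driver: either HDLC
 * is built in (always linkable), or both HDLC and this driver are modules.
 * The remaining case, HDLC as a module with the driver built in, must
 * compile the HDLC hooks out, which is why the macro is defined to 0 or 1
 * and tested with #if rather than #ifdef.
 */
#if defined(CONFIG_HDLC) || \
    (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif

#if SYNCLINK_GENERIC_HDLC
/* generic-HDLC-only declarations and code go here */
#endif
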
index d40df30c2b10014a220ec0739f33bf05d4f603a2..4c6782a1ecdba4f5682ecfed4b8ffa45f7e40ec2 100644 (file)
@@ -1422,9 +1422,9 @@ static struct keydata {
 
 static unsigned int ip_cnt;
 
-static void rekey_seq_generator(void *private_);
+static void rekey_seq_generator(struct work_struct *work);
 
-static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
 
 /*
  * Lock avoidance:
@@ -1438,7 +1438,7 @@ static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL);
  * happen, and even if that happens only a not perfectly compliant
  * ISN is generated, nothing fatal.
  */
-static void rekey_seq_generator(void *private_)
+static void rekey_seq_generator(struct work_struct *work)
 {
        struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
 
index 7ac68cb3beddfa61df80e8ca0cfb22a3b53cf70b..e79b2ede8510c6cecbffffa1c0ba72f2823de580 100644 (file)
@@ -1026,6 +1026,7 @@ static int __init rio_init(void)
                        found++;
                } else {
                        iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+                       p->RIOHosts[p->RIONumHosts].Caddr = NULL;
                }
        }
 
@@ -1078,6 +1079,7 @@ static int __init rio_init(void)
                        found++;
                } else {
                        iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
+                       p->RIOHosts[p->RIONumHosts].Caddr = NULL;
                }
 #else
                printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n");
@@ -1117,8 +1119,10 @@ static int __init rio_init(void)
                                }
                        }
 
-                       if (!okboard)
+                       if (!okboard) {
                                iounmap(hp->Caddr);
+                               hp->Caddr = NULL;
+                       }
                }
        }
 
@@ -1188,6 +1192,8 @@ static void __exit rio_exit(void)
                }
                /* It is safe/allowed to del_timer a non-active timer */
                del_timer(&hp->timer);
+               if (hp->Caddr)
+                       iounmap(hp->Caddr);
                if (hp->Type == RIO_PCI)
                        pci_dev_put(hp->pdev);
        }
index 5ab32b38f45a28f55737c1b88ead868d299671ea..0a77bfcd5b5e8a571a98fded62d3f39653d3cb5b 100644 (file)
 static struct riscom_board * IRQ_to_board[16];
 static struct tty_driver *riscom_driver;
 
-static unsigned long baud_table[] =  {
-       0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-       9600, 19200, 38400, 57600, 76800, 0, 
-};
-
 static struct riscom_board rc_board[RC_NBOARD] =  {
        {
                .base   = RC_IOBASE1,
@@ -1516,9 +1511,9 @@ static void rc_start(struct tty_struct * tty)
  *     do_rc_hangup() -> tty->hangup() -> rc_hangup()
  * 
  */
-static void do_rc_hangup(void *private_)
+static void do_rc_hangup(struct work_struct *ugly_api)
 {
-       struct riscom_port      *port = (struct riscom_port *) private_;
+       struct riscom_port      *port = container_of(ugly_api, struct riscom_port, tqueue_hangup);
        struct tty_struct       *tty;
        
        tty = port->tty;
@@ -1567,9 +1562,9 @@ static void rc_set_termios(struct tty_struct * tty, struct termios * old_termios
        }
 }
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *ugly_api)
 {
-       struct riscom_port      *port = (struct riscom_port *) private_;
+       struct riscom_port      *port = container_of(ugly_api, struct riscom_port, tqueue);
        struct tty_struct       *tty;
        
        if(!(tty = port->tty)) 
@@ -1632,8 +1627,8 @@ static inline int rc_init_drivers(void)
        memset(rc_port, 0, sizeof(rc_port));
        for (i = 0; i < RC_NPORT * RC_NBOARD; i++)  {
                rc_port[i].magic = RISCOM8_MAGIC;
-               INIT_WORK(&rc_port[i].tqueue, do_softint, &rc_port[i]);
-               INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup, &rc_port[i]);
+               INIT_WORK(&rc_port[i].tqueue, do_softint);
+               INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup);
                rc_port[i].close_delay = 50 * HZ/100;
                rc_port[i].closing_wait = 3000 * HZ/100;
                init_waitqueue_head(&rc_port[i].open_wait);
index 3af7f0958c5d85a6a49a96ca8562eb2e31e122d3..9ba13af234be4e041d323eddb5a8c58eb3ec264d 100644 (file)
@@ -706,9 +706,9 @@ cd2401_rx_interrupt(int irq, void *dev_id)
  * had to poll every port to see if that port needed servicing.
  */
 static void
-do_softint(void *private_)
+do_softint(struct work_struct *ugly_api)
 {
-  struct cyclades_port *info = (struct cyclades_port *) private_;
+  struct cyclades_port *info = container_of(ugly_api, struct cyclades_port, tqueue);
   struct tty_struct    *tty;
 
     tty = info->tty;
@@ -2273,7 +2273,7 @@ scrn[1] = '\0';
                info->blocked_open = 0;
                info->default_threshold = 0;
                info->default_timeout = 0;
-               INIT_WORK(&info->tqueue, do_softint, info);
+               INIT_WORK(&info->tqueue, do_softint);
                init_waitqueue_head(&info->open_wait);
                init_waitqueue_head(&info->close_wait);
                /* info->session */
index c084149153de1050bb996fdb59cf017bd2137936..fc87070f18660b584003abd2184fc1ba6d0507f1 100644 (file)
@@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state)
        sonypi_device.bluetooth_power = state;
 }
 
-static void input_keyrelease(void *data)
+static void input_keyrelease(struct work_struct *work)
 {
        struct sonypi_keypress kp;
 
@@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev)
                        goto err_inpdev_unregister;
                }
 
-               INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
+               INIT_WORK(&sonypi_device.input_work, input_keyrelease);
        }
 
        sonypi_enable(0);
index 7e1bd9562c2ac3c2e63b40fb50dc92019b5231f8..99137ab66b625f0e9e4d16dc836f6b13ac088aa2 100644 (file)
@@ -2261,9 +2261,10 @@ static void sx_start(struct tty_struct * tty)
  *     do_sx_hangup() -> tty->hangup() -> sx_hangup()
  *
  */
-static void do_sx_hangup(void *private_)
+static void do_sx_hangup(struct work_struct *work)
 {
-       struct specialix_port   *port = (struct specialix_port *) private_;
+       struct specialix_port   *port =
+               container_of(work, struct specialix_port, tqueue_hangup);
        struct tty_struct       *tty;
 
        func_enter();
@@ -2336,9 +2337,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios
 }
 
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *work)
 {
-       struct specialix_port   *port = (struct specialix_port *) private_;
+       struct specialix_port   *port =
+               container_of(work, struct specialix_port, tqueue);
        struct tty_struct       *tty;
 
        func_enter();
@@ -2411,8 +2413,8 @@ static int sx_init_drivers(void)
        memset(sx_port, 0, sizeof(sx_port));
        for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
                sx_port[i].magic = SPECIALIX_MAGIC;
-               INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]);
-               INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]);
+               INIT_WORK(&sx_port[i].tqueue, do_softint);
+               INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
                sx_port[i].close_delay = 50 * HZ/100;
                sx_port[i].closing_wait = 3000 * HZ/100;
                init_waitqueue_head(&sx_port[i].open_wait);
index 522e88e395ccac7358ed2cb2ac1b999244af55c2..5e2de62bce706dd7b4f7843e5f608c98f1cb96a3 100644 (file)
@@ -500,7 +500,7 @@ static int  stl_echatintr(stlbrd_t *brdp);
 static int     stl_echmcaintr(stlbrd_t *brdp);
 static int     stl_echpciintr(stlbrd_t *brdp);
 static int     stl_echpci64intr(stlbrd_t *brdp);
-static void    stl_offintr(void *private);
+static void    stl_offintr(struct work_struct *);
 static stlbrd_t *stl_allocbrd(void);
 static stlport_t *stl_getport(int brdnr, int panelnr, int portnr);
 
@@ -2081,14 +2081,12 @@ static int stl_echpci64intr(stlbrd_t *brdp)
 /*
  *     Service an off-level request for some channel.
  */
-static void stl_offintr(void *private)
+static void stl_offintr(struct work_struct *work)
 {
-       stlport_t               *portp;
+       stlport_t               *portp = container_of(work, stlport_t, tqueue);
        struct tty_struct       *tty;
        unsigned int            oldsigs;
 
-       portp = private;
-
 #ifdef DEBUG
        printk("stl_offintr(portp=%x)\n", (int) portp);
 #endif
@@ -2156,7 +2154,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
                portp->baud_base = STL_BAUDBASE;
                portp->close_delay = STL_CLOSEDELAY;
                portp->closing_wait = 30 * HZ;
-               INIT_WORK(&portp->tqueue, stl_offintr, portp);
+               INIT_WORK(&portp->tqueue, stl_offintr);
                init_waitqueue_head(&portp->open_wait);
                init_waitqueue_head(&portp->close_wait);
                portp->stats.brd = portp->brdnr;
index 06784adcc35c78f7f17650aa69722359b11129a2..645187b9141e527d012c613e665e9f15ea5e9ecc 100644 (file)
 #include <linux/hdlc.h>
 #include <linux/dma-mapping.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -320,7 +322,7 @@ struct mgsl_struct {
        int dosyncppp;
        spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        struct net_device *netdev;
 #endif
 };
@@ -728,7 +730,7 @@ static void usc_loopmode_send_done( struct mgsl_struct * info );
 
 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(struct mgsl_struct *info);
 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
@@ -802,7 +804,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u
 /*
  * Bottom half interrupt handlers
  */
-static void mgsl_bh_handler(void* Context);
+static void mgsl_bh_handler(struct work_struct *work);
 static void mgsl_bh_receive(struct mgsl_struct *info);
 static void mgsl_bh_transmit(struct mgsl_struct *info);
 static void mgsl_bh_status(struct mgsl_struct *info);
@@ -1071,9 +1073,10 @@ static int mgsl_bh_action(struct mgsl_struct *info)
 /*
  *     Perform bottom half processing of work items queued by ISR.
  */
-static void mgsl_bh_handler(void* Context)
+static void mgsl_bh_handler(struct work_struct *work)
 {
-       struct mgsl_struct *info = (struct mgsl_struct*)Context;
+       struct mgsl_struct *info =
+               container_of(work, struct mgsl_struct, task);
        int action;
 
        if (!info)
@@ -1276,7 +1279,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
                info->drop_rts_on_tx_done = 0;
        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else 
@@ -1341,7 +1344,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
                                info->input_signal_events.dcd_up++;
                        } else
                                info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount) {
                                if (status & MISCSTATUS_DCD)
                                        netif_carrier_on(info->netdev);
@@ -4312,7 +4315,7 @@ static void mgsl_add_device( struct mgsl_struct *info )
                        info->max_frame_size );
        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        hdlcdev_init(info);
 #endif
 
@@ -4337,7 +4340,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
        } else {
                memset(info, 0, sizeof(struct mgsl_struct));
                info->magic = MGSL_MAGIC;
-               INIT_WORK(&info->task, mgsl_bh_handler, info);
+               INIT_WORK(&info->task, mgsl_bh_handler);
                info->max_frame_size = 4096;
                info->close_delay = 5*HZ/10;
                info->closing_wait = 30*HZ;
@@ -4470,7 +4473,7 @@ static void synclink_cleanup(void)
 
        info = mgsl_device_list;
        while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                hdlcdev_exit(info);
 #endif
                mgsl_release_resources(info);
@@ -6644,7 +6647,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
                                return_frame = 1;
                }
                framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                {
                        struct net_device_stats *stats = hdlc_stats(info->netdev);
                        stats->rx_errors++;
@@ -6720,7 +6723,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
                                                *ptmp);
                        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount)
                                hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
                        else
@@ -7624,7 +7627,7 @@ static void mgsl_tx_timeout(unsigned long context)
 
        spin_unlock_irqrestore(&info->irq_spinlock,flags);
        
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else
@@ -7700,7 +7703,7 @@ static int usc_loopmode_active( struct mgsl_struct * info)
        return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
index d4334c79f8d44cece757c8f56630a39a23308fb7..e4730a7312b50d8e3ad4788c9207c294566ae51d 100644 (file)
 
 #include "linux/synclink.h"
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 /*
@@ -171,7 +173,7 @@ static void set_break(struct tty_struct *tty, int break_state);
 /*
  * generic HDLC support and callbacks
  */
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(struct slgt_info *info);
 static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
@@ -359,7 +361,7 @@ struct slgt_info {
        int netcount;
        int dosyncppp;
        spinlock_t netlock;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        struct net_device *netdev;
 #endif
 
@@ -485,7 +487,7 @@ static void enable_loopback(struct slgt_info *info);
 static void set_rate(struct slgt_info *info, u32 data_rate);
 
 static int  bh_action(struct slgt_info *info);
-static void bh_handler(void* context);
+static void bh_handler(struct work_struct *work);
 static void bh_transmit(struct slgt_info *info);
 static void isr_serial(struct slgt_info *info);
 static void isr_rdma(struct slgt_info *info);
@@ -1354,7 +1356,7 @@ static void set_break(struct tty_struct *tty, int break_state)
        spin_unlock_irqrestore(&info->lock,flags);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -1878,9 +1880,9 @@ static int bh_action(struct slgt_info *info)
 /*
  * perform bottom half processing
  */
-static void bh_handler(void* context)
+static void bh_handler(struct work_struct *work)
 {
-       struct slgt_info *info = context;
+       struct slgt_info *info = container_of(work, struct slgt_info, task);
        int action;
 
        if (!info)
@@ -2002,7 +2004,7 @@ static void dcd_change(struct slgt_info *info)
        } else {
                info->input_signal_events.dcd_down++;
        }
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount) {
                if (info->signals & SerialSignal_DCD)
                        netif_carrier_on(info->netdev);
@@ -2180,7 +2182,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
                        set_signals(info);
                }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                if (info->netcount)
                        hdlcdev_tx_done(info);
                else
@@ -3306,7 +3308,7 @@ static void add_device(struct slgt_info *info)
                devstr, info->device_name, info->phys_reg_addr,
                info->irq_level, info->max_frame_size);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        hdlcdev_init(info);
 #endif
 }
@@ -3326,7 +3328,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
        } else {
                memset(info, 0, sizeof(struct slgt_info));
                info->magic = MGSL_MAGIC;
-               INIT_WORK(&info->task, bh_handler, info);
+               INIT_WORK(&info->task, bh_handler);
                info->max_frame_size = 4096;
                info->raw_rx_size = DMABUFSIZE;
                info->close_delay = 5*HZ/10;
@@ -3488,7 +3490,7 @@ static void slgt_cleanup(void)
        /* release devices */
        info = slgt_device_list;
        while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                hdlcdev_exit(info);
 #endif
                free_dma_bufs(info);
@@ -3522,6 +3524,7 @@ static int __init slgt_init(void)
 
        if (!slgt_device_list) {
                printk("%s no devices found\n",driver_name);
+               pci_unregister_driver(&pci_driver);
                return -ENODEV;
        }
 
@@ -4433,7 +4436,7 @@ check_again:
                        framesize = 0;
        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (framesize == 0) {
                struct net_device_stats *stats = hdlc_stats(info->netdev);
                stats->rx_errors++;
@@ -4476,7 +4479,7 @@ check_again:
                                framesize++;
                        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount)
                                hdlcdev_rx(info,info->tmp_rbuf, framesize);
                        else
@@ -4779,7 +4782,7 @@ static void tx_timeout(unsigned long context)
        info->tx_count = 0;
        spin_unlock_irqrestore(&info->lock,flags);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else
@@ -4799,6 +4802,6 @@ static void rx_timeout(unsigned long context)
        spin_lock_irqsave(&info->lock, flags);
        info->pending_bh |= BH_RECEIVE;
        spin_unlock_irqrestore(&info->lock, flags);
-       bh_handler(info);
+       bh_handler(&info->task);
 }
 
index 3e932b68137118286959ecbd39f3df7de41eeaa2..20a96ef250bec9e3074544d43fac7889efdc5a95 100644 (file)
 #include <linux/workqueue.h>
 #include <linux/hdlc.h>
 
-#ifdef CONFIG_HDLC_MODULE
-#define CONFIG_HDLC 1
+#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
+#define SYNCLINK_GENERIC_HDLC 1
+#else
+#define SYNCLINK_GENERIC_HDLC 0
 #endif
 
 #define GET_USER(error,value,addr) error = get_user(value,addr)
@@ -280,7 +282,7 @@ typedef struct _synclinkmp_info {
        int dosyncppp;
        spinlock_t netlock;
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        struct net_device *netdev;
 #endif
 
@@ -536,7 +538,7 @@ static void throttle(struct tty_struct * tty);
 static void unthrottle(struct tty_struct * tty);
 static void set_break(struct tty_struct *tty, int break_state);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
 static void hdlcdev_tx_done(SLMP_INFO *info);
 static void hdlcdev_rx(SLMP_INFO *info, char *buf, int size);
@@ -602,7 +604,7 @@ static void enable_loopback(SLMP_INFO *info, int enable);
 static void set_rate(SLMP_INFO *info, u32 data_rate);
 
 static int  bh_action(SLMP_INFO *info);
-static void bh_handler(void* Context);
+static void bh_handler(struct work_struct *work);
 static void bh_receive(SLMP_INFO *info);
 static void bh_transmit(SLMP_INFO *info);
 static void bh_status(SLMP_INFO *info);
@@ -1607,7 +1609,7 @@ static void set_break(struct tty_struct *tty, int break_state)
        spin_unlock_irqrestore(&info->lock,flags);
 }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
 
 /**
  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
@@ -2063,9 +2065,9 @@ int bh_action(SLMP_INFO *info)
 
 /* Perform bottom half processing of work items queued by ISR.
  */
-void bh_handler(void* Context)
+void bh_handler(struct work_struct *work)
 {
-       SLMP_INFO *info = (SLMP_INFO*)Context;
+       SLMP_INFO *info = container_of(work, SLMP_INFO, task);
        int action;
 
        if (!info)
@@ -2339,7 +2341,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
                        set_signals(info);
                }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                if (info->netcount)
                        hdlcdev_tx_done(info);
                else
@@ -2523,7 +2525,7 @@ void isr_io_pin( SLMP_INFO *info, u16 status )
                                info->input_signal_events.dcd_up++;
                        } else
                                info->input_signal_events.dcd_down++;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount) {
                                if (status & SerialSignal_DCD)
                                        netif_carrier_on(info->netdev);
@@ -3783,7 +3785,7 @@ void add_device(SLMP_INFO *info)
                info->irq_level,
                info->max_frame_size );
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        hdlcdev_init(info);
 #endif
 }
@@ -3805,7 +3807,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
        } else {
                memset(info, 0, sizeof(SLMP_INFO));
                info->magic = MGSL_MAGIC;
-               INIT_WORK(&info->task, bh_handler, info);
+               INIT_WORK(&info->task, bh_handler);
                info->max_frame_size = 4096;
                info->close_delay = 5*HZ/10;
                info->closing_wait = 30*HZ;
@@ -3977,7 +3979,7 @@ static void synclinkmp_cleanup(void)
        /* release devices */
        info = synclinkmp_device_list;
        while(info) {
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                hdlcdev_exit(info);
 #endif
                free_dma_bufs(info);
@@ -4979,7 +4981,7 @@ CheckAgain:
                        info->icount.rxcrc++;
 
                framesize = 0;
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                {
                        struct net_device_stats *stats = hdlc_stats(info->netdev);
                        stats->rx_errors++;
@@ -5020,7 +5022,7 @@ CheckAgain:
                                        index = 0;
                        }
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
                        if (info->netcount)
                                hdlcdev_rx(info,info->tmp_rx_buf,framesize);
                        else
@@ -5531,7 +5533,7 @@ void tx_timeout(unsigned long context)
 
        spin_unlock_irqrestore(&info->lock,flags);
 
-#ifdef CONFIG_HDLC
+#if SYNCLINK_GENERIC_HDLC
        if (info->netcount)
                hdlcdev_tx_done(info);
        else
index 5f49280779fb0a98c567ec12e2350a634d95eece..05810c8d20bca7162794f399014153f1c9390add 100644 (file)
@@ -182,6 +182,18 @@ static struct sysrq_key_op sysrq_showstate_op = {
        .enable_mask    = SYSRQ_ENABLE_DUMP,
 };
 
+static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+{
+       show_state_filter(TASK_UNINTERRUPTIBLE);
+}
+static struct sysrq_key_op sysrq_showstate_blocked_op = {
+       .handler        = sysrq_handle_showstate_blocked,
+       .help_msg       = "showBlockedTasks",
+       .action_msg     = "Show Blocked State",
+       .enable_mask    = SYSRQ_ENABLE_DUMP,
+};
+
+
 static void sysrq_handle_showmem(int key, struct tty_struct *tty)
 {
        show_mem();
@@ -219,13 +231,13 @@ static struct sysrq_key_op sysrq_term_op = {
        .enable_mask    = SYSRQ_ENABLE_SIGNAL,
 };
 
-static void moom_callback(void *ignored)
+static void moom_callback(struct work_struct *ignored)
 {
        out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
                        GFP_KERNEL, 0);
 }
 
-static DECLARE_WORK(moom_work, moom_callback, NULL);
+static DECLARE_WORK(moom_work, moom_callback);
 
 static void sysrq_handle_moom(int key, struct tty_struct *tty)
 {
@@ -304,7 +316,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
        /* May be assigned at init time by SMP VOYAGER */
        NULL,                           /* v */
        NULL,                           /* w */
-       NULL,                           /* x */
+       &sysrq_showstate_blocked_op,    /* x */
        NULL,                           /* y */
        NULL                            /* z */
 };
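
The hunks in this merge repeatedly convert work-queue callbacks from the old three-argument form, where INIT_WORK() carried an opaque void *data, to the new two-argument form in which the handler receives the struct work_struct itself and recovers its context with container_of(). A minimal sketch of the new pattern, using a hypothetical my_dev structure rather than code from this tree:

        #include <linux/workqueue.h>
        #include <linux/slab.h>

        struct my_dev {
                int counter;
                struct work_struct work;
        };

        static void my_dev_work(struct work_struct *work)
        {
                /* the work item is embedded, so container_of() recovers the device */
                struct my_dev *dev = container_of(work, struct my_dev, work);

                dev->counter++;
        }

        static struct my_dev *my_dev_alloc(void)
        {
                struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

                if (!dev)
                        return NULL;
                INIT_WORK(&dev->work, my_dev_work);     /* no third 'data' argument */
                return dev;
        }
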
index dd36fd04a8421c8c873e7754408ca196de64eaf7..07067c31c4ec7344237ce33165e85760b88afebb 100644 (file)
@@ -249,6 +249,7 @@ int tosh_smm(SMMRegisters *regs)
 
        return eax;
 }
+EXPORT_SYMBOL(tosh_smm);
 
 
 static int tosh_ioctl(struct inode *ip, struct file *fp, unsigned int cmd,
index 6e1329d404d201ce50f5ff981fede21cbf317d95..33e1f66e39cb62550a762724b8dcbfd4d7d5326b 100644 (file)
@@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr)
        schedule_work(&chip->work);
 }
 
-static void timeout_work(void *ptr)
+static void timeout_work(struct work_struct *work)
 {
-       struct tpm_chip *chip = ptr;
+       struct tpm_chip *chip = container_of(work, struct tpm_chip, work);
 
        down(&chip->buffer_mutex);
        atomic_set(&chip->data_pending, 0);
@@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
        init_MUTEX(&chip->tpm_mutex);
        INIT_LIST_HEAD(&chip->list);
 
-       INIT_WORK(&chip->work, timeout_work, chip);
+       INIT_WORK(&chip->work, timeout_work);
 
        init_timer(&chip->user_read_timer);
        chip->user_read_timer.function = user_reader_timeout;
@@ -1155,6 +1155,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
 
        if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
                list_del(&chip->list);
+               misc_deregister(&chip->vendor.miscdev);
                put_device(dev);
                clear_bit(chip->dev_num, dev_mask);
                kfree(chip);
index 50dc49205a231a2c5854a0e9699e39e5db9e6bed..b3cfc8bc613c7bbfeee7ef7ce037e638793f32a0 100644 (file)
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
        
 /**
  *     do_tty_hangup           -       actual handler for hangup events
- *     @data: tty device
+ *     @work: tty device
  *
  *     This can be called by the "eventd" kernel thread.  That is process
  *     synchronous but doesn't hold any locks, so we need to make sure we
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
  *             tasklist_lock to walk task list for hangup event
  *
  */
-static void do_tty_hangup(void *data)
+static void do_tty_hangup(struct work_struct *work)
 {
-       struct tty_struct *tty = (struct tty_struct *) data;
+       struct tty_struct *tty =
+               container_of(work, struct tty_struct, hangup_work);
        struct file * cons_filp = NULL;
        struct file *filp, *f = NULL;
        struct task_struct *p;
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty)
 
        printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
 #endif
-       do_tty_hangup((void *) tty);
+       do_tty_hangup(&tty->hangup_work);
 }
 EXPORT_SYMBOL(tty_vhangup);
 
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file,
  * Nasty bug: do_SAK is being called in interrupt context.  This can
  * deadlock.  We punt it up to process context.  AKPM - 16Mar2001
  */
-static void __do_SAK(void *arg)
+static void __do_SAK(struct work_struct *work)
 {
+       struct tty_struct *tty =
+               container_of(work, struct tty_struct, SAK_work);
 #ifdef TTY_SOFT_SAK
        tty_hangup(tty);
 #else
-       struct tty_struct *tty = arg;
        struct task_struct *g, *p;
        int session;
        int             i;
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty)
 {
        if (!tty)
                return;
-       PREPARE_WORK(&tty->SAK_work, __do_SAK, tty);
+       PREPARE_WORK(&tty->SAK_work, __do_SAK);
        schedule_work(&tty->SAK_work);
 }
 
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK);
 
 /**
  *     flush_to_ldisc
- *     @private_: tty structure passed from work queue.
+ *     @work: tty structure passed from work queue.
  *
  *     This routine is called out of the software interrupt to flush data
  *     from the buffer chain to the line discipline.
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK);
  *     receive_buf method is single threaded for each tty instance.
  */
  
-static void flush_to_ldisc(void *private_)
+static void flush_to_ldisc(struct work_struct *work)
 {
-       struct tty_struct *tty = (struct tty_struct *) private_;
+       struct tty_struct *tty =
+               container_of(work, struct tty_struct, buf.work.work);
        unsigned long   flags;
        struct tty_ldisc *disc;
        struct tty_buffer *tbuf, *head;
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty)
        spin_unlock_irqrestore(&tty->buf.lock, flags);
 
        if (tty->low_latency)
-               flush_to_ldisc((void *) tty);
+               flush_to_ldisc(&tty->buf.work.work);
        else
                schedule_delayed_work(&tty->buf.work, 1);
 }
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty)
        tty->overrun_time = jiffies;
        tty->buf.head = tty->buf.tail = NULL;
        tty_buffer_init(tty);
-       INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+       INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
        init_MUTEX(&tty->buf.pty_sem);
        mutex_init(&tty->termios_mutex);
        init_waitqueue_head(&tty->write_wait);
        init_waitqueue_head(&tty->read_wait);
-       INIT_WORK(&tty->hangup_work, do_tty_hangup, tty);
+       INIT_WORK(&tty->hangup_work, do_tty_hangup);
        mutex_init(&tty->atomic_read_lock);
        mutex_init(&tty->atomic_write_lock);
        spin_lock_init(&tty->read_lock);
        INIT_LIST_HEAD(&tty->tty_files);
-       INIT_WORK(&tty->SAK_work, NULL, NULL);
+       INIT_WORK(&tty->SAK_work, NULL);
 }
 
 /*
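
When the embedded item is a struct delayed_work, as tty->buf.work becomes above, the handler is still passed the inner struct work_struct *, so container_of() must go through the .work member. A minimal sketch along the same lines, again with a hypothetical structure rather than code from this tree:

        #include <linux/workqueue.h>

        struct my_buf {
                int pending;
                struct delayed_work work;
        };

        static void my_buf_flush(struct work_struct *work)
        {
                /* delayed_work embeds a work_struct member named 'work' */
                struct my_buf *buf = container_of(work, struct my_buf, work.work);

                buf->pending = 0;
        }

        static void my_buf_init(struct my_buf *buf)
        {
                buf->pending = 1;
                INIT_DELAYED_WORK(&buf->work, my_buf_flush);
                schedule_delayed_work(&buf->work, 1);   /* run roughly one jiffy later */
        }
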
index 87587b4385abcb88dc72d4a113052d17baae9c5e..a8239dac994fee2264bf638bd3f5bff3176f446f 100644 (file)
@@ -152,10 +152,10 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y);
 static void save_cur(struct vc_data *vc);
 static void reset_terminal(struct vc_data *vc, int do_clear);
 static void con_flush_chars(struct tty_struct *tty);
-static void set_vesa_blanking(char __user *p);
+static int set_vesa_blanking(char __user *p);
 static void set_cursor(struct vc_data *vc);
 static void hide_cursor(struct vc_data *vc);
-static void console_callback(void *ignored);
+static void console_callback(struct work_struct *ignored);
 static void blank_screen_t(unsigned long dummy);
 static void set_palette(struct vc_data *vc);
 
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
 static int blankinterval = 10*60*HZ;
 static int vesa_off_interval;
 
-static DECLARE_WORK(console_work, console_callback, NULL);
+static DECLARE_WORK(console_work, console_callback);
 
 /*
  * fg_console is the current virtual console,
@@ -2154,7 +2154,7 @@ out:
  * with other console code and prevention of re-entrancy is
  * ensured with console_sem.
  */
-static void console_callback(void *ignored)
+static void console_callback(struct work_struct *ignored)
 {
        acquire_console_sem();
 
@@ -2369,7 +2369,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
                        ret = __put_user(data, p);
                        break;
                case TIOCL_SETVESABLANK:
-                       set_vesa_blanking(p);
+                       ret = set_vesa_blanking(p);
                        break;
                case TIOCL_GETKMSGREDIRECT:
                        data = kmsg_redirect;
@@ -3313,11 +3313,15 @@ postcore_initcall(vtconsole_class_init);
  *     Screen blanking
  */
 
-static void set_vesa_blanking(char __user *p)
+static int set_vesa_blanking(char __user *p)
 {
-    unsigned int mode;
-    get_user(mode, p + 1);
-    vesa_blank_mode = (mode < 4) ? mode : 0;
+       unsigned int mode;
+
+       if (get_user(mode, p + 1))
+               return -EFAULT;
+
+       vesa_blank_mode = (mode < 4) ? mode : 0;
+       return 0;
 }
 
 void do_blank_screen(int entering_gfx)
index e275dd4a705dea0503e19b1d3ad21ac75832e045..61138726b501ea710459f3f36dbbef085eee475f 100644 (file)
@@ -634,7 +634,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, const struct usb_devi
        usb_pcwd->intr_size = (le16_to_cpu(endpoint->wMaxPacketSize) > 8 ? le16_to_cpu(endpoint->wMaxPacketSize) : 8);
 
        /* set up the memory buffers */
-       if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, SLAB_ATOMIC, &usb_pcwd->intr_dma))) {
+       if (!(usb_pcwd->intr_buffer = usb_buffer_alloc(udev, usb_pcwd->intr_size, GFP_ATOMIC, &usb_pcwd->intr_dma))) {
                printk(KERN_ERR PFX "Out of memory\n");
                goto error;
        }
index 05f8ce2cfb4ab77a950a54371c8559b7fa17ef49..b418b16e910e9b92f155207a87b49e8f7b1d7f3b 100644 (file)
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(struct work_struct *work)
 {
-       struct cn_callback_data *d = data;
+       struct cn_callback_entry *cbq =
+               container_of(work, struct cn_callback_entry, work.work);
+       struct cn_callback_data *d = &cbq->data;
 
        d->callback(d->callback_priv);
 
@@ -57,7 +59,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc
        memcpy(&cbq->id.id, id, sizeof(struct cb_id));
        cbq->data.callback = callback;
        
-       INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
+       INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper);
        return cbq;
 }
 
index b49bacfd8de8cb6800a833526424a53a303ddefb..5e7cd45d10eec4e97a28469d9a9e41512a5d0228 100644 (file)
@@ -135,40 +135,39 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
        spin_lock_bh(&dev->cbdev->queue_lock);
        list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-                       if (likely(!test_bit(0, &__cbq->work.pending) &&
+                       if (likely(!test_bit(WORK_STRUCT_PENDING,
+                                            &__cbq->work.work.management) &&
                                        __cbq->data.ddata == NULL)) {
                                __cbq->data.callback_priv = msg;
 
                                __cbq->data.ddata = data;
                                __cbq->data.destruct_data = destruct_data;
 
-                               if (queue_work(dev->cbdev->cn_queue,
-                                               &__cbq->work))
+                               if (queue_delayed_work(
+                                           dev->cbdev->cn_queue,
+                                           &__cbq->work, 0))
                                        err = 0;
                        } else {
-                               struct work_struct *w;
                                struct cn_callback_data *d;
                                
-                               w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
-                               if (w) {
-                                       d = (struct cn_callback_data *)(w+1);
-
+                               __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC);
+                               if (__cbq) {
+                                       d = &__cbq->data;
                                        d->callback_priv = msg;
                                        d->callback = __cbq->data.callback;
                                        d->ddata = data;
                                        d->destruct_data = destruct_data;
-                                       d->free = w;
+                                       d->free = __cbq;
 
-                                       INIT_LIST_HEAD(&w->entry);
-                                       w->pending = 0;
-                                       w->func = &cn_queue_wrapper;
-                                       w->data = d;
-                                       init_timer(&w->timer);
+                                       INIT_DELAYED_WORK(&__cbq->work,
+                                                         &cn_queue_wrapper);
                                        
-                                       if (queue_work(dev->cbdev->cn_queue, w))
+                                       if (queue_delayed_work(
+                                                   dev->cbdev->cn_queue,
+                                                   &__cbq->work, 0))
                                                err = 0;
                                        else {
-                                               kfree(w);
+                                               kfree(__cbq);
                                                err = -EINVAL;
                                        }
                                } else
index dd0c2623e27be0312eba1220c8d1eb38fe09bef3..47ab42db122a6b87f654e668ba5bd4f3ae70725f 100644 (file)
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
-static void handle_update(void *data);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        mutex_init(&policy->lock);
        mutex_lock(&policy->lock);
        init_completion(&policy->kobj_unregister);
-       INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+       INIT_WORK(&policy->update, handle_update);
 
        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-       unsigned int cpu = (unsigned int)(long)data;
+       struct cpufreq_policy *policy =
+               container_of(work, struct cpufreq_policy, update);
+       unsigned int cpu = policy->cpu;
        dprintk("handle_update for cpu %u called\n", cpu);
        cpufreq_update_policy(cpu);
 }
@@ -1535,7 +1537,6 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
@@ -1575,7 +1576,6 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
 {
     .notifier_call = cpufreq_cpu_callback,
 };
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
index c4c578defabfa0e5df0251d430fde58fee7d45ab..5ef5ede5b8848e96c3d7d4ba00d9e7e8a11bc60f 100644 (file)
@@ -59,7 +59,7 @@ static unsigned int                           def_sampling_rate;
 #define MAX_SAMPLING_DOWN_FACTOR               (10)
 #define TRANSITION_LATENCY_LIMIT               (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
        struct cpufreq_policy   *cur_policy;
@@ -82,7 +82,7 @@ static unsigned int dbs_enable;       /* number of CPUs using this policy */
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX    (dbs_mutex);
-static DECLARE_WORK    (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
        unsigned int            sampling_rate;
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu)
        }
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 { 
        int i;
        lock_cpu_hotplug();
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data)
 
 static inline void dbs_timer_init(void)
 {
-       INIT_WORK(&dbs_work, do_dbs_timer, NULL);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        return;
index bf8aa45d4f019cccd866103d278fa9e7bd4f669d..e1cc5113c2ae4695eb998efacb8a9747a7eecbc0 100644 (file)
@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate;
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER   (1000)
 #define TRANSITION_LATENCY_LIMIT               (10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
        cputime64_t prev_cpu_idle;
        cputime64_t prev_cpu_wall;
        struct cpufreq_policy *cur_policy;
-       struct work_struct work;
+       struct delayed_work work;
+       enum dbs_sample sample_type;
        unsigned int enable;
        struct cpufreq_frequency_table *freq_table;
        unsigned int freq_lo;
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
        }
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
        unsigned int cpu = smp_processor_id();
        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+       enum dbs_sample sample_type = dbs_info->sample_type;
        /* We want all CPUs to do sampling nearly on same jiffy */
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+       /* Permit rescheduling of this work item */
+       work_release(work);
+
        delay -= jiffies % delay;
 
        if (!dbs_info->enable)
                return;
        /* Common NORMAL_SAMPLE setup */
-       INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+       dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        if (!dbs_tuners_ins.powersave_bias ||
-           (unsigned long) data == DBS_NORMAL_SAMPLE) {
+           sample_type == DBS_NORMAL_SAMPLE) {
                lock_cpu_hotplug();
                dbs_check_cpu(dbs_info);
                unlock_cpu_hotplug();
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
-                       INIT_WORK(&dbs_info->work, do_dbs_timer,
-                                       (void *)DBS_SUB_SAMPLE);
+                       dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                }
        } else {
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu)
        delay -= jiffies % delay;
 
        ondemand_powersave_bias_init();
-       INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+       INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+       dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
index adb554153f672a48d2d4d9f711513ef93d5ce9c8..e816535ab305e33b06ec2368dc668fa43a4f3b21 100644 (file)
@@ -51,4 +51,17 @@ config CRYPTO_DEV_PADLOCK_SHA
          If unsure say M. The compiled module will be
          called padlock-sha.ko
 
+config CRYPTO_DEV_GEODE
+       tristate "Support for the Geode LX AES engine"
+       depends on CRYPTO && X86_32
+       select CRYPTO_ALGAPI
+       select CRYPTO_BLKCIPHER
+       default m
+       help
+         Say 'Y' here to use the AMD Geode LX processor on-board AES
+         engine for the CryptoAPI AES algorithm.
+
+         To compile this driver as a module, choose M here: the module
+         will be called geode-aes.
+
 endmenu
index 4c3d0ec1cf805a8b8407d959b6e085e9a2864460..6059cf8694141c077dc592e3186865d815aa6097 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
new file mode 100644 (file)
index 0000000..43a6839
--- /dev/null
@@ -0,0 +1,474 @@
+ /* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/algapi.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+
+#include "geode-aes.h"
+
+/* Register definitions */
+
+#define AES_CTRLA_REG  0x0000
+
+#define AES_CTRL_START     0x01
+#define AES_CTRL_DECRYPT   0x00
+#define AES_CTRL_ENCRYPT   0x02
+#define AES_CTRL_WRKEY     0x04
+#define AES_CTRL_DCA       0x08
+#define AES_CTRL_SCA       0x10
+#define AES_CTRL_CBC       0x20
+
+#define AES_INTR_REG  0x0008
+
+#define AES_INTRA_PENDING (1 << 16)
+#define AES_INTRB_PENDING (1 << 17)
+
+#define AES_INTR_PENDING  (AES_INTRA_PENDING | AES_INTRB_PENDING)
+#define AES_INTR_MASK     0x07
+
+#define AES_SOURCEA_REG   0x0010
+#define AES_DSTA_REG      0x0014
+#define AES_LENA_REG      0x0018
+#define AES_WRITEKEY0_REG 0x0030
+#define AES_WRITEIV0_REG  0x0040
+
+/*  A very large counter that is used to gracefully bail out of an
+ *  operation in case of trouble
+ */
+
+#define AES_OP_TIMEOUT    0x50000
+
+/* Static structures */
+
+static void __iomem * _iobase;
+static spinlock_t lock;
+
+/* Write a 128 bit field (either a writable key or IV) */
+static inline void
+_writefield(u32 offset, void *value)
+{
+       int i;
+       for(i = 0; i < 4; i++)
+               iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
+}
+
+/* Read a 128 bit field (either a writable key or IV) */
+static inline void
+_readfield(u32 offset, void *value)
+{
+       int i;
+       for(i = 0; i < 4; i++)
+               ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
+}
+
+static int
+do_crypt(void *src, void *dst, int len, u32 flags)
+{
+       u32 status;
+       u32 counter = AES_OP_TIMEOUT;
+
+       iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
+       iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
+       iowrite32(len,  _iobase + AES_LENA_REG);
+
+       /* Start the operation */
+       iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);
+
+       do
+               status = ioread32(_iobase + AES_INTR_REG);
+       while(!(status & AES_INTRA_PENDING) && --counter);
+
+       /* Clear the event */
+       iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
+       return counter ? 0 : 1;
+}
+
+static unsigned int
+geode_aes_crypt(struct geode_aes_op *op)
+{
+
+       u32 flags = 0;
+       unsigned long iflags;
+
+       if (op->len == 0 || op->src == op->dst)
+               return 0;
+
+       if (op->flags & AES_FLAGS_COHERENT)
+               flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
+
+       if (op->dir == AES_DIR_ENCRYPT)
+               flags |= AES_CTRL_ENCRYPT;
+
+       /* Start the critical section */
+
+       spin_lock_irqsave(&lock, iflags);
+
+       if (op->mode == AES_MODE_CBC) {
+               flags |= AES_CTRL_CBC;
+               _writefield(AES_WRITEIV0_REG, op->iv);
+       }
+
+       if (op->flags & AES_FLAGS_USRKEY) {
+               flags |= AES_CTRL_WRKEY;
+               _writefield(AES_WRITEKEY0_REG, op->key);
+       }
+
+       do_crypt(op->src, op->dst, op->len, flags);
+
+       if (op->mode == AES_MODE_CBC)
+               _readfield(AES_WRITEIV0_REG, op->iv);
+
+       spin_unlock_irqrestore(&lock, iflags);
+
+       return op->len;
+}
+
+/* CRYPTO-API Functions */
+
+static int
+geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
+{
+       struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+       if (len != AES_KEY_LENGTH) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+
+       memcpy(op->key, key, len);
+       return 0;
+}
+
+static void
+geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+       struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+       if ((out == NULL) || (in == NULL))
+               return;
+
+       op->src = (void *) in;
+       op->dst = (void *) out;
+       op->mode = AES_MODE_ECB;
+       op->flags = 0;
+       op->len = AES_MIN_BLOCK_SIZE;
+       op->dir = AES_DIR_ENCRYPT;
+
+       geode_aes_crypt(op);
+}
+
+
+static void
+geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+       struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+       if ((out == NULL) || (in == NULL))
+               return;
+
+       op->src = (void *) in;
+       op->dst = (void *) out;
+       op->mode = AES_MODE_ECB;
+       op->flags = 0;
+       op->len = AES_MIN_BLOCK_SIZE;
+       op->dir = AES_DIR_DECRYPT;
+
+       geode_aes_crypt(op);
+}
+
+
+static struct crypto_alg geode_alg = {
+       .cra_name               =       "aes",
+       .cra_driver_name        =       "geode-aes-128",
+       .cra_priority           =       300,
+       .cra_alignmask          =       15,
+       .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          =       AES_MIN_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct geode_aes_op),
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(geode_alg.cra_list),
+       .cra_u                  =       {
+               .cipher = {
+                       .cia_min_keysize        =  AES_KEY_LENGTH,
+                       .cia_max_keysize        =  AES_KEY_LENGTH,
+                       .cia_setkey             =  geode_setkey,
+                       .cia_encrypt            =  geode_encrypt,
+                       .cia_decrypt            =  geode_decrypt
+               }
+       }
+};
+
+static int
+geode_cbc_decrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err, ret;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while((nbytes = walk.nbytes)) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->mode = AES_MODE_CBC;
+               op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+               op->dir = AES_DIR_DECRYPT;
+
+               memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+
+               ret = geode_aes_crypt(op);
+
+               memcpy(walk.iv, op->iv, AES_IV_LENGTH);
+               nbytes -= ret;
+
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static int
+geode_cbc_encrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err, ret;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while((nbytes = walk.nbytes)) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->mode = AES_MODE_CBC;
+               op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+               op->dir = AES_DIR_ENCRYPT;
+
+               memcpy(op->iv, walk.iv, AES_IV_LENGTH);
+
+               ret = geode_aes_crypt(op);
+               nbytes -= ret;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static struct crypto_alg geode_cbc_alg = {
+       .cra_name               =       "cbc(aes)",
+       .cra_driver_name        =       "cbc-aes-geode-128",
+       .cra_priority           =       400,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       AES_MIN_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct geode_aes_op),
+       .cra_alignmask          =       15,
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(geode_cbc_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       AES_KEY_LENGTH,
+                       .max_keysize            =       AES_KEY_LENGTH,
+                       .setkey                 =       geode_setkey,
+                       .encrypt                =       geode_cbc_encrypt,
+                       .decrypt                =       geode_cbc_decrypt,
+               }
+       }
+};
+
+static int
+geode_ecb_decrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err, ret;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while((nbytes = walk.nbytes)) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->mode = AES_MODE_ECB;
+               op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+               op->dir = AES_DIR_DECRYPT;
+
+               ret = geode_aes_crypt(op);
+               nbytes -= ret;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static int
+geode_ecb_encrypt(struct blkcipher_desc *desc,
+                 struct scatterlist *dst, struct scatterlist *src,
+                 unsigned int nbytes)
+{
+       struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err, ret;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while((nbytes = walk.nbytes)) {
+               op->src = walk.src.virt.addr;
+               op->dst = walk.dst.virt.addr;
+               op->mode = AES_MODE_ECB;
+               op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
+               op->dir = AES_DIR_ENCRYPT;
+
+               ret = geode_aes_crypt(op);
+               nbytes -= ret;
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static struct crypto_alg geode_ecb_alg = {
+       .cra_name               =       "ecb(aes)",
+       .cra_driver_name        =       "ecb-aes-geode-128",
+       .cra_priority           =       400,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          =       AES_MIN_BLOCK_SIZE,
+       .cra_ctxsize            =       sizeof(struct geode_aes_op),
+       .cra_alignmask          =       15,
+       .cra_type               =       &crypto_blkcipher_type,
+       .cra_module             =       THIS_MODULE,
+       .cra_list               =       LIST_HEAD_INIT(geode_ecb_alg.cra_list),
+       .cra_u                  =       {
+               .blkcipher = {
+                       .min_keysize            =       AES_KEY_LENGTH,
+                       .max_keysize            =       AES_KEY_LENGTH,
+                       .setkey                 =       geode_setkey,
+                       .encrypt                =       geode_ecb_encrypt,
+                       .decrypt                =       geode_ecb_decrypt,
+               }
+       }
+};
+
+static void
+geode_aes_remove(struct pci_dev *dev)
+{
+       crypto_unregister_alg(&geode_alg);
+       crypto_unregister_alg(&geode_ecb_alg);
+       crypto_unregister_alg(&geode_cbc_alg);
+
+       pci_iounmap(dev, _iobase);
+       _iobase = NULL;
+
+       pci_release_regions(dev);
+       pci_disable_device(dev);
+}
+
+
+static int
+geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       int ret;
+
+       if ((ret = pci_enable_device(dev)))
+               return ret;
+
+       if ((ret = pci_request_regions(dev, "geode-aes-128")))
+               goto eenable;
+
+       _iobase = pci_iomap(dev, 0, 0);
+
+       if (_iobase == NULL) {
+               ret = -ENOMEM;
+               goto erequest;
+       }
+
+       spin_lock_init(&lock);
+
+       /* Clear any pending activity */
+       iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);
+
+       if ((ret = crypto_register_alg(&geode_alg)))
+               goto eiomap;
+
+       if ((ret = crypto_register_alg(&geode_ecb_alg)))
+               goto ealg;
+
+       if ((ret = crypto_register_alg(&geode_cbc_alg)))
+               goto eecb;
+
+       printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
+       return 0;
+
+ eecb:
+       crypto_unregister_alg(&geode_ecb_alg);
+
+ ealg:
+       crypto_unregister_alg(&geode_alg);
+
+ eiomap:
+       pci_iounmap(dev, _iobase);
+
+ erequest:
+       pci_release_regions(dev);
+
+ eenable:
+       pci_disable_device(dev);
+
+       printk(KERN_ERR "geode-aes:  GEODE AES initialization failed.\n");
+       return ret;
+}
+
+static struct pci_device_id geode_aes_tbl[] = {
+       { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID} ,
+       { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, geode_aes_tbl);
+
+static struct pci_driver geode_aes_driver = {
+       .name = "Geode LX AES",
+       .id_table = geode_aes_tbl,
+       .probe = geode_aes_probe,
+       .remove = __devexit_p(geode_aes_remove)
+};
+
+static int __init
+geode_aes_init(void)
+{
+       return pci_module_init(&geode_aes_driver);
+}
+
+static void __exit
+geode_aes_exit(void)
+{
+       pci_unregister_driver(&geode_aes_driver);
+}
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION("Geode LX Hardware AES driver");
+MODULE_LICENSE("GPL");
+
+module_init(geode_aes_init);
+module_exit(geode_aes_exit);
diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
new file mode 100644 (file)
index 0000000..8003a36
--- /dev/null
@@ -0,0 +1,40 @@
+/* Copyright (C) 2003-2006, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _GEODE_AES_H_
+#define _GEODE_AES_H_
+
+#define AES_KEY_LENGTH 16
+#define AES_IV_LENGTH  16
+
+#define AES_MIN_BLOCK_SIZE 16
+
+#define AES_MODE_ECB 0
+#define AES_MODE_CBC 1
+
+#define AES_DIR_DECRYPT 0
+#define AES_DIR_ENCRYPT 1
+
+#define AES_FLAGS_USRKEY   (1 << 0)
+#define AES_FLAGS_COHERENT (1 << 1)
+
+struct geode_aes_op {
+
+       void *src;
+       void *dst;
+
+       u32 mode;
+       u32 dir;
+       u32 flags;
+       int len;
+
+       u8 key[AES_KEY_LENGTH];
+       u8 iv[AES_IV_LENGTH];
+};
+
+#endif
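
The two new files above register "aes", "ecb(aes)" and "cbc(aes)" providers with the CryptoAPI, so existing in-kernel users pick up the Geode engine whenever its priority wins the lookup. A minimal sketch of a single-block consumer, assuming the cipher interface of this kernel series (the helper and its names are hypothetical, not part of this merge):

        #include <linux/types.h>
        #include <linux/crypto.h>
        #include <linux/err.h>

        static int aes_one_block(const u8 *key, const u8 *in, u8 *out)
        {
                struct crypto_cipher *tfm;
                int err;

                /* resolves to the highest-priority "aes" implementation */
                tfm = crypto_alloc_cipher("aes", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                err = crypto_cipher_setkey(tfm, key, 16);
                if (!err)
                        crypto_cipher_encrypt_one(tfm, out, in);        /* one 16-byte block */

                crypto_free_cipher(tfm);
                return err;
        }
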
index 0358419a0e48b7870f044920ae318c6d50187ef3..8e872610461954d54197d77fc18eb53af5ea4341 100644 (file)
@@ -636,10 +636,10 @@ static int ioat_self_test(struct ioat_device *device)
        dma_cookie_t cookie;
        int err = 0;
 
-       src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+       src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
-       dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL);
+       dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
index 75e9e38330ff4d4de4785ba116c7c2cd20fb6f4d..1b4fc922180386cd4107ec42f4328b4f50584be4 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/sysdev.h>
 #include <linux/ctype.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
 #include <asm/edac.h>
index 4630f1969a0997875bb11fa8a7950cbbae75b2ca..15edf40828b479172d615347780f3bb2f2d8ce51 100644 (file)
@@ -140,12 +140,14 @@ ulong ds1374_get_rtc_time(void)
        return t1;
 }
 
-static void ds1374_set_work(void *arg)
+static ulong new_time;
+
+static void ds1374_set_work(struct work_struct *work)
 {
        ulong t1, t2;
        int limit = 10;         /* arbitrary retry limit */
 
-       t1 = *(ulong *) arg;
+       t1 = new_time;
 
        mutex_lock(&ds1374_mutex);
 
@@ -167,11 +169,9 @@ static void ds1374_set_work(void *arg)
                         "can't confirm time set from rtc chip\n");
 }
 
-static ulong new_time;
-
 static struct workqueue_struct *ds1374_workqueue;
 
-static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time);
+static DECLARE_WORK(ds1374_work, ds1374_set_work);
 
 int ds1374_set_rtc_time(ulong nowtime)
 {
@@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime)
        if (in_interrupt())
                queue_work(ds1374_workqueue, &ds1374_work);
        else
-               ds1374_set_work(&new_time);
+               ds1374_set_work(NULL);
 
        return 0;
 }
index 2dd0a34d9472e17496dff2692aec74cd5cd3ac61..420377c86422b229008e11361e079cca6556346e 100644 (file)
@@ -215,8 +215,15 @@ m41t00_set(void *arg)
 }
 
 static ulong new_time;
+/* well, isn't this API just _lovely_? */
+static void
+m41t00_barf(struct work_struct *unusable)
+{
+       m41t00_set(&new_time);
+}
+
 static struct workqueue_struct *m41t00_wq;
-static DECLARE_WORK(m41t00_work, m41t00_set, &new_time);
+static DECLARE_WORK(m41t00_work, m41t00_barf);
 
 int
 m41t00_set_rtc_time(ulong nowtime)
index 0c68d0f0d8e513e028784faf5e5ef7c3a524f599..e23bc0d62159c54032bc713f5ab0c996b629c2c8 100644 (file)
@@ -389,14 +389,6 @@ config BLK_DEV_RZ1000
          Linux. This may slow disk throughput by a few percent, but at least
          things will operate 100% reliably.
 
-config BLK_DEV_SL82C105
-       tristate "Winbond SL82c105 support"
-       depends on PCI && (PPC || ARM) && BLK_DEV_IDEPCI
-       help
-         If you have a Winbond SL82c105 IDE controller, say Y here to enable
-         special configuration for this chip. This is common on various CHRP
-         motherboards, but could be used elsewhere. If in doubt, say Y.
-
 config BLK_DEV_IDEDMA_PCI
        bool "Generic PCI bus-master DMA support"
        depends on PCI && BLK_DEV_IDEPCI
@@ -712,6 +704,14 @@ config BLK_DEV_SIS5513
 
          Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
 
+config BLK_DEV_SL82C105
+       tristate "Winbond SL82c105 support"
+       depends on (PPC || ARM)
+       help
+         If you have a Winbond SL82c105 IDE controller, say Y here to enable
+         special configuration for this chip. This is common on various CHRP
+         motherboards, but could be used elsewhere. If in doubt, say Y.
+
 config BLK_DEV_SLC90E66
        tristate "SLC90E66 chipset support"
        help
index 287a66201150a0bd32432817a4357705b767329c..16890769dca686dff62ec62bc0c2f741b7ef190c 100644 (file)
@@ -973,8 +973,8 @@ ide_settings_t *ide_find_setting_by_name (ide_drive_t *drive, char *name)
  *     @drive: drive
  *
  *     Automatically remove all the driver specific settings for this
- *     drive. This function may sleep and must not be called from IRQ
- *     context. The caller must hold ide_setting_sem.
+ *     drive. This function may not be called from IRQ context. The
+ *     caller must hold ide_setting_sem.
  */
  
 static void auto_remove_settings (ide_drive_t *drive)
@@ -1874,11 +1874,22 @@ void ide_unregister_subdriver(ide_drive_t *drive, ide_driver_t *driver)
 {
        unsigned long flags;
        
-       down(&ide_setting_sem);
-       spin_lock_irqsave(&ide_lock, flags);
 #ifdef CONFIG_PROC_FS
        ide_remove_proc_entries(drive->proc, driver->proc);
 #endif
+       down(&ide_setting_sem);
+       spin_lock_irqsave(&ide_lock, flags);
+       /*
+        * ide_setting_sem protects the settings list
+        * ide_lock protects the use of settings
+        *
+        * so we need to hold both, ide_setting_sem because we want to
+        * modify the settings list, and ide_lock because we cannot take
+        * a setting out that is being used.
+        *
+        * OTOH both ide_{read,write}_setting are only ever used under
+        * ide_setting_sem.
+        */
        auto_remove_settings(drive);
        spin_unlock_irqrestore(&ide_lock, flags);
        up(&ide_setting_sem);
index bef4759f70e505e7e5e401751fa3e0e990b416b1..7efd28ac21ed41aa8a5363efeaf11079b04c321f 100644 (file)
@@ -192,20 +192,10 @@ static int ide_config(struct pcmcia_device *link)
     tuple.TupleOffset = 0;
     tuple.TupleDataMax = 255;
     tuple.Attributes = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &stk->parse));
-    link->conf.ConfigBase = stk->parse.config.base;
-    link->conf.Present = stk->parse.config.rmask[0];
-
-    tuple.DesiredTuple = CISTPL_MANFID;
-    if (!pcmcia_get_first_tuple(link, &tuple) &&
-       !pcmcia_get_tuple_data(link, &tuple) &&
-       !pcmcia_parse_tuple(link, &tuple, &stk->parse))
-       is_kme = ((stk->parse.manfid.manf == MANFID_KME) &&
-                 ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) ||
-                  (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
+
+    is_kme = ((link->manf_id == MANFID_KME) &&
+             ((link->card_id == PRODID_KME_KXLC005_A) ||
+              (link->card_id == PRODID_KME_KXLC005_B)));
 
     /* Not sure if this is right... look up the current Vcc */
     CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &stk->conf));
@@ -408,8 +398,10 @@ static struct pcmcia_device_id ide_ids[] = {
        PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
        PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
        PCMCIA_DEVICE_PROD_ID1("TRANSCEND    512M   ", 0xd0909443),
+       PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
        PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
        PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
+       PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
        PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
        PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
        PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
index 2af634d7acf4a86de888b7077e037c5e5a4a0fe7..61f1a9665a7f911729ae17faad0492a5b89309fe 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/ide.h>
 #include <asm/io.h>
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_CHRP
 #include <asm/processor.h>
 #endif
 
@@ -282,11 +282,11 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
         * Find the ISA bridge to see how good the IDE is.
         */
        via_config = via_config_find(&isa);
-       if (!via_config->id) {
-               printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n");
-               pci_dev_put(isa);
-               return -ENODEV;
-       }
+
+       /* We checked this earlier, so if it fails here deep badness
+          is involved */
+
+       BUG_ON(!via_config->id);
 
        /*
         * Setup or disable Clk66 if appropriate
@@ -442,7 +442,7 @@ static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
        hwif->speedproc = &via_set_drive;
 
 
-#if defined(CONFIG_PPC_CHRP) && defined(CONFIG_PPC32)
+#ifdef CONFIG_PPC_CHRP
        if(machine_is(chrp) && _chrp_type == _CHRP_Pegasos) {
                hwif->irq = hwif->channel ? 15 : 14;
        }
@@ -494,6 +494,17 @@ static ide_pci_device_t via82cxxx_chipsets[] __devinitdata = {
 
 static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
+       struct pci_dev *isa = NULL;
+       struct via_isa_bridge *via_config;
+       /*
+        * Find the ISA bridge and check we know what it is.
+        */
+       via_config = via_config_find(&isa);
+       pci_dev_put(isa);
+       if (!via_config->id) {
+               printk(KERN_WARNING "VP_IDE: Unknown VIA SouthBridge, disabling DMA.\n");
+               return -ENODEV;
+       }
        return ide_setup_pci_device(dev, &via82cxxx_chipsets[id->driver_data]);
 }
 
index 31e5cc49d61a01899cea3e6d5220809f2b889be7..27d6c642415dd16af7d94bc81150c2137c1652ac 100644 (file)
@@ -133,7 +133,7 @@ struct eth1394_node_info {
 #define ETH1394_DRIVER_NAME "eth1394"
 static const char driver_name[] = ETH1394_DRIVER_NAME;
 
-static kmem_cache_t *packet_task_cache;
+static struct kmem_cache *packet_task_cache;
 
 static struct hpsb_highlevel eth1394_highlevel;
 
index d90a3a1898c0d4276bd6a1c7c0688f62a184a46b..b935e08695a995c3701c7ca06670c549797414ef 100644 (file)
 #include "config_roms.h"
 
 
-static void delayed_reset_bus(void * __reset_info)
+static void delayed_reset_bus(struct work_struct *work)
 {
-       struct hpsb_host *host = (struct hpsb_host*)__reset_info;
+       struct hpsb_host *host =
+               container_of(work, struct hpsb_host, delayed_reset.work);
        int generation = host->csr.generation + 1;
 
        /* The generation field rolls over to 2 rather than 0 per IEEE
@@ -122,7 +123,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
        int i;
        int hostnum = 0;
 
-       h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+       h = kzalloc(sizeof(*h) + extra, GFP_KERNEL);
        if (!h)
                return NULL;
 
@@ -145,7 +146,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 
        atomic_set(&h->generation, 0);
 
-       INIT_WORK(&h->delayed_reset, delayed_reset_bus, h);
+       INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus);
        
        init_timer(&h->timeout);
        h->timeout.data = (unsigned long) h;
@@ -234,7 +235,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
                 * Config ROM in the near future. */
                reset_delay = HZ;
 
-       PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host);
+       PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus);
        schedule_delayed_work(&host->delayed_reset, reset_delay);
 
        return 0;
index bc6dbfadb8914a751764a2704c9db751f63987f0..d553e38c95432c95d593d764472626b4e22f6a30 100644 (file)
@@ -62,7 +62,7 @@ struct hpsb_host {
        struct class_device class_dev;
 
        int update_config_rom;
-       struct work_struct delayed_reset;
+       struct delayed_work delayed_reset;
        unsigned int config_roms;
 
        struct list_head addr_space;
index 8e7b83f84485a1933b209d4017a74a288d19b64a..e829c9336b3c29ff0999fd3d89b012485fb322d9 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/moduleparam.h>
+#include <linux/freezer.h>
 #include <asm/atomic.h>
 
 #include "csr.h"
index 6e8ea9110c46cfcd92de98d01df75c1f1529ea0b..eae97d8dcf03dc907f7f1f98965de382cea712c2 100644 (file)
@@ -1225,7 +1225,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
        int ctx;
        int ret = -ENOMEM;
 
-       recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
+       recv = kmalloc(sizeof(*recv), GFP_KERNEL);
        if (!recv)
                return -ENOMEM;
 
@@ -1918,7 +1918,7 @@ static int ohci_iso_xmit_init(struct hpsb_iso *iso)
        int ctx;
        int ret = -ENOMEM;
 
-       xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
+       xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
        if (!xmit)
                return -ENOMEM;
 
@@ -3021,7 +3021,7 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
                        return -ENOMEM;
                }
 
-               d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+               d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
                OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
 
                 if (d->prg_cpu[i] != NULL) {
@@ -3117,7 +3117,7 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
        OHCI_DMA_ALLOC("dma_rcv prg pool");
 
        for (i = 0; i < d->num_desc; i++) {
-               d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
+               d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
                OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
 
                 if (d->prg_cpu[i] != NULL) {
index 0a7412e27eb49bf18e67408c81b6c56b4192346e..9cab1d66147238138930cbd7b06a8d5681fc6d74 100644 (file)
@@ -1428,7 +1428,7 @@ static int __devinit add_card(struct pci_dev *dev,
                struct i2c_algo_bit_data i2c_adapter_data;
 
                error = -ENOMEM;
-               i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
+               i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL);
                if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
 
                memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
index 5ec4f5eb6b19834b56fe7a53827321470c24ae20..bf71e069eaf5345cc39fe8b903ae66eb58074882 100644 (file)
@@ -112,7 +112,7 @@ static struct pending_request *__alloc_pending_request(gfp_t flags)
 
 static inline struct pending_request *alloc_pending_request(void)
 {
-       return __alloc_pending_request(SLAB_KERNEL);
+       return __alloc_pending_request(GFP_KERNEL);
 }
 
 static void free_pending_request(struct pending_request *req)
@@ -259,7 +259,7 @@ static void host_reset(struct hpsb_host *host)
        if (hi != NULL) {
                list_for_each_entry(fi, &hi->file_info_list, list) {
                        if (fi->notification == RAW1394_NOTIFY_ON) {
-                               req = __alloc_pending_request(SLAB_ATOMIC);
+                               req = __alloc_pending_request(GFP_ATOMIC);
 
                                if (req != NULL) {
                                        req->file_info = fi;
@@ -306,13 +306,13 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
                        if (!(fi->listen_channels & (1ULL << channel)))
                                continue;
 
-                       req = __alloc_pending_request(SLAB_ATOMIC);
+                       req = __alloc_pending_request(GFP_ATOMIC);
                        if (!req)
                                break;
 
                        if (!ibs) {
                                ibs = kmalloc(sizeof(*ibs) + length,
-                                             SLAB_ATOMIC);
+                                             GFP_ATOMIC);
                                if (!ibs) {
                                        kfree(req);
                                        break;
@@ -367,13 +367,13 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
                        if (!fi->fcp_buffer)
                                continue;
 
-                       req = __alloc_pending_request(SLAB_ATOMIC);
+                       req = __alloc_pending_request(GFP_ATOMIC);
                        if (!req)
                                break;
 
                        if (!ibs) {
                                ibs = kmalloc(sizeof(*ibs) + length,
-                                             SLAB_ATOMIC);
+                                             GFP_ATOMIC);
                                if (!ibs) {
                                        kfree(req);
                                        break;
@@ -593,7 +593,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
        switch (req->req.type) {
        case RAW1394_REQ_LIST_CARDS:
                spin_lock_irqsave(&host_info_lock, flags);
-               khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
+               khl = kmalloc(sizeof(*khl) * host_count, GFP_ATOMIC);
 
                if (khl) {
                        req->req.misc = host_count;
@@ -1045,7 +1045,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
        }
        if (arm_addr->notification_options & ARM_READ) {
                DBGMSG("arm_read -> entering notification-section");
-               req = __alloc_pending_request(SLAB_ATOMIC);
+               req = __alloc_pending_request(GFP_ATOMIC);
                if (!req) {
                        DBGMSG("arm_read -> rcode_conflict_error");
                        spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1064,7 +1064,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer,
                            sizeof(struct arm_response) +
                            sizeof(struct arm_request_response);
                }
-               req->data = kmalloc(size, SLAB_ATOMIC);
+               req->data = kmalloc(size, GFP_ATOMIC);
                if (!(req->data)) {
                        free_pending_request(req);
                        DBGMSG("arm_read -> rcode_conflict_error");
@@ -1198,7 +1198,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
        }
        if (arm_addr->notification_options & ARM_WRITE) {
                DBGMSG("arm_write -> entering notification-section");
-               req = __alloc_pending_request(SLAB_ATOMIC);
+               req = __alloc_pending_request(GFP_ATOMIC);
                if (!req) {
                        DBGMSG("arm_write -> rcode_conflict_error");
                        spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1209,7 +1209,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid,
                    sizeof(struct arm_request) + sizeof(struct arm_response) +
                    (length) * sizeof(byte_t) +
                    sizeof(struct arm_request_response);
-               req->data = kmalloc(size, SLAB_ATOMIC);
+               req->data = kmalloc(size, GFP_ATOMIC);
                if (!(req->data)) {
                        free_pending_request(req);
                        DBGMSG("arm_write -> rcode_conflict_error");
@@ -1400,7 +1400,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
        if (arm_addr->notification_options & ARM_LOCK) {
                byte_t *buf1, *buf2;
                DBGMSG("arm_lock -> entering notification-section");
-               req = __alloc_pending_request(SLAB_ATOMIC);
+               req = __alloc_pending_request(GFP_ATOMIC);
                if (!req) {
                        DBGMSG("arm_lock -> rcode_conflict_error");
                        spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1408,7 +1408,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
                                                           The request may be retried */
                }
                size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);     /* maximum */
-               req->data = kmalloc(size, SLAB_ATOMIC);
+               req->data = kmalloc(size, GFP_ATOMIC);
                if (!(req->data)) {
                        free_pending_request(req);
                        DBGMSG("arm_lock -> rcode_conflict_error");
@@ -1628,7 +1628,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
        if (arm_addr->notification_options & ARM_LOCK) {
                byte_t *buf1, *buf2;
                DBGMSG("arm_lock64 -> entering notification-section");
-               req = __alloc_pending_request(SLAB_ATOMIC);
+               req = __alloc_pending_request(GFP_ATOMIC);
                if (!req) {
                        spin_unlock_irqrestore(&host_info_lock, irqflags);
                        DBGMSG("arm_lock64 -> rcode_conflict_error");
@@ -1636,7 +1636,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
                                                           The request may be retried */
                }
                size = sizeof(struct arm_request) + sizeof(struct arm_response) + 3 * sizeof(*store) + sizeof(struct arm_request_response);     /* maximum */
-               req->data = kmalloc(size, SLAB_ATOMIC);
+               req->data = kmalloc(size, GFP_ATOMIC);
                if (!(req->data)) {
                        free_pending_request(req);
                        spin_unlock_irqrestore(&host_info_lock, irqflags);
@@ -1737,7 +1737,7 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
                return (-EINVAL);
        }
        /* addr-list-entry for fileinfo */
-       addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
+       addr = kmalloc(sizeof(*addr), GFP_KERNEL);
        if (!addr) {
                req->req.length = 0;
                return (-ENOMEM);
@@ -2103,7 +2103,7 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req)
 static int get_config_rom(struct file_info *fi, struct pending_request *req)
 {
        int ret = sizeof(struct raw1394_request);
-       quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+       quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
        int status;
 
        if (!data)
@@ -2133,7 +2133,7 @@ static int get_config_rom(struct file_info *fi, struct pending_request *req)
 static int update_config_rom(struct file_info *fi, struct pending_request *req)
 {
        int ret = sizeof(struct raw1394_request);
-       quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL);
+       quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) {
@@ -2443,7 +2443,7 @@ static void queue_rawiso_event(struct file_info *fi)
        /* only one ISO activity event may be in the queue */
        if (!__rawiso_event_in_queue(fi)) {
                struct pending_request *req =
-                   __alloc_pending_request(SLAB_ATOMIC);
+                   __alloc_pending_request(GFP_ATOMIC);
 
                if (req) {
                        req->file_info = fi;
@@ -2779,7 +2779,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
 {
        struct file_info *fi;
 
-       fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
+       fi = kzalloc(sizeof(*fi), GFP_KERNEL);
        if (!fi)
                return -ENOMEM;
 
index 6986ac188281745eda188c9b1c26110a9fe7f62f..cd156d4e779e973511e67562ab0f1d9f9919bed5 100644 (file)
@@ -493,20 +493,25 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id,
                scsi_unblock_requests(scsi_id->scsi_host);
 }
 
-static void sbp2util_write_orb_pointer(void *p)
+static void sbp2util_write_orb_pointer(struct work_struct *work)
 {
+       struct scsi_id_instance_data *scsi_id =
+               container_of(work, struct scsi_id_instance_data,
+                            protocol_work.work);
        quadlet_t data[2];
 
-       data[0] = ORB_SET_NODE_ID(
-                       ((struct scsi_id_instance_data *)p)->hi->host->node_id);
-       data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma;
+       data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id);
+       data[1] = scsi_id->last_orb_dma;
        sbp2util_cpu_to_be32_buffer(data, 8);
-       sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8);
+       sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8);
 }
 
-static void sbp2util_write_doorbell(void *p)
+static void sbp2util_write_doorbell(struct work_struct *work)
 {
-       sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4);
+       struct scsi_id_instance_data *scsi_id =
+               container_of(work, struct scsi_id_instance_data,
+                            protocol_work.work);
+       sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4);
 }
 
 /*
@@ -843,7 +848,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
        INIT_LIST_HEAD(&scsi_id->scsi_list);
        spin_lock_init(&scsi_id->sbp2_command_orb_lock);
        atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING);
-       INIT_WORK(&scsi_id->protocol_work, NULL, NULL);
+       INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL);
 
        ud->device.driver_data = scsi_id;
 
@@ -2047,11 +2052,10 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
                 * We do not accept new commands until the job is over.
                 */
                scsi_block_requests(scsi_id->scsi_host);
-               PREPARE_WORK(&scsi_id->protocol_work,
+               PREPARE_DELAYED_WORK(&scsi_id->protocol_work,
                             last_orb ? sbp2util_write_doorbell:
-                                       sbp2util_write_orb_pointer,
-                            scsi_id);
-               schedule_work(&scsi_id->protocol_work);
+                                       sbp2util_write_orb_pointer);
+               schedule_delayed_work(&scsi_id->protocol_work, 0);
        }
 }
 
index abbe48e646c3a7397d8a355de0e8f0757b426647..1b16d6b9cf11a0f55e291a7bbe064687b8d12ede 100644 (file)
@@ -348,7 +348,7 @@ struct scsi_id_instance_data {
        unsigned workarounds;
 
        atomic_t state;
-       struct work_struct protocol_work;
+       struct delayed_work protocol_work;
 };
 
 /* For use in scsi_id_instance_data.state */
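
The sbp2 hunks above follow the conversion pattern used throughout this merge: a struct work_struct that used to be scheduled together with a context pointer becomes a struct delayed_work, the handler receives the work item itself, and the enclosing object is recovered with container_of(). A minimal sketch of that pattern, with a hypothetical my_dev structure standing in for scsi_id_instance_data, might look roughly like this:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/workqueue.h>

	/* Hypothetical device structure; only the embedded delayed_work matters. */
	struct my_dev {
		struct delayed_work protocol_work;	/* was: struct work_struct */
	};

	/* New-style handler: it is passed the work item, not a void *context. */
	static void my_dev_work(struct work_struct *work)
	{
		struct my_dev *dev =
			container_of(work, struct my_dev, protocol_work.work);
		/* ... operate on dev ... */
	}

	static void my_dev_kick(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->protocol_work, my_dev_work);
		/* a delay of 0 jiffies replaces the old immediate schedule_work() */
		schedule_delayed_work(&dev->protocol_work, 0);
	}

Only a delayed_work may be handed to schedule_delayed_work()/queue_delayed_work(); items that never need a delay stay as plain struct work_struct, as in the next files.
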
index 7767a11b6890d4d88d2c61d1a3adcb3730a9c2e4..af939796750dba07fd713f83dc3cf473ce52c43e 100644 (file)
@@ -55,11 +55,11 @@ struct addr_req {
        int status;
 };
 
-static void process_req(void *data);
+static void process_req(struct work_struct *work);
 
 static DEFINE_MUTEX(lock);
 static LIST_HEAD(req_list);
-static DECLARE_WORK(work, process_req, NULL);
+static DECLARE_DELAYED_WORK(work, process_req);
 static struct workqueue_struct *addr_wq;
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -215,7 +215,7 @@ out:
        return ret;
 }
 
-static void process_req(void *data)
+static void process_req(struct work_struct *work)
 {
        struct addr_req *req, *temp_req;
        struct sockaddr_in *src_in, *dst_in;
index 20e9f64e67a6cba164aba9ee81b9653ddc207141..98272fbbfb31e3d1ff4e8a010d6a7f1489a58bf8 100644 (file)
@@ -285,9 +285,10 @@ err:
        kfree(tprops);
 }
 
-static void ib_cache_task(void *work_ptr)
+static void ib_cache_task(struct work_struct *_work)
 {
-       struct ib_update_work *work = work_ptr;
+       struct ib_update_work *work =
+               container_of(_work, struct ib_update_work, work);
 
        ib_cache_update(work->device, work->port_num);
        kfree(work);
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
            event->event == IB_EVENT_CLIENT_REREGISTER) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
-                       INIT_WORK(&work->work, ib_cache_task, work);
+                       INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        schedule_work(&work->work);
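
For the one-shot, dynamically allocated case shown in the cache-update hunks above, the wrapper struct carries both the work item and its parameters, so the extra data argument that INIT_WORK() used to take is no longer needed. A rough, self-contained sketch (names are illustrative, not the driver's):

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct update_work {
		struct work_struct work;
		int port_num;			/* parameters travel inside the wrapper */
	};

	static void update_handler(struct work_struct *work)
	{
		struct update_work *uw =
			container_of(work, struct update_work, work);

		/* ... act on uw->port_num ... */
		kfree(uw);			/* one-shot: free the wrapper when done */
	}

	static int queue_update(int port_num)
	{
		struct update_work *uw = kmalloc(sizeof(*uw), GFP_ATOMIC);

		if (!uw)
			return -ENOMEM;
		INIT_WORK(&uw->work, update_handler);	/* two arguments, no context */
		uw->port_num = port_num;
		schedule_work(&uw->work);
		return 0;
	}
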
index e5dc4530808aac8c8e8d410ca32f47ae60cf5cfe..79c937bf696259096fd0a168cfed6528e2fe4e5f 100644 (file)
@@ -101,7 +101,7 @@ struct cm_av {
 };
 
 struct cm_work {
-       struct work_struct work;
+       struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
@@ -161,7 +161,7 @@ struct cm_id_private {
        atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
@@ -668,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
                return ERR_PTR(-ENOMEM);
 
        timewait_info->work.local_id = local_id;
-       INIT_WORK(&timewait_info->work.work, cm_work_handler,
-                 &timewait_info->work);
+       INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
 }
@@ -2995,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
        }
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-       struct cm_work *work = data;
+       struct cm_work *work = container_of(_work, struct cm_work, work.work);
        int ret;
 
        switch (work->cm_event.event) {
@@ -3087,12 +3086,12 @@ static int cm_establish(struct ib_cm_id *cm_id)
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
-       queue_work(cm.wq, &work->work);
+       queue_delayed_work(cm.wq, &work->work, 0);
 out:
        return ret;
 }
@@ -3191,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                return;
        }
 
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = (struct cm_port *)mad_agent->context;
-       queue_work(cm.wq, &work->work);
+       queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
index cf48f269743449b6a4797a2727e84148fe1bf176..985a6b564d8feec4781cefd7f3316cf1793013ce 100644 (file)
@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
 
-static void cma_work_handler(void *data)
+static void cma_work_handler(struct work_struct *_work)
 {
-       struct cma_work *work = data;
+       struct cma_work *work = container_of(_work, struct cma_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;
 
@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
                return -ENOMEM;
 
        work->id = id_priv;
-       INIT_WORK(&work->work, cma_work_handler, work);
+       INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
                return -ENOMEM;
 
        work->id = id_priv;
-       INIT_WORK(&work->work, cma_work_handler, work);
+       INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ROUTE_QUERY;
        work->new_state = CMA_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
@@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
        }
 
        work->id = id_priv;
-       INIT_WORK(&work->work, cma_work_handler, work);
+       INIT_WORK(&work->work, cma_work_handler);
        work->old_state = CMA_ADDR_QUERY;
        work->new_state = CMA_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
index cf797d7aea09a0673617a1e04aa56da384509d8c..1039ad57d53b4b0f3ef2d252c51fb9a47bfd68f0 100644 (file)
@@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv,
  * thread asleep on the destroy_comp list vs. an object destroyed
  * here synchronously when the last reference is removed.
  */
-static void cm_work_handler(void *arg)
+static void cm_work_handler(struct work_struct *_work)
 {
-       struct iwcm_work *work = arg;
+       struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
        struct iw_cm_event levent;
        struct iwcm_id_private *cm_id_priv = work->cm_id;
        unsigned long flags;
@@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id,
                goto out;
        }
 
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_WORK(&work->work, cm_work_handler);
        work->cm_id = cm_id_priv;
        work->event = *iw_event;
 
index 3f9c16232c4d7f95971ba8298f6cef738cd1b3cb..15f38d94b3a853b3a86f002cd11c8e0333a12ba8 100644 (file)
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent(
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void timeout_sends(void *data);
-static void local_completions(void *data);
+static void timeout_sends(struct work_struct *work);
+static void local_completions(struct work_struct *work);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                              struct ib_mad_agent_private *agent_priv,
                              u8 mgmt_class);
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
        INIT_LIST_HEAD(&mad_agent_priv->wait_list);
        INIT_LIST_HEAD(&mad_agent_priv->done_list);
        INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
-       INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
+       INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
        INIT_LIST_HEAD(&mad_agent_priv->local_list);
-       INIT_WORK(&mad_agent_priv->local_work, local_completions,
-                  mad_agent_priv);
+       INIT_WORK(&mad_agent_priv->local_work, local_completions);
        atomic_set(&mad_agent_priv->refcount, 1);
        init_completion(&mad_agent_priv->comp);
 
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
 /*
  * IB MAD completion callback
  */
-static void ib_mad_completion_handler(void *data)
+static void ib_mad_completion_handler(struct work_struct *work)
 {
        struct ib_mad_port_private *port_priv;
        struct ib_wc wc;
 
-       port_priv = (struct ib_mad_port_private *)data;
+       port_priv = container_of(work, struct ib_mad_port_private, work);
        ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
 
        while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
-static void local_completions(void *data)
+static void local_completions(struct work_struct *work)
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_local_private *local;
@@ -2334,7 +2333,8 @@ static void local_completions(void *data)
        struct ib_wc wc;
        struct ib_mad_send_wc mad_send_wc;
 
-       mad_agent_priv = (struct ib_mad_agent_private *)data;
+       mad_agent_priv =
+               container_of(work, struct ib_mad_agent_private, local_work);
 
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
        while (!list_empty(&mad_agent_priv->local_list)) {
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
        return ret;
 }
 
-static void timeout_sends(void *data)
+static void timeout_sends(struct work_struct *work)
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc mad_send_wc;
        unsigned long flags, delay;
 
-       mad_agent_priv = (struct ib_mad_agent_private *)data;
+       mad_agent_priv = container_of(work, struct ib_mad_agent_private,
+                                     timed_work.work);
        mad_send_wc.vendor_err = 0;
 
        spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
                ret = -ENOMEM;
                goto error8;
        }
-       INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv);
+       INIT_WORK(&port_priv->work, ib_mad_completion_handler);
 
        spin_lock_irqsave(&ib_mad_port_list_lock, flags);
        list_add_tail(&port_priv->port_list, &ib_mad_port_list);
index d06b59083f6ea91f59a2e0698b61b2a322471a39..d5548e73e0685b4edf8f329fe4f8cba8dd5dd1e3 100644 (file)
@@ -102,7 +102,7 @@ struct ib_mad_agent_private {
        struct list_head send_list;
        struct list_head wait_list;
        struct list_head done_list;
-       struct work_struct timed_work;
+       struct delayed_work timed_work;
        unsigned long timeout;
        struct list_head local_list;
        struct work_struct local_work;
index 1ef79d015a1e32010c392bc6641e0a2dce99bb3f..3663fd7022be6764d60d19ff7a19039931802723 100644 (file)
@@ -45,8 +45,8 @@ enum rmpp_state {
 struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
-       struct work_struct timeout_work;
-       struct work_struct cleanup_work;
+       struct delayed_work timeout_work;
+       struct delayed_work cleanup_work;
        struct completion comp;
        enum rmpp_state state;
        spinlock_t lock;
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent,
        }
 }
 
-static void recv_timeout_handler(void *data)
+static void recv_timeout_handler(struct work_struct *work)
 {
-       struct mad_rmpp_recv *rmpp_recv = data;
+       struct mad_rmpp_recv *rmpp_recv =
+               container_of(work, struct mad_rmpp_recv, timeout_work.work);
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;
 
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data)
        ib_free_recv_mad(rmpp_wc);
 }
 
-static void recv_cleanup_handler(void *data)
+static void recv_cleanup_handler(struct work_struct *work)
 {
-       struct mad_rmpp_recv *rmpp_recv = data;
+       struct mad_rmpp_recv *rmpp_recv =
+               container_of(work, struct mad_rmpp_recv, cleanup_work.work);
        unsigned long flags;
 
        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 
        rmpp_recv->agent = agent;
        init_completion(&rmpp_recv->comp);
-       INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
-       INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+       INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
+       INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);
index 1706d3c7e95eaf2f8f8315cfab1897fb940eeaa8..e45afba75341ee86d80464406564c3492366edbd 100644 (file)
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref)
        kfree(sm_ah);
 }
 
-static void update_sm_ah(void *port_ptr)
+static void update_sm_ah(struct work_struct *work)
 {
-       struct ib_sa_port *port = port_ptr;
+       struct ib_sa_port *port =
+               container_of(work, struct ib_sa_port, update_task);
        struct ib_sa_sm_ah *new_ah, *old_ah;
        struct ib_port_attr port_attr;
        struct ib_ah_attr   ah_attr;
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device)
                if (IS_ERR(sa_dev->port[i].agent))
                        goto err;
 
-               INIT_WORK(&sa_dev->port[i].update_task,
-                         update_sm_ah, &sa_dev->port[i]);
+               INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
        }
 
        ib_set_client_data(device, &sa_client, sa_dev);
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
                goto err;
 
        for (i = 0; i <= e - s; ++i)
-               update_sm_ah(&sa_dev->port[i]);
+               update_sm_ah(&sa_dev->port[i].update_task);
 
        return;
 
index efe147dbeb42137e695298e19c525e1acf027a07..db12cc0841df251273e62fed86966cd9e60b08e6 100644 (file)
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
        up_write(&current->mm->mmap_sem);
 }
 
-static void ib_umem_account(void *work_ptr)
+static void ib_umem_account(struct work_struct *_work)
 {
-       struct ib_umem_account_work *work = work_ptr;
+       struct ib_umem_account_work *work =
+               container_of(_work, struct ib_umem_account_work, work);
 
        down_write(&work->mm->mmap_sem);
        work->mm->locked_vm -= work->diff;
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
                return;
        }
 
-       INIT_WORK(&work->work, ib_umem_account, work);
+       INIT_WORK(&work->work, ib_umem_account);
        work->mm   = mm;
        work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
 
index 40caeb5f41b467ff6c9dee2ffb588e285389c7f0..36620a22413cd1f5f40e99e9f5ee138ad4a0ad85 100644 (file)
@@ -164,7 +164,7 @@ void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
  */
 void *vq_repbuf_alloc(struct c2_dev *c2dev)
 {
-       return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC);
+       return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
 }
 
 /*
index 214e2fdddeef0792572aaf0fcd333aa1ae94016b..0d6e2c4bb2451f27f78dfea5aa191c1f66cebe58 100644 (file)
@@ -57,7 +57,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
        struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
                                              ib_device);
 
-       av = kmem_cache_alloc(av_cache, SLAB_KERNEL);
+       av = kmem_cache_alloc(av_cache, GFP_KERNEL);
        if (!av) {
                ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
                         pd, ah_attr);
index 458fe19648a10fc7ed8175f6495cf43672465511..93995b658d94a7dedbe816f2927589a63b51dde4 100644 (file)
@@ -134,7 +134,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
        if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
                return ERR_PTR(-EINVAL);
 
-       my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL);
+       my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL);
        if (!my_cq) {
                ehca_err(device, "Out of memory for ehca_cq struct device=%p",
                         device);
index 3d1c1c535038c0ce0460ad1f63a87037e401501b..cc47e4c13a180897fa3aee03a589d197b48367db 100644 (file)
@@ -108,7 +108,7 @@ static struct kmem_cache *ctblk_cache = NULL;
 
 void *ehca_alloc_fw_ctrlblock(void)
 {
-       void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
+       void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
        if (!ret)
                ehca_gen_err("Out of memory for ctblk");
        return ret;
index abce676c0ae0ec8a6dd8c824e6f75a7130d91fcf..0a5e2214cc5f72b30ed36d0e0e95384fea4d6b0f 100644 (file)
@@ -53,7 +53,7 @@ static struct ehca_mr *ehca_mr_new(void)
 {
        struct ehca_mr *me;
 
-       me = kmem_cache_alloc(mr_cache, SLAB_KERNEL);
+       me = kmem_cache_alloc(mr_cache, GFP_KERNEL);
        if (me) {
                memset(me, 0, sizeof(struct ehca_mr));
                spin_lock_init(&me->mrlock);
@@ -72,7 +72,7 @@ static struct ehca_mw *ehca_mw_new(void)
 {
        struct ehca_mw *me;
 
-       me = kmem_cache_alloc(mw_cache, SLAB_KERNEL);
+       me = kmem_cache_alloc(mw_cache, GFP_KERNEL);
        if (me) {
                memset(me, 0, sizeof(struct ehca_mw));
                spin_lock_init(&me->mwlock);
index 2c3cdc6f7b39ef5982876bf5dfae77108d374888..d5345e5b3cd661a0de94104fb16bd4f1fca33c76 100644 (file)
@@ -50,7 +50,7 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
 {
        struct ehca_pd *pd;
 
-       pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL);
+       pd = kmem_cache_alloc(pd_cache, GFP_KERNEL);
        if (!pd) {
                ehca_err(device, "device=%p context=%p out of memory",
                         device, context);
index 8682aa50c707bf665f9ac3ce59a95cdeedae4867..c6c9cef203e3361fb2868222cb4a405487b84087 100644 (file)
@@ -450,7 +450,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
        if (pd->uobject && udata)
                context = pd->uobject->context;
 
-       my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL);
+       my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL);
        if (!my_qp) {
                ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
                return ERR_PTR(-ENOMEM);
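
The SLAB_ATOMIC/SLAB_KERNEL flags removed in the allocation hunks above were defined as aliases of the corresponding GFP_* constants, so the substitution is purely textual and the choice between the two flags is unchanged. A small, hypothetical helper just to show that choice:

	#include <linux/slab.h>
	#include <linux/types.h>

	/* Illustrative only: GFP_ATOMIC for callers that cannot sleep (IRQ
	 * handlers, spinlocks held), GFP_KERNEL for ordinary process context
	 * where the allocator may block. */
	static void *alloc_ctx(size_t size, int can_sleep)
	{
		return kmalloc(size, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	}
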
index 413754b1d8a254d6fb26111dfe743a74d13ddcfc..8536aeb96af862ee2581c307f1ef25155850eb00 100644 (file)
@@ -214,9 +214,10 @@ struct ipath_user_pages_work {
        unsigned long num_pages;
 };
 
-static void user_pages_account(void *ptr)
+static void user_pages_account(struct work_struct *_work)
 {
-       struct ipath_user_pages_work *work = ptr;
+       struct ipath_user_pages_work *work =
+               container_of(_work, struct ipath_user_pages_work, work);
 
        down_write(&work->mm->mmap_sem);
        work->mm->locked_vm -= work->num_pages;
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 
        goto bail;
 
-       INIT_WORK(&work->work, user_pages_account, work);
+       INIT_WORK(&work->work, user_pages_account);
        work->mm = mm;
        work->num_pages = num_pages;
 
index 57cdc1bc5f508d0168ca522b259fbd955919398e..27caf3b0648a64d69944a3bf7fee8c0bfd1f9609 100644 (file)
@@ -189,7 +189,7 @@ int mthca_create_ah(struct mthca_dev *dev,
 on_hca_fail:
        if (ah->type == MTHCA_AH_PCI_POOL) {
                ah->av = pci_pool_alloc(dev->av_table.pool,
-                                       SLAB_ATOMIC, &ah->avdma);
+                                       GFP_ATOMIC, &ah->avdma);
                if (!ah->av)
                        return -ENOMEM;
 
index cd044ea2dfa42ecd53d967fbe46356e85fa104be..e948158a28d918673150c60323b3952e07fb2ca6 100644 (file)
@@ -57,7 +57,7 @@ static int catas_reset_disable;
 module_param_named(catas_reset_disable, catas_reset_disable, int, 0644);
 MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero");
 
-static void catas_reset(void *work_ptr)
+static void catas_reset(struct work_struct *work)
 {
        struct mthca_dev *dev, *tmpdev;
        LIST_HEAD(tlist);
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
 
 int __init mthca_catas_init(void)
 {
-       INIT_WORK(&catas_work, catas_reset, NULL);
+       INIT_WORK(&catas_work, catas_reset);
 
        catas_wq = create_singlethread_workqueue("mthca_catas");
        if (!catas_wq)
index f2b61851a49c5b3e5838cb193fe983e87e36246e..99547996aba2e742e2c8f1aea10106f7ebeccbe1 100644 (file)
@@ -136,11 +136,11 @@ struct ipoib_dev_priv {
        struct list_head multicast_list;
        struct rb_root multicast_tree;
 
-       struct work_struct pkey_task;
-       struct work_struct mcast_task;
+       struct delayed_work pkey_task;
+       struct delayed_work mcast_task;
        struct work_struct flush_task;
        struct work_struct restart_task;
-       struct work_struct ah_reap_task;
+       struct delayed_work ah_reap_task;
 
        struct ib_device *ca;
        u8                port;
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev);
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn);
-void ipoib_reap_ah(void *dev_ptr);
+void ipoib_reap_ah(struct work_struct *work);
 
 void ipoib_flush_paths(struct net_device *dev);
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
 
 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
-void ipoib_ib_dev_flush(void *dev);
+void ipoib_ib_dev_flush(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open(struct net_device *dev);
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev);
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
 void ipoib_dev_cleanup(struct net_device *dev);
 
-void ipoib_mcast_join_task(void *dev_ptr);
+void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
 
-void ipoib_mcast_restart_task(void *dev_ptr);
+void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
 int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler,
 int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
 int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);
 
-void ipoib_pkey_poll(void *dev);
+void ipoib_pkey_poll(struct work_struct *work);
 int ipoib_pkey_dev_delay_open(struct net_device *dev);
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
index 8bf5e9ec7c9522345696a2819ee99f444af341e7..f10fba5d32650e31ae1141cbe2a1caa27f3cb827 100644 (file)
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev)
        spin_unlock_irq(&priv->tx_lock);
 }
 
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+       struct net_device *dev = priv->dev;
 
        __ipoib_reap_ah(dev);
 
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
        return 0;
 }
 
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *)_dev;
-       struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+       struct ipoib_dev_priv *cpriv, *priv =
+               container_of(work, struct ipoib_dev_priv, flush_task);
+       struct net_device *dev = priv->dev;
 
        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev)
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_ib_dev_up(dev);
-               ipoib_mcast_restart_task(dev);
+               ipoib_mcast_restart_task(&priv->restart_task);
        }
 
        mutex_lock(&priv->vlan_mutex);
 
        /* Flush any child interfaces too */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
-               ipoib_ib_dev_flush(cpriv->dev);
+               ipoib_ib_dev_flush(&cpriv->flush_task);
 
        mutex_unlock(&priv->vlan_mutex);
 }
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
  * change async notification is available.
  */
 
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, pkey_task.work);
+       struct net_device *dev = priv->dev;
 
        ipoib_pkey_dev_check_presence(dev);
 
index 5ba3154320b4f9cb3c2dd8a80cd635e65a0d7636..c092802437263ca5c7ea1f5a05bcd2c37e5fa2f7 100644 (file)
@@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev)
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);
 
-       INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
-       INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
-       INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
-       INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
-       INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
+       INIT_DELAYED_WORK(&priv->pkey_task,    ipoib_pkey_poll);
+       INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
+       INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush);
+       INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
+       INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
 }
 
 struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
index d282d65e3ee00bb312634b0a62a93c0188d68afc..b04b72ca32eda5816e79780ed5553f51d4e53d1e 100644 (file)
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
                mcast->backoff = 1;
                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_work(ipoib_workqueue, &priv->mcast_task);
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);
                complete(&mcast->done);
                return;
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,
 
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
                if (status == -ETIMEDOUT)
-                       queue_work(ipoib_workqueue, &priv->mcast_task);
+                       queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+                                          0);
                else
                        queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                           mcast->backoff * HZ);
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                mcast->query_id = ret;
 }
 
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, mcast_task.work);
+       struct net_device *dev = priv->dev;
 
        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_work(ipoib_workqueue, &priv->mcast_task);
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
        spin_lock_irq(&priv->lock);
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        }
 }
 
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, restart_task);
+       struct net_device *dev = priv->dev;
        struct dev_mc_list *mclist;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
index 18a0000349965dc03438ecf641358c18efdbdf18..693b770028971102d0e364919f7091d776ed6abb 100644 (file)
@@ -48,7 +48,7 @@
 
 static void iser_cq_tasklet_fn(unsigned long data);
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(void *data);
+static void iser_comp_error_worker(struct work_struct *work);
 
 static void iser_cq_event_callback(struct ib_event *cause, void *context)
 {
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn)
        init_waitqueue_head(&ib_conn->wait);
        atomic_set(&ib_conn->post_recv_buf_count, 0);
        atomic_set(&ib_conn->post_send_buf_count, 0);
-       INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker,
-                 ib_conn);
+       INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
        INIT_LIST_HEAD(&ib_conn->conn_list);
        spin_lock_init(&ib_conn->lock);
 
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc)
        return ret_val;
 }
 
-static void iser_comp_error_worker(void *data)
+static void iser_comp_error_worker(struct work_struct *work)
 {
-       struct iser_conn *ib_conn = data;
+       struct iser_conn *ib_conn =
+               container_of(work, struct iser_conn, comperror_work);
 
        /* getting here when the state is UP means that the conn is being *
         * terminated asynchronously from the iSCSI layer's perspective.  */
index 64ab5fc7cca38db98ad503dbf4e8cf2a5fbd7eb3..a6289595557b704bc1a21d478e77047f6526c194 100644 (file)
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target)
        wait_for_completion(&target->done);
 }
 
-static void srp_remove_work(void *target_ptr)
+static void srp_remove_work(struct work_struct *work)
 {
-       struct srp_target_port *target = target_ptr;
+       struct srp_target_port *target =
+               container_of(work, struct srp_target_port, work);
 
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state != SRP_TARGET_DEAD) {
@@ -575,7 +576,7 @@ err:
        spin_lock_irq(target->scsi_host->host_lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
-               INIT_WORK(&target->work, srp_remove_work, target);
+               INIT_WORK(&target->work, srp_remove_work);
                schedule_work(&target->work);
        }
        spin_unlock_irq(target->scsi_host->host_lock);
index a0af97efe6ac0e23fd440cc1608db170890a005d..79dfb4b25c97126ef1817bafb28195073f0727d7 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kthread.h>
 #include <linux/sched.h>       /* HZ */
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 /*#include <asm/io.h>*/
 
index cbb93669d1cef7b9a3188ee74a008de927c1f2c7..8451b29a3db534ff1b1d746b35e88d334b7b07dc 100644 (file)
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd)
  * interrupt context.
  */
 
-static void atkbd_event_work(void *data)
+static void atkbd_event_work(struct work_struct *work)
 {
-       struct atkbd *atkbd = data;
+       struct atkbd *atkbd = container_of(work, struct atkbd, event_work);
 
        mutex_lock(&atkbd->event_mutex);
 
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
 
        atkbd->dev = dev;
        ps2_init(&atkbd->ps2dev, serio);
-       INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd);
+       INIT_WORK(&atkbd->event_work, atkbd_event_work);
        mutex_init(&atkbd->event_mutex);
 
        switch (serio->id.type) {
index 979b93e33da7a9e0461594887267d17c8e3891a0..b7f049b45b6bffc7fb0230a86afe23b2cf8953dc 100644 (file)
@@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code,
  * were in.
  */
 static void
-lkkbd_reinit (void *data)
+lkkbd_reinit (struct work_struct *work)
 {
-       struct lkkbd *lk = data;
+       struct lkkbd *lk = container_of(work, struct lkkbd, tq);
        int division;
        unsigned char leds_on = 0;
        unsigned char leds_off = 0;
@@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv)
 
        lk->serio = serio;
        lk->dev = input_dev;
-       INIT_WORK (&lk->tq, lkkbd_reinit, lk);
+       INIT_WORK (&lk->tq, lkkbd_reinit);
        lk->bell_volume = bell_volume;
        lk->keyclick_volume = keyclick_volume;
        lk->ctrlclick_volume = ctrlclick_volume;
index cac4781103c3ed1cb45a31d22bc0486aa5492e02..6cd887c5eb0a1082560d61172e1dc9bef1b7f0e9 100644 (file)
@@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
  * were in.
  */
 
-static void sunkbd_reinit(void *data)
+static void sunkbd_reinit(struct work_struct *work)
 {
-       struct sunkbd *sunkbd = data;
+       struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
 
        wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
 
@@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv)
        sunkbd->serio = serio;
        sunkbd->dev = input_dev;
        init_waitqueue_head(&sunkbd->wait);
-       INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd);
+       INIT_WORK(&sunkbd->tq, sunkbd_reinit);
        snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys);
 
        serio_set_drvdata(serio, sunkbd);
index ab4da79ee560d9279afc989d129715c45dd1593c..31d5a13bfd6bb3a534b11918676fda5fb06112fe 100644 (file)
@@ -695,7 +695,9 @@ static int __init hp_sdc_rtc_init(void)
 
        if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
                return ret;
-       misc_register(&hp_sdc_rtc_dev);
+       if (misc_register(&hp_sdc_rtc_dev) != 0)
+               printk(KERN_INFO "Could not register misc. dev for i8042 rtc\n");
+
         create_proc_read_entry ("driver/rtc", 0, NULL,
                                hp_sdc_rtc_read_proc, NULL);
 
index 6f9b2c7cc9c28903c05a51b682975c112996d129..52bb2226ce2fb9ac816018da35a0a232d050a692 100644 (file)
@@ -888,9 +888,10 @@ static int psmouse_poll(struct psmouse *psmouse)
  * psmouse_resync() attempts to re-validate current protocol.
  */
 
-static void psmouse_resync(void *p)
+static void psmouse_resync(struct work_struct *work)
 {
-       struct psmouse *psmouse = p, *parent = NULL;
+       struct psmouse *parent = NULL, *psmouse =
+               container_of(work, struct psmouse, resync_work);
        struct serio *serio = psmouse->ps2dev.serio;
        psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
        int failed = 0, enabled = 0;
@@ -1121,7 +1122,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
                goto out;
 
        ps2_init(&psmouse->ps2dev, serio);
-       INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
+       INIT_WORK(&psmouse->resync_work, psmouse_resync);
        psmouse->dev = input_dev;
        snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);
 
index e5b1b60757bb8d7a965aa92a87ffbae2e0264042..b3e84d3bb7f7cc22f38883d6ee65966c2a345b36 100644 (file)
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command);
  * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.)
  */
 
-static void ps2_execute_scheduled_command(void *data)
+static void ps2_execute_scheduled_command(struct work_struct *work)
 {
-       struct ps2work *ps2work = data;
+       struct ps2work *ps2work = container_of(work, struct ps2work, work);
 
        ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command);
        kfree(ps2work);
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman
        ps2work->ps2dev = ps2dev;
        ps2work->command = command;
        memcpy(ps2work->param, param, send);
-       INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work);
+       INIT_WORK(&ps2work->work, ps2_execute_scheduled_command);
 
        if (!schedule_work(&ps2work->work)) {
                kfree(ps2work);
index 211943f85cb66e1dee2b77aac5c3e30ce3d6c07a..5f1d4032fd57fbddc4faf65635b416c9a8daf74f 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Serio abstraction core");
index f56d6a0f0624ad06c602e52e52e39833959d0e17..0517c7387d67511593f07a40b67146eb51801760 100644 (file)
@@ -189,7 +189,7 @@ static int ads7846_read12_ser(struct device *dev, unsigned command)
 {
        struct spi_device       *spi = to_spi_device(dev);
        struct ads7846          *ts = dev_get_drvdata(dev);
-       struct ser_req          *req = kzalloc(sizeof *req, SLAB_KERNEL);
+       struct ser_req          *req = kzalloc(sizeof *req, GFP_KERNEL);
        int                     status;
        int                     sample;
        int                     i;
index 6ae6eb32211141a93c299a65f15dd8bd14e20a79..946c38cf6f8a66322923cb4104d5ebc268fd62cf 100644 (file)
@@ -627,8 +627,10 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) {
 }
 
 void
-actcapi_dispatch(act2000_card *card)
+actcapi_dispatch(struct work_struct *work)
 {
+       struct act2000_card *card =
+               container_of(work, struct act2000_card, rcv_tq);
        struct sk_buff *skb;
        actcapi_msg *msg;
        __u16 ccmd;
index 49f453c53c64d9de0c1784df30b65e171c6792f1..e55f6a931f661e82665d26a8ced3edcdba924b71 100644 (file)
@@ -356,7 +356,7 @@ extern int actcapi_connect_req(act2000_card *, act2000_chan *, char *, char, int
 extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *);
 extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *);
 extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8);
-extern void actcapi_dispatch(act2000_card *);
+extern void actcapi_dispatch(struct work_struct *);
 #ifdef DEBUG_MSG
 extern void actcapi_debug_msg(struct sk_buff *skb, int);
 #else
index d89dcde4eadefeb9ef25ab0bc92946dec34c7279..90593e2ef87209dbb36261e30947ea283a660d04 100644 (file)
@@ -192,8 +192,11 @@ act2000_set_msn(act2000_card *card, char *eazmsn)
 }
 
 static void
-act2000_transmit(struct act2000_card *card)
+act2000_transmit(struct work_struct *work)
 {
+       struct act2000_card *card =
+               container_of(work, struct act2000_card, snd_tq);
+
        switch (card->bus) {
                case ACT2000_BUS_ISA:
                        act2000_isa_send(card);
@@ -207,8 +210,11 @@ act2000_transmit(struct act2000_card *card)
 }
 
 static void
-act2000_receive(struct act2000_card *card)
+act2000_receive(struct work_struct *work)
 {
+       struct act2000_card *card =
+               container_of(work, struct act2000_card, poll_tq);
+
        switch (card->bus) {
                case ACT2000_BUS_ISA:
                        act2000_isa_receive(card);
@@ -227,7 +233,7 @@ act2000_poll(unsigned long data)
        act2000_card * card = (act2000_card *)data;
        unsigned long flags;
 
-       act2000_receive(card);
+       act2000_receive(&card->poll_tq);
        spin_lock_irqsave(&card->lock, flags);
        mod_timer(&card->ptimer, jiffies+3);
        spin_unlock_irqrestore(&card->lock, flags);
@@ -578,9 +584,9 @@ act2000_alloccard(int bus, int port, int irq, char *id)
        skb_queue_head_init(&card->sndq);
        skb_queue_head_init(&card->rcvq);
        skb_queue_head_init(&card->ackq);
-       INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card);
-       INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card);
-       INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card);
+       INIT_WORK(&card->snd_tq, act2000_transmit);
+       INIT_WORK(&card->rcv_tq, actcapi_dispatch);
+       INIT_WORK(&card->poll_tq, act2000_receive);
        init_timer(&card->ptimer);
        card->interface.owner = THIS_MODULE;
         card->interface.channels = ACT2000_BCH;
index 8c4fcb9027b345b4e1e20686fe1ee12d17e8dfda..783a25526315beacce63c85fa1536ddde2117c17 100644 (file)
@@ -208,9 +208,10 @@ static void notify_down(u32 contr)
        }
 }
 
-static void notify_handler(void *data)
+static void notify_handler(struct work_struct *work)
 {
-       struct capi_notifier *np = data;
+       struct capi_notifier *np =
+               container_of(work, struct capi_notifier, work);
 
        switch (np->cmd) {
        case KCI_CONTRUP:
@@ -235,7 +236,7 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
        if (!np)
                return -ENOMEM;
 
-       INIT_WORK(&np->work, notify_handler, np);
+       INIT_WORK(&np->work, notify_handler);
        np->cmd = cmd;
        np->controller = controller;
        np->applid = applid;
@@ -248,10 +249,11 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
        
 /* -------- Receiver ------------------------------------------ */
 
-static void recv_handler(void *_ap)
+static void recv_handler(struct work_struct *work)
 {
        struct sk_buff *skb;
-       struct capi20_appl *ap = (struct capi20_appl *) _ap;
+       struct capi20_appl *ap =
+               container_of(work, struct capi20_appl, recv_work);
 
        if ((!ap) || (ap->release_in_progress))
                return;
@@ -527,7 +529,7 @@ u16 capi20_register(struct capi20_appl *ap)
        ap->callback = NULL;
        init_MUTEX(&ap->recv_sem);
        skb_queue_head_init(&ap->recv_queue);
-       INIT_WORK(&ap->recv_work, recv_handler, (void *)ap);
+       INIT_WORK(&ap->recv_work, recv_handler);
        ap->release_in_progress = 0;
 
        write_unlock_irqrestore(&application_lock, flags);
index 0c937325a1b36ce937d8ca99ea20d2587da1dcbe..63b629b1cdb2be0c1e79ac5d7d56f100914a5c11 100644 (file)
@@ -572,7 +572,7 @@ static int atread_submit(struct cardstate *cs, int timeout)
                             ucs->rcvbuf, ucs->rcvbuf_size,
                             read_ctrl_callback, cs->inbuf);
 
-       if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
+       if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) {
                update_basstate(ucs, 0, BS_ATRDPEND);
                dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
                        get_usb_rcmsg(ret));
@@ -747,7 +747,7 @@ static void read_int_callback(struct urb *urb)
        check_pending(ucs);
 
 resubmit:
-       rc = usb_submit_urb(urb, SLAB_ATOMIC);
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
        if (unlikely(rc != 0 && rc != -ENODEV)) {
                dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
                        get_usb_rcmsg(rc));
@@ -807,7 +807,7 @@ static void read_iso_callback(struct urb *urb)
                        urb->number_of_packets = BAS_NUMFRAMES;
                        gig_dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit",
                                __func__);
-                       rc = usb_submit_urb(urb, SLAB_ATOMIC);
+                       rc = usb_submit_urb(urb, GFP_ATOMIC);
                        if (unlikely(rc != 0 && rc != -ENODEV)) {
                                dev_err(bcs->cs->dev,
                                        "could not resubmit isochronous read "
@@ -900,7 +900,7 @@ static int starturbs(struct bc_state *bcs)
                }
 
                dump_urb(DEBUG_ISO, "Initial isoc read", urb);
-               if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0)
+               if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
                        goto error;
        }
 
@@ -935,7 +935,7 @@ static int starturbs(struct bc_state *bcs)
        /* submit two URBs, keep third one */
        for (k = 0; k < 2; ++k) {
                dump_urb(DEBUG_ISO, "Initial isoc write", urb);
-               rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC);
+               rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
                if (rc != 0)
                        goto error;
        }
@@ -1042,7 +1042,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
                return 0;       /* no data to send */
        urb->number_of_packets = nframe;
 
-       rc = usb_submit_urb(urb, SLAB_ATOMIC);
+       rc = usb_submit_urb(urb, GFP_ATOMIC);
        if (unlikely(rc)) {
                if (rc == -ENODEV)
                        /* device removed - give up silently */
@@ -1341,7 +1341,7 @@ static void read_iso_tasklet(unsigned long data)
                urb->dev = bcs->cs->hw.bas->udev;
                urb->transfer_flags = URB_ISO_ASAP;
                urb->number_of_packets = BAS_NUMFRAMES;
-               rc = usb_submit_urb(urb, SLAB_ATOMIC);
+               rc = usb_submit_urb(urb, GFP_ATOMIC);
                if (unlikely(rc != 0 && rc != -ENODEV)) {
                        dev_err(cs->dev,
                                "could not resubmit isochronous read URB: %s\n",
@@ -1458,7 +1458,7 @@ static void write_ctrl_callback(struct urb *urb)
                           ucs->retry_ctrl);
                /* urb->dev is clobbered by USB subsystem */
                urb->dev = ucs->udev;
-               rc = usb_submit_urb(urb, SLAB_ATOMIC);
+               rc = usb_submit_urb(urb, GFP_ATOMIC);
                if (unlikely(rc)) {
                        dev_err(&ucs->interface->dev,
                                "could not resubmit request 0x%02x: %s\n",
@@ -1517,7 +1517,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
                             (unsigned char*) &ucs->dr_ctrl, NULL, 0,
                             write_ctrl_callback, ucs);
        ucs->retry_ctrl = 0;
-       ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC);
+       ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
        if (unlikely(ret)) {
                dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s\n",
                        req, get_usb_rcmsg(ret));
@@ -1763,7 +1763,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
                             usb_sndctrlpipe(ucs->udev, 0),
                             (unsigned char*) &ucs->dr_cmd_out, buf, len,
                             write_command_callback, cs);
-       rc = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC);
+       rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
        if (unlikely(rc)) {
                update_basstate(ucs, 0, BS_ATWRPEND);
                dev_err(cs->dev, "could not submit HD_WRITE_ATMESSAGE: %s\n",
@@ -2218,21 +2218,21 @@ static int gigaset_probe(struct usb_interface *interface,
         * - three for the different uses of the default control pipe
         * - three for each isochronous pipe
         */
-       if (!(ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-           !(ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL)) ||
-           !(ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL)) ||
-           !(ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL)))
+       if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+           !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) ||
+           !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) ||
+           !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
                goto allocerr;
 
        for (j = 0; j < 2; ++j) {
                ubc = cs->bcs[j].hw.bas;
                for (i = 0; i < BAS_OUTURBS; ++i)
                        if (!(ubc->isoouturbs[i].urb =
-                             usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+                             usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
                                goto allocerr;
                for (i = 0; i < BAS_INURBS; ++i)
                        if (!(ubc->isoinurbs[i] =
-                             usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL)))
+                             usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL)))
                                goto allocerr;
        }
 
@@ -2246,7 +2246,7 @@ static int gigaset_probe(struct usb_interface *interface,
                                        (endpoint->bEndpointAddress) & 0x0f),
                         ucs->int_in_buf, 3, read_int_callback, cs,
                         endpoint->bInterval);
-       if ((rc = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL)) != 0) {
+       if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
                dev_err(cs->dev, "could not submit interrupt URB: %s\n",
                        get_usb_rcmsg(rc));
                goto error;
index 5ebf49ac9b2302b89abc26a2fe99c83cb523c398..04f2ad7ba8b04590ae927591096d95e0fc783a1b 100644 (file)
@@ -410,7 +410,7 @@ static void gigaset_read_int_callback(struct urb *urb)
 
        if (resubmit) {
                spin_lock_irqsave(&cs->lock, flags);
-               r = cs->connected ? usb_submit_urb(urb, SLAB_ATOMIC) : -ENODEV;
+               r = cs->connected ? usb_submit_urb(urb, GFP_ATOMIC) : -ENODEV;
                spin_unlock_irqrestore(&cs->lock, flags);
                if (r)
                        dev_err(cs->dev, "error %d when resubmitting urb.\n",
@@ -486,7 +486,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
                        atomic_set(&ucs->busy, 1);
 
                        spin_lock_irqsave(&cs->lock, flags);
-                       status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC) : -ENODEV;
+                       status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
                        spin_unlock_irqrestore(&cs->lock, flags);
 
                        if (status) {
@@ -664,7 +664,7 @@ static int write_modem(struct cardstate *cs)
                                                  ucs->bulk_out_endpointAddr & 0x0f),
                                  ucs->bulk_out_buffer, count,
                                  gigaset_write_bulk_callback, cs);
-               ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
+               ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
        } else {
                ret = -ENODEV;
        }
@@ -763,7 +763,7 @@ static int gigaset_probe(struct usb_interface *interface,
                goto error;
        }
 
-       ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
+       ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!ucs->bulk_out_urb) {
                dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n");
                retval = -ENOMEM;
@@ -774,7 +774,7 @@ static int gigaset_probe(struct usb_interface *interface,
 
        atomic_set(&ucs->busy, 0);
 
-       ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
+       ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!ucs->read_urb) {
                dev_err(cs->dev, "No free urbs available\n");
                retval = -ENOMEM;
@@ -797,7 +797,7 @@ static int gigaset_probe(struct usb_interface *interface,
                         gigaset_read_int_callback,
                         cs->inbuf + 0, endpoint->bInterval);
 
-       retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
+       retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
        if (retval) {
                dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval);
                goto error;
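
These gigaset hunks are part of the tree-wide removal of the SLAB_KERNEL/SLAB_ATOMIC aliases: usb_alloc_urb() and usb_submit_urb() take plain gfp_t allocation flags, so the callers now pass GFP_KERNEL where sleeping is allowed (the probe path) and GFP_ATOMIC where it is not (completion handlers, or with spin_lock_irqsave() held as in the resubmit paths above). A minimal sketch of the convention, with hypothetical function names:

        #include <linux/usb.h>

        /* process context (probe, open, ioctl): a sleeping allocation is fine */
        static struct urb *example_alloc_urb(void)
        {
                return usb_alloc_urb(0, GFP_KERNEL);    /* 0 = no isochronous packets */
        }

        /* completion handler or spinlock held: must not sleep */
        static int example_resubmit(struct urb *urb)
        {
                return usb_submit_urb(urb, GFP_ATOMIC);
        }
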
index 7bbfd85ab793721bbe9a2fec29ef546256c5c8af..fd5d7364a48725374cf599b4fa94aecfde53d1c6 100644 (file)
@@ -194,41 +194,11 @@ static int avmcs_config(struct pcmcia_device *link)
 
     dev = link->priv;
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
     do {
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       i = pcmcia_get_first_tuple(link, &tuple);
-       if (i != CS_SUCCESS) break;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = 64;
-       tuple.TupleOffset = 0;
-       i = pcmcia_get_tuple_data(link, &tuple);
-       if (i != CS_SUCCESS) break;
-       i = pcmcia_parse_tuple(link, &tuple, &parse);
-       if (i != CS_SUCCESS) break;
-       link->conf.ConfigBase = parse.config.base;
-    } while (0);
-    if (i != CS_SUCCESS) {
-       cs_error(link, ParseTuple, i);
-       return -ENODEV;
-    }
-
-    do {
-
-       tuple.Attributes = 0;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = 254;
-       tuple.TupleOffset = 0;
-       tuple.DesiredTuple = CISTPL_VERS_1;
-
        devname[0] = 0;
-       if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
-           strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 
-                       sizeof(devname));
-       }
+       if (link->prod_id[1])
+               strlcpy(devname, link->prod_id[1], sizeof(devname));
+
        /*
          * find IO port
          */
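
The block deleted above (and the identical one removed from the avma1cs driver below) walked the card's CIS by hand to fetch the CISTPL_CONFIG and CISTPL_VERS_1 tuples. By this point the PCMCIA core parses those tuples itself while setting the device up, filling in link->conf.ConfigBase and the link->prod_id[] strings, so the driver only needs the two-line prod_id copy that replaces the manual parsing.
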
index bec59010bc66f3d65f4e433f8fc18eec860452f2..3b19caeba25880971a708213519ab734dc5ce343 100644 (file)
@@ -232,9 +232,10 @@ Amd7930_new_ph(struct IsdnCardState *cs)
 
 
 static void
-Amd7930_bh(struct IsdnCardState *cs)
+Amd7930_bh(struct work_struct *work)
 {
-
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
         struct PStack *stptr;
 
        if (!cs)
@@ -789,7 +790,7 @@ Amd7930_init(struct IsdnCardState *cs)
 void __devinit
 setup_Amd7930(struct IsdnCardState *cs)
 {
-        INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs);
+        INIT_WORK(&cs->tqueue, Amd7930_bh);
        cs->dbusytimer.function = (void *) dbusy_timer_handler;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
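
This is the workqueue API conversion that recurs throughout the ISDN hunks below: work handlers now receive the struct work_struct pointer itself instead of an opaque void *, recover their private data with container_of(), and INIT_WORK()/DECLARE_WORK()/INIT_DELAYED_WORK() lose their former data argument. A minimal sketch of the pattern, using hypothetical names (my_card, my_card_bh):

        #include <linux/workqueue.h>

        struct my_card {
                struct work_struct tqueue;
                unsigned long events;
        };

        static void my_card_bh(struct work_struct *work)
        {
                /* recover the enclosing object from the embedded work item */
                struct my_card *card = container_of(work, struct my_card, tqueue);

                card->events = 0;       /* placeholder for the real event handling */
        }

        static void my_card_setup(struct my_card *card)
        {
                /* was: INIT_WORK(&card->tqueue, (void *)(void *) my_card_bh, card); */
                INIT_WORK(&card->tqueue, my_card_bh);
        }
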
index ac28e3278ad91567ff710d572afce06b7ecfb31c..876fec6c6be82528fae97f9b95ca320fa6543318 100644 (file)
@@ -216,41 +216,11 @@ static int avma1cs_config(struct pcmcia_device *link)
 
     DEBUG(0, "avma1cs_config(0x%p)\n", link);
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
     do {
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       i = pcmcia_get_first_tuple(link, &tuple);
-       if (i != CS_SUCCESS) break;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = 64;
-       tuple.TupleOffset = 0;
-       i = pcmcia_get_tuple_data(link, &tuple);
-       if (i != CS_SUCCESS) break;
-       i = pcmcia_parse_tuple(link, &tuple, &parse);
-       if (i != CS_SUCCESS) break;
-       link->conf.ConfigBase = parse.config.base;
-    } while (0);
-    if (i != CS_SUCCESS) {
-       cs_error(link, ParseTuple, i);
-       return -ENODEV;
-    }
-
-    do {
-
-       tuple.Attributes = 0;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = 254;
-       tuple.TupleOffset = 0;
-       tuple.DesiredTuple = CISTPL_VERS_1;
-
        devname[0] = 0;
-       if( !first_tuple(link, &tuple, &parse) && parse.version_1.ns > 1 ) {
-           strlcpy(devname,parse.version_1.str + parse.version_1.ofs[1], 
-                       sizeof(devname));
-       }
+       if (link->prod_id[1])
+               strlcpy(devname, link->prod_id[1], sizeof(devname));
+
        /*
          * find IO port
          */
index 785b08554fcaa3685a3115a965be95bccc1d72e1..cede72cdbb319ab2eb51f322a15a54e0a95a24b9 100644 (file)
@@ -1137,7 +1137,6 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
        cs->tx_skb = NULL;
        cs->tx_cnt = 0;
        cs->event = 0;
-       cs->tqueue.data = cs;
 
        skb_queue_head_init(&cs->rq);
        skb_queue_head_init(&cs->sq);
@@ -1554,7 +1553,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg);
 static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg);
 static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs);
 static void hisax_bc_close(struct BCState *bcs);
-static void hisax_bh(struct IsdnCardState *cs);
+static void hisax_bh(struct work_struct *work);
 static void EChannel_proc_rcv(struct hisax_d_if *d_if);
 
 int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
@@ -1586,7 +1585,7 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[],
        hisax_d_if->cs = cs;
        cs->hw.hisax_d_if = hisax_d_if;
        cs->cardmsg = hisax_cardmsg;
-       INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs);
+       INIT_WORK(&cs->tqueue, hisax_bh);
        cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1;
        for (i = 0; i < 2; i++) {
                cs->bcs[i].BC_SetStack = hisax_bc_setstack;
@@ -1618,8 +1617,10 @@ static void hisax_sched_event(struct IsdnCardState *cs, int event)
        schedule_work(&cs->tqueue);
 }
 
-static void hisax_bh(struct IsdnCardState *cs)
+static void hisax_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *st;
        int pr;
 
index e18e75be8ed308f07e7789980203385a509954d4..4e180d210faa31ca0101c39e6304a1ef6ffc5874 100644 (file)
@@ -242,23 +242,6 @@ static int elsa_cs_config(struct pcmcia_device *link)
     DEBUG(0, "elsa_config(0x%p)\n", link);
     dev = link->priv;
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    tuple.TupleData = (cisdata_t *)buf;
-    tuple.TupleDataMax = 255;
-    tuple.TupleOffset = 0;
-    tuple.Attributes = 0;
-    i = first_tuple(link, &tuple, &parse);
-    if (i != CS_SUCCESS) {
-        last_fn = ParseTuple;
-       goto cs_failed;
-    }
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
     tuple.Attributes = 0;
index d852c9d998b2bd51ae022bf870b5062298b3615c..de9b1a4d6bac56bfb98d9892aee31bd24d11d8cd 100644 (file)
@@ -1083,8 +1083,9 @@ tx_b_frame(struct hfc4s8s_btype *bch)
 /* bottom half handler for interrupt */
 /*************************************/
 static void
-hfc4s8s_bh(hfc4s8s_hw * hw)
+hfc4s8s_bh(struct work_struct *work)
 {
+       hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue);
        u_char b;
        struct hfc4s8s_l1 *l1p;
        volatile u_char *fifo_stat;
@@ -1550,7 +1551,7 @@ setup_instance(hfc4s8s_hw * hw)
                goto out;
        }
 
-       INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw);
+       INIT_WORK(&hw->tqueue, hfc4s8s_bh);
 
        if (request_irq
            (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) {
index 6360e82147205b0aadd5120a2961bb2a8f58d57b..8d9864453a234ee060d01a3957c4420ec47b261d 100644 (file)
@@ -549,10 +549,11 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
 }
 
 static void
-hfcd_bh(struct IsdnCardState *cs)
+hfcd_bh(struct work_struct *work)
 {
-       if (!cs)
-               return;
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
+
        if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) {
                switch (cs->dc.hfcd.ph_state) {
                        case (0):
@@ -1072,5 +1073,5 @@ set_cs_func(struct IsdnCardState *cs)
        cs->dbusytimer.function = (void *) hfc_dbusy_timer;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
-       INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs);
+       INIT_WORK(&cs->tqueue, hfcd_bh);
 }
index 93f60b563515c6ea385e516a880fe7e977be25b7..5db0a85b827fb8009b5324b91ffeace21c035dfb 100644 (file)
@@ -1506,8 +1506,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
 /* handle L1 state changes */
 /***************************/
 static void
-hfcpci_bh(struct IsdnCardState *cs)
+hfcpci_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        u_long  flags;
 //      struct PStack *stptr;
 
@@ -1722,7 +1724,7 @@ setup_hfcpci(struct IsdnCard *card)
                Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
                /* At this point the needed PCI config is done */
                /* fifos are still not enabled */
-               INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs);
+               INIT_WORK(&cs->tqueue,  hfcpci_bh);
                cs->setstack_d = setstack_hfcpci;
                cs->BC_Send_Data = &hfcpci_send_data;
                cs->readisac = NULL;
index 954d1536db1ffa8d6ede0b7e1b4927f5722c0f6f..4fd09d21a27f587c52e9fd1485ab7132dff1151b 100644 (file)
@@ -1251,8 +1251,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs)
 /* handle L1 state changes */
 /***************************/
 static void
-hfcsx_bh(struct IsdnCardState *cs)
+hfcsx_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        u_long flags;
 
        if (!cs)
@@ -1499,7 +1501,7 @@ setup_hfcsx(struct IsdnCard *card)
        cs->dbusytimer.function = (void *) hfcsx_dbusy_timer;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
-       INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs);
+       INIT_WORK(&cs->tqueue, hfcsx_bh);
        cs->readisac = NULL;
        cs->writeisac = NULL;
        cs->readisacfifo = NULL;
index da706925d54d63a074532f6dfe15dd331b0e8d39..682cac32f259fc5c3be4d14f88f1c0462cea6f57 100644 (file)
@@ -77,8 +77,10 @@ icc_new_ph(struct IsdnCardState *cs)
 }
 
 static void
-icc_bh(struct IsdnCardState *cs)
+icc_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *stptr;
        
        if (!cs)
@@ -674,7 +676,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
 void __devinit
 setup_icc(struct IsdnCardState *cs)
 {
-       INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs);
+       INIT_WORK(&cs->tqueue, icc_bh);
        cs->dbusytimer.function = (void *) dbusy_timer_handler;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
index 282f349408bc42c2e4e2ed38eac92317887eb466..4e9f23803daef8767388f16677c46182006543fe 100644 (file)
@@ -81,8 +81,10 @@ isac_new_ph(struct IsdnCardState *cs)
 }
 
 static void
-isac_bh(struct IsdnCardState *cs)
+isac_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *stptr;
        
        if (!cs)
@@ -674,7 +676,7 @@ clear_pending_isac_ints(struct IsdnCardState *cs)
 void __devinit
 setup_isac(struct IsdnCardState *cs)
 {
-       INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs);
+       INIT_WORK(&cs->tqueue, isac_bh);
        cs->dbusytimer.function = (void *) dbusy_timer_handler;
        cs->dbusytimer.data = (long) cs;
        init_timer(&cs->dbusytimer);
index 674af673ff965e9bb48b8e7c2ac9b4452e1607e5..6f1a6583b17d34c9aa2ec9f3e9764bcdf6fd192e 100644 (file)
@@ -437,8 +437,10 @@ extern void BChannel_bh(struct BCState *);
 #define B_LL_OK                10
 
 static void
-isar_bh(struct BCState *bcs)
+isar_bh(struct work_struct *work)
 {
+       struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
        BChannel_bh(bcs);
        if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event))
                ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR);
@@ -1580,7 +1582,7 @@ isar_setup(struct IsdnCardState *cs)
                cs->bcs[i].mode = 0;
                cs->bcs[i].hw.isar.dpath = i + 1;
                modeisar(&cs->bcs[i], 0, 0);
-               INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]);
+               INIT_WORK(&cs->bcs[i].tqueue, isar_bh);
        }
 }
 
index 269315988dc8eb3d177700b98f83dd4e9daeb44f..5655b5f9c48e2353f4c73cdd05e0c4e9e5a232be 100644 (file)
@@ -41,10 +41,10 @@ struct isdnhdlc_vars {
        unsigned char shift_reg;
        unsigned char ffvalue;
 
-       int data_received:1;    // set if transferring data
-       int dchannel:1;         // set if D channel (send idle instead of flags)
-       int do_adapt56:1;       // set if 56K adaptation
-        int do_closing:1;      // set if in closing phase (need to send CRC + flag
+       unsigned int data_received:1;   // set if transferring data
+       unsigned int dchannel:1;        // set if D channel (send idle instead of flags)
+       unsigned int do_adapt56:1;      // set if 56K adaptation
+       unsigned int do_closing:1;      // set if in closing phase (need to send CRC + flag
 };
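
The isdnhdlc change above is the usual one-bit bit-field fix: with gcc a plain "int x:1" is a signed field whose only representable values are 0 and -1, so comparisons against 1 silently fail and sparse warns about a "dubious one-bit signed bitfield". A tiny illustration (struct flag_demo is hypothetical):

        struct flag_demo {
                int          s:1;       /* signed 1-bit field: holds 0 or -1 */
                unsigned int u:1;       /* unsigned 1-bit field: holds 0 or 1 */
        };

        /* After assigning 1 to both fields with gcc, s reads back as -1 and u as 1,
         * so (s == 1) is never true while (u == 1) behaves as expected. */
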
 
 
index bab356886483e257b7c718e4bde5fe6c7e3ae9f7..a14204ec88eec7990aeadbe332b7406840a2bbd3 100644 (file)
@@ -315,8 +315,10 @@ BChannel_proc_ack(struct BCState *bcs)
 }
 
 void
-BChannel_bh(struct BCState *bcs)
+BChannel_bh(struct work_struct *work)
 {
+       struct BCState *bcs = container_of(work, struct BCState, tqueue);
+
        if (!bcs)
                return;
        if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event))
@@ -362,7 +364,7 @@ init_bcstate(struct IsdnCardState *cs, int bc)
 
        bcs->cs = cs;
        bcs->channel = bc;
-       INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs);
+       INIT_WORK(&bcs->tqueue, BChannel_bh);
        spin_lock_init(&bcs->aclock);
        bcs->BC_SetStack = NULL;
        bcs->BC_Close = NULL;
index f9c14a2970bc1a502692edaf19b17a88ffb320a3..46ed65334c51d8465bffdb2829b34024ed55e151 100644 (file)
@@ -233,20 +233,10 @@ static int sedlbauer_config(struct pcmcia_device *link)
 
     DEBUG(0, "sedlbauer_config(0x%p)\n", link);
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.Attributes = 0;
     tuple.TupleData = buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
 
     CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
 
index afcc2aeadb3440b922e3f4eba3a62af2f200821c..6b754f183796318a6d7fb88bb18388f3169aff9b 100644 (file)
@@ -232,23 +232,6 @@ static int teles_cs_config(struct pcmcia_device *link)
     DEBUG(0, "teles_config(0x%p)\n", link);
     dev = link->priv;
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    tuple.TupleData = (cisdata_t *)buf;
-    tuple.TupleDataMax = 255;
-    tuple.TupleOffset = 0;
-    tuple.Attributes = 0;
-    i = first_tuple(link, &tuple, &parse);
-    if (i != CS_SUCCESS) {
-        last_fn = ParseTuple;
-       goto cs_failed;
-    }
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
     tuple.Attributes = 0;
index 1655341797a97816f510c9b90148a1862376ded4..3aeceaf9769e5dc30a7cbefd30b4995082f8c7f6 100644 (file)
@@ -101,8 +101,10 @@ W6692_new_ph(struct IsdnCardState *cs)
 }
 
 static void
-W6692_bh(struct IsdnCardState *cs)
+W6692_bh(struct work_struct *work)
 {
+       struct IsdnCardState *cs =
+               container_of(work, struct IsdnCardState, tqueue);
        struct PStack *stptr;
 
        if (!cs)
@@ -1070,7 +1072,7 @@ setup_w6692(struct IsdnCard *card)
               id_list[cs->subtyp].card_name, cs->irq,
               cs->hw.w6692.iobase);
 
-       INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs);
+       INIT_WORK(&cs->tqueue, W6692_bh);
        cs->readW6692 = &ReadW6692;
        cs->writeW6692 = &WriteW6692;
        cs->readisacfifo = &ReadISACfifo;
index 82e42a80dc4b01f24deafa97e4eaadfcd32de798..a1206498a1cf17bb1e39149103aeda8545b79a87 100644 (file)
@@ -71,8 +71,9 @@ ergo_interrupt(int intno, void *dev_id)
 /* may be queued from everywhere (interrupts included).                       */
 /******************************************************************************/
 static void
-ergo_irq_bh(hysdn_card * card)
+ergo_irq_bh(struct work_struct *ugli_api)
 {
+       hysdn_card * card = container_of(ugli_api, hysdn_card, irq_queue);
        tErgDpram *dpr;
        int again;
        unsigned long flags;
@@ -442,7 +443,7 @@ ergo_inithardware(hysdn_card * card)
        card->writebootseq = ergo_writebootseq;
        card->waitpofready = ergo_waitpofready;
        card->set_errlog_state = ergo_set_errlog_state;
-       INIT_WORK(&card->irq_queue, (void *) (void *) ergo_irq_bh, card);
+       INIT_WORK(&card->irq_queue, ergo_irq_bh);
        card->hysdn_lock = SPIN_LOCK_UNLOCKED;
 
        return (0);
index 1f8d6ae66b41e47ac2be94ef6d44d25ffdc72b7b..2e4daebfb7e0ab0df1a944d50da6b4418f589dea 100644 (file)
@@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb)
 /*
  * called from tq_immediate
  */
-static void isdn_net_softint(void *private)
+static void isdn_net_softint(struct work_struct *work)
 {
-       isdn_net_local *lp = private;
+       isdn_net_local *lp = container_of(work, isdn_net_local, tqueue);
        struct sk_buff *skb;
 
        spin_lock_bh(&lp->xmit_lock);
@@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master)
        netdev->local->netdev = netdev;
        netdev->local->next = netdev->local;
 
-       INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local);
+       INIT_WORK(&netdev->local->tqueue, isdn_net_softint);
        spin_lock_init(&netdev->local->xmit_lock);
 
        netdev->local->isdn_device = -1;
index 6ead5e1508b705fd682c28c2a57c9a581ab5e611..1966f3410a13e600f91b6c776a50b5d1534b11e1 100644 (file)
@@ -68,8 +68,6 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list);
 static int pcbit_check_msn(struct pcbit_dev *dev, char *msn);
 
 
-extern void pcbit_deliver(void * data);
-
 int pcbit_init_dev(int board, int mem_base, int irq)
 {
        struct pcbit_dev *dev;
@@ -129,7 +127,7 @@ int pcbit_init_dev(int board, int mem_base, int irq)
        memset(dev->b2, 0, sizeof(struct pcbit_chan));
        dev->b2->id = 1;
 
-       INIT_WORK(&dev->qdelivery, pcbit_deliver, dev);
+       INIT_WORK(&dev->qdelivery, pcbit_deliver);
 
        /*
         *  interrupts
index 937fd21203816b7521aa92b660672b424e8f6dc3..0c9f6df873fc55b63732922b40c3f7567faf8dc5 100644 (file)
@@ -67,7 +67,6 @@ extern void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
  *  Prototypes
  */
 
-void pcbit_deliver(void *data);
 static void pcbit_transmit(struct pcbit_dev *dev);
 
 static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack);
@@ -299,11 +298,12 @@ pcbit_transmit(struct pcbit_dev *dev)
  */
 
 void
-pcbit_deliver(void *data)
+pcbit_deliver(struct work_struct *work)
 {
        struct frame_buf *frame;
        unsigned long flags, msg;
-       struct pcbit_dev *dev = (struct pcbit_dev *) data;
+       struct pcbit_dev *dev =
+               container_of(work, struct pcbit_dev, qdelivery);
 
        spin_lock_irqsave(&dev->lock, flags);
 
index 388bacefd23ac9ad9349f8b53a64f78c39bae8fb..19c18e88ff160be7c315c6c43f32dafbd62cdb79 100644 (file)
@@ -166,4 +166,6 @@ struct pcbit_ioctl {
 #define L2_RUNNING  5
 #define L2_ERROR    6
 
+extern void pcbit_deliver(struct work_struct *work);
+
 #endif
index 9c39b98d5a5b2a6f2689e1fbb0a04c8395e874db..176142c61492c99c1f31a16c7f3572a381e1e006 100644 (file)
@@ -76,6 +76,12 @@ config LEDS_NET48XX
          This option enables support for the Soekris net4801 and net4826 error
          LED.
 
+config LEDS_WRAP
+       tristate "LED Support for the WRAP series LEDs"
+       depends on LEDS_CLASS && SCx200_GPIO
+       help
+         This option enables support for the PCEngines WRAP programmable LEDs.
+
 comment "LED Triggers"
 
 config LEDS_TRIGGERS
index 6aa2aed7539de65135000203e46dab3c0e28a454..500de3dc962adedcb2576e543754f6d54d439988 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_TOSA)                       += leds-tosa.o
 obj-$(CONFIG_LEDS_S3C24XX)             += leds-s3c24xx.o
 obj-$(CONFIG_LEDS_AMS_DELTA)           += leds-ams-delta.o
 obj-$(CONFIG_LEDS_NET48XX)             += leds-net48xx.o
+obj-$(CONFIG_LEDS_WRAP)                        += leds-wrap.o
 
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)       += ledtrig-timer.o
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
new file mode 100644 (file)
index 0000000..27fb2d8
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * LEDs driver for PCEngines WRAP
+ *
+ * Copyright (C) 2006 Kristian Kielhofner <kris@krisk.org>
+ *
+ * Based on leds-net48xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#include <linux/scx200_gpio.h>
+
+#define DRVNAME "wrap-led"
+#define WRAP_ERROR_LED_GPIO    3
+#define        WRAP_EXTRA_LED_GPIO     18
+
+static struct platform_device *pdev;
+
+static void wrap_error_led_set(struct led_classdev *led_cdev,
+               enum led_brightness value)
+{
+       if (value)
+               scx200_gpio_set_low(WRAP_ERROR_LED_GPIO);
+       else
+               scx200_gpio_set_high(WRAP_ERROR_LED_GPIO);
+}
+
+static void wrap_extra_led_set(struct led_classdev *led_cdev,
+               enum led_brightness value)
+{
+       if (value)
+               scx200_gpio_set_low(WRAP_EXTRA_LED_GPIO);
+       else
+               scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO);
+}
+
+static struct led_classdev wrap_error_led = {
+       .name           = "wrap:error",
+       .brightness_set = wrap_error_led_set,
+};
+
+static struct led_classdev wrap_extra_led = {
+       .name           = "wrap:extra",
+       .brightness_set = wrap_extra_led_set,
+};
+
+#ifdef CONFIG_PM
+static int wrap_led_suspend(struct platform_device *dev,
+               pm_message_t state)
+{
+       led_classdev_suspend(&wrap_error_led);
+       led_classdev_suspend(&wrap_extra_led);
+       return 0;
+}
+
+static int wrap_led_resume(struct platform_device *dev)
+{
+       led_classdev_resume(&wrap_error_led);
+       led_classdev_resume(&wrap_extra_led);
+       return 0;
+}
+#else
+#define wrap_led_suspend NULL
+#define wrap_led_resume NULL
+#endif
+
+static int wrap_led_probe(struct platform_device *pdev)
+{
+       int ret;
+
+       ret = led_classdev_register(&pdev->dev, &wrap_error_led);
+       if (ret == 0) {
+               ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
+               if (ret < 0)
+                       led_classdev_unregister(&wrap_error_led);
+       }
+       return ret;
+}
+
+static int wrap_led_remove(struct platform_device *pdev)
+{
+       led_classdev_unregister(&wrap_error_led);
+       led_classdev_unregister(&wrap_extra_led);
+       return 0;
+}
+
+static struct platform_driver wrap_led_driver = {
+       .probe          = wrap_led_probe,
+       .remove         = wrap_led_remove,
+       .suspend        = wrap_led_suspend,
+       .resume         = wrap_led_resume,
+       .driver         = {
+               .name           = DRVNAME,
+               .owner          = THIS_MODULE,
+       },
+};
+
+static int __init wrap_led_init(void)
+{
+       int ret;
+
+       if (!scx200_gpio_present()) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       ret = platform_driver_register(&wrap_led_driver);
+       if (ret < 0)
+               goto out;
+
+       pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               ret = PTR_ERR(pdev);
+               platform_driver_unregister(&wrap_led_driver);
+               goto out;
+       }
+
+out:
+       return ret;
+}
+
+static void __exit wrap_led_exit(void)
+{
+       platform_device_unregister(pdev);
+       platform_driver_unregister(&wrap_led_driver);
+}
+
+module_init(wrap_led_init);
+module_exit(wrap_led_exit);
+
+MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
+MODULE_DESCRIPTION("PCEngines WRAP LED driver");
+MODULE_LICENSE("GPL");
+
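
Once the new leds-wrap.c driver above is loaded, its two class devices show up as /sys/class/leds/wrap:error/ and /sys/class/leds/wrap:extra/; writing a non-zero brightness (e.g. echo 255 > /sys/class/leds/wrap:error/brightness) turns the LED on. The brightness callbacks drive the SCx200 GPIO low for "on", which suggests the WRAP LEDs are wired active-low.
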
index 7f8477d3a66128add17a44a5455dea6787e7248b..92ccee85e2a20fe30ab9fbe20ed64c8217ddccf9 100644 (file)
@@ -228,4 +228,11 @@ config ANSLCD
        tristate "Support for ANS LCD display"
        depends on ADB_CUDA && PPC_PMAC
 
+config PMAC_RACKMETER
+       tristate "Support for Apple XServe front panel LEDs"
+       depends on PPC_PMAC
+       help
+         This driver provides some support to control the front panel
+          blue LEDs "vu-meter" of the Xserve machines.
+
 endmenu
index b53d45f87b0bee7a74834cab9b2897f7d8eaa0f9..2dfc3f4eaf42b2055e8b4cddaf6ad6cff989180e 100644 (file)
@@ -42,3 +42,4 @@ obj-$(CONFIG_WINDFARM_PM112)  += windfarm_pm112.o windfarm_smu_sat.o \
                                   windfarm_smu_sensors.o \
                                   windfarm_max6690_sensor.o \
                                   windfarm_lm75_sensor.o windfarm_pid.o
+obj-$(CONFIG_PMAC_RACKMETER)   += rack-meter.o
index be0bd34ff6f90f7c311557f086b309da61b86fa1..d43ea81d6df9bcae26441a61edba92349d5308a1 100644 (file)
@@ -267,12 +267,12 @@ adb_probe_task(void *x)
 }
 
 static void
-__adb_probe_task(void *data)
+__adb_probe_task(struct work_struct *bullshit)
 {
        adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL);
 }
 
-static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL);
+static DECLARE_WORK(adb_reset_work, __adb_probe_task);
 
 int
 adb_reset_bus(void)
index 1293876a2ebd3c60244ae12c7ceb05bf630b315f..8862a83b8d8480451453fc7e6625350ce7f46cb2 100644 (file)
@@ -529,7 +529,8 @@ static int __init apm_emu_init(void)
        if (apm_proc)
                apm_proc->owner = THIS_MODULE;
 
-       misc_register(&apm_device);
+       if (misc_register(&apm_device) != 0)
+               printk(KERN_INFO "Could not create misc. device for apm\n");
 
        pmu_register_sleep_notifier(&apm_sleep_notifier);
 
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
new file mode 100644 (file)
index 0000000..5ed41fe
--- /dev/null
@@ -0,0 +1,616 @@
+/*
+ * RackMac vu-meter driver
+ *
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ * Released under the term of the GNU GPL v2.
+ *
+ * Support the CPU-meter LEDs of the Xserve G5
+ *
+ * TODO: Implement PWM to do variable intensity and provide userland
+ * interface for fun. Also, the CPU-meter could be made nicer by being
+ * a bit less "immediate" but giving instead a more average load over
+ * time. Patches welcome :-)
+ *
+ */
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#include <asm/dbdma.h>
+#include <asm/dbdma.h>
+#include <asm/macio.h>
+#include <asm/keylargo.h>
+
+/* Number of samples in a sample buffer */
+#define SAMPLE_COUNT           256
+
+/* CPU meter sampling rate in ms */
+#define CPU_SAMPLING_RATE      250
+
+struct rackmeter_dma {
+       struct dbdma_cmd        cmd[4]                  ____cacheline_aligned;
+       u32                     mark                    ____cacheline_aligned;
+       u32                     buf1[SAMPLE_COUNT]      ____cacheline_aligned;
+       u32                     buf2[SAMPLE_COUNT]      ____cacheline_aligned;
+} ____cacheline_aligned;
+
+struct rackmeter_cpu {
+       struct delayed_work     sniffer;
+       struct rackmeter        *rm;
+       cputime64_t             prev_wall;
+       cputime64_t             prev_idle;
+       int                     zero;
+} ____cacheline_aligned;
+
+struct rackmeter {
+       struct macio_dev                *mdev;
+       unsigned int                    irq;
+       struct device_node              *i2s;
+       u8                              *ubuf;
+       struct dbdma_regs __iomem       *dma_regs;
+       void __iomem                    *i2s_regs;
+       dma_addr_t                      dma_buf_p;
+       struct rackmeter_dma            *dma_buf_v;
+       int                             stale_irq;
+       struct rackmeter_cpu            cpu[2];
+       int                             paused;
+       struct mutex                    sem;
+};
+
+/* To be set as a tunable */
+static int rackmeter_ignore_nice;
+
+/* This GPIO is whacked by the OS X driver when initializing */
+#define RACKMETER_MAGIC_GPIO   0x78
+
+/* This is copied from cpufreq_ondemand, maybe we should put it in
+ * a common header somewhere
+ */
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+{
+       cputime64_t retval;
+
+       retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
+                       kstat_cpu(cpu).cpustat.iowait);
+
+       if (rackmeter_ignore_nice)
+               retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+
+       return retval;
+}
+
+static void rackmeter_setup_i2s(struct rackmeter *rm)
+{
+       struct macio_chip *macio = rm->mdev->bus->chip;
+
+       /* First whack magic GPIO */
+       pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5);
+
+
+       /* Call feature code to enable the sound channel and the proper
+        * clock sources
+        */
+       pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1);
+
+       /* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now.
+        * This is a bit racy, thus we should add new platform functions to
+        * handle that. snd-aoa needs that too
+        */
+       MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE);
+       MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT);
+       (void)MACIO_IN32(KEYLARGO_FCR1);
+       udelay(10);
+
+       /* Then setup i2s. For now, we use the same magic value that
+        * the OS X driver seems to use. We might want to play around
+        * with the clock divisors later
+        */
+       out_le32(rm->i2s_regs + 0x10, 0x01fa0000);
+       (void)in_le32(rm->i2s_regs + 0x10);
+       udelay(10);
+
+       /* Fully restart i2s*/
+       MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE |
+                 KL1_I2S0_CLK_ENABLE_BIT);
+       (void)MACIO_IN32(KEYLARGO_FCR1);
+       udelay(10);
+}
+
+static void rackmeter_set_default_pattern(struct rackmeter *rm)
+{
+       int i;
+
+       for (i = 0; i < 16; i++) {
+               if (i < 8)
+                       rm->ubuf[i] = (i & 1) * 255;
+               else
+                       rm->ubuf[i] = ((~i) & 1) * 255;
+       }
+}
+
+static void rackmeter_do_pause(struct rackmeter *rm, int pause)
+{
+       struct rackmeter_dma *rdma = rm->dma_buf_v;
+
+       pr_debug("rackmeter: %s\n", pause ? "paused" : "started");
+
+       rm->paused = pause;
+       if (pause) {
+               DBDMA_DO_STOP(rm->dma_regs);
+               return;
+       }
+       memset(rdma->buf1, 0, SAMPLE_COUNT * sizeof(u32));
+       memset(rdma->buf2, 0, SAMPLE_COUNT * sizeof(u32));
+
+       rm->dma_buf_v->mark = 0;
+
+       mb();
+       out_le32(&rm->dma_regs->cmdptr_hi, 0);
+       out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p);
+       out_le32(&rm->dma_regs->control, (RUN << 16) | RUN);
+}
+
+static void rackmeter_setup_dbdma(struct rackmeter *rm)
+{
+       struct rackmeter_dma *db = rm->dma_buf_v;
+       struct dbdma_cmd *cmd = db->cmd;
+
+       /* Make sure dbdma is reset */
+       DBDMA_DO_RESET(rm->dma_regs);
+
+       pr_debug("rackmeter: mark offset=0x%lx\n",
+                offsetof(struct rackmeter_dma, mark));
+       pr_debug("rackmeter: buf1 offset=0x%lx\n",
+                offsetof(struct rackmeter_dma, buf1));
+       pr_debug("rackmeter: buf2 offset=0x%lx\n",
+                offsetof(struct rackmeter_dma, buf2));
+
+       /* Prepare 4 dbdma commands for the 2 buffers */
+       memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
+       st_le16(&cmd->req_count, 4);
+       st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
+       st_le32(&cmd->phy_addr, rm->dma_buf_p +
+               offsetof(struct rackmeter_dma, mark));
+       st_le32(&cmd->cmd_dep, 0x02000000);
+       cmd++;
+
+       st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
+       st_le16(&cmd->command, OUTPUT_MORE);
+       st_le32(&cmd->phy_addr, rm->dma_buf_p +
+               offsetof(struct rackmeter_dma, buf1));
+       cmd++;
+
+       st_le16(&cmd->req_count, 4);
+       st_le16(&cmd->command, STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
+       st_le32(&cmd->phy_addr, rm->dma_buf_p +
+               offsetof(struct rackmeter_dma, mark));
+       st_le32(&cmd->cmd_dep, 0x01000000);
+       cmd++;
+
+       st_le16(&cmd->req_count, SAMPLE_COUNT * 4);
+       st_le16(&cmd->command, OUTPUT_MORE | BR_ALWAYS);
+       st_le32(&cmd->phy_addr, rm->dma_buf_p +
+               offsetof(struct rackmeter_dma, buf2));
+       st_le32(&cmd->cmd_dep, rm->dma_buf_p);
+
+       rackmeter_do_pause(rm, 0);
+}
+
+static void rackmeter_do_timer(struct work_struct *work)
+{
+       struct rackmeter_cpu *rcpu =
+               container_of(work, struct rackmeter_cpu, sniffer.work);
+       struct rackmeter *rm = rcpu->rm;
+       unsigned int cpu = smp_processor_id();
+       cputime64_t cur_jiffies, total_idle_ticks;
+       unsigned int total_ticks, idle_ticks;
+       int i, offset, load, cumm, pause;
+
+       cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+       total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
+                                                 rcpu->prev_wall);
+       rcpu->prev_wall = cur_jiffies;
+
+       total_idle_ticks = get_cpu_idle_time(cpu);
+       idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+                               rcpu->prev_idle);
+       rcpu->prev_idle = total_idle_ticks;
+
+       /* We do a very dumb calculation to update the LEDs for now,
+        * we'll do better once we have actual PWM implemented
+        */
+       load = (9 * (total_ticks - idle_ticks)) / total_ticks;
+
+       offset = cpu << 3;
+       cumm = 0;
+       for (i = 0; i < 8; i++) {
+               u8 ub = (load > i) ? 0xff : 0;
+               rm->ubuf[i + offset] = ub;
+               cumm |= ub;
+       }
+       rcpu->zero = (cumm == 0);
+
+       /* Now check if LEDs are all 0, we can stop DMA */
+       pause = (rm->cpu[0].zero && rm->cpu[1].zero);
+       if (pause != rm->paused) {
+               mutex_lock(&rm->sem);
+               pause = (rm->cpu[0].zero && rm->cpu[1].zero);
+               rackmeter_do_pause(rm, pause);
+               mutex_unlock(&rm->sem);
+       }
+       schedule_delayed_work_on(cpu, &rcpu->sniffer,
+                                msecs_to_jiffies(CPU_SAMPLING_RATE));
+}
+
+static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm)
+{
+       unsigned int cpu;
+
+       /* This driver works only with 1 or 2 CPUs numbered 0 and 1,
+        * but that's really all we have on Apple Xserve. It doesn't
+        * play very nice with CPU hotplug neither but we don't do that
+        * on those machines yet
+        */
+
+       rm->cpu[0].rm = rm;
+       INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
+       rm->cpu[1].rm = rm;
+       INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
+
+       for_each_online_cpu(cpu) {
+               struct rackmeter_cpu *rcpu;
+
+               if (cpu > 1)
+                       continue;
+               rcpu = &rm->cpu[cpu];
+               rcpu->prev_idle = get_cpu_idle_time(cpu);
+               rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
+               schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
+                                        msecs_to_jiffies(CPU_SAMPLING_RATE));
+       }
+}
+
+static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
+{
+       cancel_rearming_delayed_work(&rm->cpu[0].sniffer);
+       cancel_rearming_delayed_work(&rm->cpu[1].sniffer);
+}
+
+static int rackmeter_setup(struct rackmeter *rm)
+{
+       pr_debug("rackmeter: setting up i2s..\n");
+       rackmeter_setup_i2s(rm);
+
+       pr_debug("rackmeter: setting up default pattern..\n");
+       rackmeter_set_default_pattern(rm);
+
+       pr_debug("rackmeter: setting up dbdma..\n");
+       rackmeter_setup_dbdma(rm);
+
+       pr_debug("rackmeter: start CPU measurements..\n");
+       rackmeter_init_cpu_sniffer(rm);
+
+       printk(KERN_INFO "RackMeter initialized\n");
+
+       return 0;
+}
+
+/*  XXX FIXME: No PWM yet, this is 0/1 */
+static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index)
+{
+       int led;
+       u32 sample = 0;
+
+       for (led = 0; led < 16; led++) {
+               sample >>= 1;
+               sample |= ((rm->ubuf[led] >= 0x80) << 15);
+       }
+       return (sample << 17) | (sample >> 15);
+}
+
+static irqreturn_t rackmeter_irq(int irq, void *arg)
+{
+       struct rackmeter *rm = arg;
+       struct rackmeter_dma *db = rm->dma_buf_v;
+       unsigned int mark, i;
+       u32 *buf;
+
+       /* Flush PCI buffers with an MMIO read. Maybe we could actually
+        * check the status one day ... in case things go wrong, though
+        * this never happened to me
+        */
+       (void)in_le32(&rm->dma_regs->status);
+
+       /* Make sure the CPU gets us in order */
+       rmb();
+
+       /* Read mark */
+       mark = db->mark;
+       if (mark != 1 && mark != 2) {
+               printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n",
+                      mark);
+               /* We allow for 3 errors like that (stale DBDMA irqs) */
+               if (++rm->stale_irq > 3) {
+                       printk(KERN_ERR "rackmeter: Too many errors,"
+                              " stopping DMA\n");
+                       DBDMA_DO_RESET(rm->dma_regs);
+               }
+               return IRQ_HANDLED;
+       }
+
+       /* Next buffer we need to fill is mark value */
+       buf = mark == 1 ? db->buf1 : db->buf2;
+
+       /* Fill it now. This routine converts the 8 bits depth sample array
+        * into the PWM bitmap for each LED.
+        */
+       for (i = 0; i < SAMPLE_COUNT; i++)
+               buf[i] = rackmeter_calc_sample(rm, i);
+
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit rackmeter_probe(struct macio_dev* mdev,
+                                    const struct of_device_id *match)
+{
+       struct device_node *i2s = NULL, *np = NULL;
+       struct rackmeter *rm = NULL;
+       struct resource ri2s, rdma;
+       int rc = -ENODEV;
+
+       pr_debug("rackmeter_probe()\n");
+
+       /* Get i2s-a node */
+       while ((i2s = of_get_next_child(mdev->ofdev.node, i2s)) != NULL)
+              if (strcmp(i2s->name, "i2s-a") == 0)
+                      break;
+       if (i2s == NULL) {
+               pr_debug("  i2s-a child not found\n");
+               goto bail;
+       }
+       /* Get lightshow or virtual sound */
+       while ((np = of_get_next_child(i2s, np)) != NULL) {
+              if (strcmp(np->name, "lightshow") == 0)
+                      break;
+              if ((strcmp(np->name, "sound") == 0) &&
+                  get_property(np, "virtual", NULL) != NULL)
+                      break;
+       }
+       if (np == NULL) {
+               pr_debug("  lightshow or sound+virtual child not found\n");
+               goto bail;
+       }
+
+       /* Create and initialize our instance data */
+       rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL);
+       if (rm == NULL) {
+               printk(KERN_ERR "rackmeter: failed to allocate memory !\n");
+               rc = -ENOMEM;
+               goto bail_release;
+       }
+       rm->mdev = mdev;
+       rm->i2s = i2s;
+       mutex_init(&rm->sem);
+       dev_set_drvdata(&mdev->ofdev.dev, rm);
+       /* Check resources availability. We need at least resource 0 and 1 */
+#if 0 /* Use that when i2s-a is finally an mdev per-se */
+       if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
+               printk(KERN_ERR
+                      "rackmeter: found match but lacks resources: %s"
+                      " (%d resources, %d interrupts)\n",
+                      mdev->ofdev.node->full_name);
+               rc = -ENXIO;
+               goto bail_free;
+       }
+       if (macio_request_resources(mdev, "rackmeter")) {
+               printk(KERN_ERR
+                      "rackmeter: failed to request resources: %s\n",
+                      mdev->ofdev.node->full_name);
+               rc = -EBUSY;
+               goto bail_free;
+       }
+       rm->irq = macio_irq(mdev, 1);
+#else
+       rm->irq = irq_of_parse_and_map(i2s, 1);
+       if (rm->irq == NO_IRQ ||
+           of_address_to_resource(i2s, 0, &ri2s) ||
+           of_address_to_resource(i2s, 1, &rdma)) {
+               printk(KERN_ERR
+                      "rackmeter: found match but lacks resources: %s",
+                      mdev->ofdev.node->full_name);
+               rc = -ENXIO;
+               goto bail_free;
+       }
+#endif
+
+       pr_debug("  i2s @0x%08x\n", (unsigned int)ri2s.start);
+       pr_debug("  dma @0x%08x\n", (unsigned int)rdma.start);
+       pr_debug("  irq %d\n", rm->irq);
+
+       rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL);
+       if (rm->ubuf == NULL) {
+               printk(KERN_ERR
+                      "rackmeter: failed to allocate samples page !\n");
+               rc = -ENOMEM;
+               goto bail_release;
+       }
+
+       rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+                                          sizeof(struct rackmeter_dma),
+                                          &rm->dma_buf_p, GFP_KERNEL);
+       if (rm->dma_buf_v == NULL) {
+               printk(KERN_ERR
+                      "rackmeter: failed to allocate dma buffer !\n");
+               rc = -ENOMEM;
+               goto bail_free_samples;
+       }
+#if 0
+       rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
+#else
+       rm->i2s_regs = ioremap(ri2s.start, 0x1000);
+#endif
+       if (rm->i2s_regs == NULL) {
+               printk(KERN_ERR
+                      "rackmeter: failed to map i2s registers !\n");
+               rc = -ENXIO;
+               goto bail_free_dma;
+       }
+#if 0
+       rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
+#else
+       rm->dma_regs = ioremap(rdma.start, 0x100);
+#endif
+       if (rm->dma_regs == NULL) {
+               printk(KERN_ERR
+                      "rackmeter: failed to map dma registers !\n");
+               rc = -ENXIO;
+               goto bail_unmap_i2s;
+       }
+
+       rc = rackmeter_setup(rm);
+       if (rc) {
+               printk(KERN_ERR
+                      "rackmeter: failed to initialize !\n");
+               rc = -ENXIO;
+               goto bail_unmap_dma;
+       }
+
+       rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm);
+       if (rc != 0) {
+               printk(KERN_ERR
+                      "rackmeter: failed to request interrupt !\n");
+               goto bail_stop_dma;
+       }
+       of_node_put(np);
+       return 0;
+
+ bail_stop_dma:
+       DBDMA_DO_RESET(rm->dma_regs);
+ bail_unmap_dma:
+       iounmap(rm->dma_regs);
+ bail_unmap_i2s:
+       iounmap(rm->i2s_regs);
+ bail_free_dma:
+       dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
+                         sizeof(struct rackmeter_dma),
+                         rm->dma_buf_v, rm->dma_buf_p);
+ bail_free_samples:
+       free_page((unsigned long)rm->ubuf);
+ bail_release:
+#if 0
+       macio_release_resources(mdev);
+#endif
+ bail_free:
+       kfree(rm);
+ bail:
+       of_node_put(i2s);
+       of_node_put(np);
+       dev_set_drvdata(&mdev->ofdev.dev, NULL);
+       return rc;
+}
+
+static int __devexit rackmeter_remove(struct macio_dev* mdev)
+{
+       struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
+
+       /* Stop CPU sniffer timer & work queues */
+       rackmeter_stop_cpu_sniffer(rm);
+
+       /* Clear reference to private data */
+       dev_set_drvdata(&mdev->ofdev.dev, NULL);
+
+       /* Stop/reset dbdma */
+       DBDMA_DO_RESET(rm->dma_regs);
+
+       /* Release the IRQ */
+       free_irq(rm->irq, rm);
+
+       /* Unmap registers */
+       iounmap(rm->dma_regs);
+       iounmap(rm->i2s_regs);
+
+       /* Free DMA */
+       dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
+                         sizeof(struct rackmeter_dma),
+                         rm->dma_buf_v, rm->dma_buf_p);
+
+       /* Free samples */
+       free_page((unsigned long)rm->ubuf);
+
+#if 0
+       /* Release resources */
+       macio_release_resources(mdev);
+#endif
+
+       /* Get rid of me */
+       kfree(rm);
+
+       return 0;
+}
+
+static int rackmeter_shutdown(struct macio_dev* mdev)
+{
+       struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
+
+       if (rm == NULL)
+               return -ENODEV;
+
+       /* Stop CPU sniffer timer & work queues */
+       rackmeter_stop_cpu_sniffer(rm);
+
+       /* Stop/reset dbdma */
+       DBDMA_DO_RESET(rm->dma_regs);
+
+       return 0;
+}
+
+static struct of_device_id rackmeter_match[] = {
+       { .name = "i2s" },
+       { }
+};
+
+static struct macio_driver rackmeter_drv = {
+       .name = "rackmeter",
+       .owner = THIS_MODULE,
+       .match_table = rackmeter_match,
+       .probe = rackmeter_probe,
+       .remove = rackmeter_remove,
+       .shutdown = rackmeter_shutdown,
+};
+
+
+static int __init rackmeter_init(void)
+{
+       pr_debug("rackmeter_init()\n");
+
+       return macio_register_driver(&rackmeter_drv);
+}
+
+static void __exit rackmeter_exit(void)
+{
+       pr_debug("rackmeter_exit()\n");
+
+       macio_unregister_driver(&rackmeter_drv);
+}
+
+module_init(rackmeter_init);
+module_exit(rackmeter_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");
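
A rough worked example for the CPU sniffer above (rackmeter_do_timer): the load figure is 9 * (total_ticks - idle_ticks) / total_ticks, so with 100 ticks elapsed and 40 of them idle the result is 5 and LEDs 0-4 of that CPU's bank of eight are written as 0xff while the rest are cleared. Each per-CPU delayed_work re-queues itself on its own CPU every CPU_SAMPLING_RATE (250 ms) via schedule_delayed_work_on(), and only when both banks compute to zero does the handler take the mutex and stop the DBDMA engine through rackmeter_do_pause(), restarting it again once either bank lights up.
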
index ade25b3fbb35bd5057b17a623c801ef76a762a5c..6dde27ab79a817a1c9899eac5a319754e8d8f534 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/abs_addr.h>
 #include <asm/uaccess.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #define VERSION "0.7"
 #define AUTHOR  "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
@@ -600,7 +601,7 @@ core_initcall(smu_late_init);
  * sysfs visibility
  */
 
-static void smu_expose_childs(void *unused)
+static void smu_expose_childs(struct work_struct *unused)
 {
        struct device_node *np;
 
@@ -610,7 +611,7 @@ static void smu_expose_childs(void *unused)
                                                  &smu->of_dev->dev);
 }
 
-static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
+static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs);
 
 static int smu_platform_probe(struct of_device* dev,
                              const struct of_device_id *match)
@@ -653,7 +654,7 @@ static int __init smu_init_sysfs(void)
         * I'm a bit too far from figuring out how that works with those
         * new chipsets, but that will come back and bite us
         */
-       of_register_driver(&smu_of_platform_driver);
+       of_register_platform_driver(&smu_of_platform_driver);
        return 0;
 }
 
index a0f30d0853ea248df6bba6d4a2dd25e96159346a..3d3bf1643e73ff98fc863cdd5d2ec6b8b5965291 100644 (file)
 #include <linux/suspend.h>
 #include <linux/kthread.h>
 #include <linux/moduleparam.h>
+#include <linux/freezer.h>
 
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/system.h>
 #include <asm/sections.h>
-#include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #undef DEBUG
 
index d00c0c37a12e2c2c443148a2361b27e0e29ff98e..2e4ad44a863640424745e711f5bd73b0a5703af6 100644 (file)
 #include <asm/sections.h>
 #include <asm/of_device.h>
 #include <asm/macio.h>
+#include <asm/of_platform.h>
 
 #include "therm_pm72.h"
 
@@ -2236,14 +2237,14 @@ static int __init therm_pm72_init(void)
                return -ENODEV;
        }
 
-       of_register_driver(&fcu_of_platform_driver);
+       of_register_platform_driver(&fcu_of_platform_driver);
        
        return 0;
 }
 
 static void __exit therm_pm72_exit(void)
 {
-       of_unregister_driver(&fcu_of_platform_driver);
+       of_unregister_platform_driver(&fcu_of_platform_driver);
 
        if (of_dev)
                of_device_unregister(of_dev);
index 738faab1b22c2fac50b7d8f6a29902c36a997c9d..a1d3a987cb3acad619ed55adcc3b2d1752b9091b 100644 (file)
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/system.h>
 #include <asm/sections.h>
-#include <asm/of_device.h>
+#include <asm/of_platform.h>
 #include <asm/macio.h>
 
 #define LOG_TEMP               0                       /* continously log temperature */
@@ -511,14 +512,14 @@ g4fan_init( void )
                return -ENODEV;
        }
 
-       of_register_driver( &therm_of_driver );
+       of_register_platform_driver( &therm_of_driver );
        return 0;
 }
 
 static void __exit
 g4fan_exit( void )
 {
-       of_unregister_driver( &therm_of_driver );
+       of_unregister_platform_driver( &therm_of_driver );
 
        if( x.of_dev )
                of_device_unregister( x.of_dev );
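
The mechanical renames above in smu.c, therm_pm72.c and therm_windtunnel.c appear to follow the powerpc split of the of_platform bus code out of of_device: platform-bus drivers now register through of_register_platform_driver()/of_unregister_platform_driver(), whose declarations live in asm/of_platform.h, hence the include changes in the same files.
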
index e63ea1c1f3c1de9833ac155d8e3e7587d5ff8693..c8558d4ed5064416e88b69d7a8d0a8e655939ee4 100644 (file)
@@ -42,7 +42,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/sysdev.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/syscalls.h>
 #include <linux/cpu.h>
 #include <asm/prom.h>
index ab3faa702d58b0d70da4a6d9053dc37f9ff44acf..e947af982f93503b0e20bd321247e4c6770b3abe 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <asm/prom.h>
 
index 08a40f4e4f60332f370f180a89124fe26ee3e85b..a1086ee8cccde1828860a24ee8667100e88dd701 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #include "dm.h"
 
@@ -85,7 +86,10 @@ struct crypt_config {
         */
        struct crypt_iv_operations *iv_gen_ops;
        char *iv_mode;
-       struct crypto_cipher *iv_gen_private;
+       union {
+               struct crypto_cipher *essiv_tfm;
+               int benbi_shift;
+       } iv_gen_private;
        sector_t iv_offset;
        unsigned int iv_size;
 
@@ -101,7 +105,7 @@ struct crypt_config {
 #define MIN_POOL_PAGES 32
 #define MIN_BIO_PAGES  8
 
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
 
 /*
  * Different IV generation algorithms:
@@ -113,6 +117,9 @@ static kmem_cache_t *_crypt_io_pool;
  *        encrypted with the bulk cipher using a salt as key. The salt
  *        should be derived from the bulk cipher's key via hashing.
  *
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ *        (needed for LRW-32-AES and possible other narrow block modes)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
@@ -191,21 +198,61 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        }
        kfree(salt);
 
-       cc->iv_gen_private = essiv_tfm;
+       cc->iv_gen_private.essiv_tfm = essiv_tfm;
        return 0;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-       crypto_free_cipher(cc->iv_gen_private);
-       cc->iv_gen_private = NULL;
+       crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+       cc->iv_gen_private.essiv_tfm = NULL;
 }
 
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
        memset(iv, 0, cc->iv_size);
        *(u64 *)iv = cpu_to_le64(sector);
-       crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
+       crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+       return 0;
+}
+
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+                             const char *opts)
+{
+       unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+       int log = long_log2(bs);
+
+       /* we need to calculate how far we must shift the sector count
+        * to get the cipher block count, we use this shift in _gen */
+
+       if (1 << log != bs) {
+               ti->error = "cypher blocksize is not a power of 2";
+               return -EINVAL;
+       }
+
+       if (log > 9) {
+               ti->error = "cypher blocksize is > 512";
+               return -EINVAL;
+       }
+
+       cc->iv_gen_private.benbi_shift = 9 - log;
+
+       return 0;
+}
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+       __be64 val;
+
+       memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
+
+       val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+       put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
        return 0;
 }
 
@@ -219,13 +266,18 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
        .generator = crypt_iv_essiv_gen
 };
 
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+       .ctr       = crypt_iv_benbi_ctr,
+       .dtr       = crypt_iv_benbi_dtr,
+       .generator = crypt_iv_benbi_gen
+};
 
 static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                           struct scatterlist *in, unsigned int length,
                           int write, sector_t sector)
 {
-       u8 iv[cc->iv_size];
+       u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
        struct blkcipher_desc desc = {
                .tfm = cc->tfm,
                .info = iv,
@@ -458,11 +510,11 @@ static void dec_pending(struct crypt_io *io, int error)
  * interrupt context.
  */
 static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
 
 static void kcryptd_queue_io(struct crypt_io *io)
 {
-       INIT_WORK(&io->work, kcryptd_do_work, io);
+       INIT_WORK(&io->work, kcryptd_do_work);
        queue_work(_kcryptd_workqueue, &io->work);
 }
 
@@ -618,9 +670,9 @@ static void process_read_endio(struct crypt_io *io)
        dec_pending(io, crypt_convert(cc, &ctx));
 }
 
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
 {
-       struct crypt_io *io = data;
+       struct crypt_io *io = container_of(work, struct crypt_io, work);
 
        if (io->post_process)
                process_read_endio(io);
@@ -768,7 +820,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        cc->tfm = tfm;
 
        /*
-        * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+        * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
         * See comments at iv code
         */
 
@@ -778,6 +830,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                cc->iv_gen_ops = &crypt_iv_plain_ops;
        else if (strcmp(ivmode, "essiv") == 0)
                cc->iv_gen_ops = &crypt_iv_essiv_ops;
+       else if (strcmp(ivmode, "benbi") == 0)
+               cc->iv_gen_ops = &crypt_iv_benbi_ops;
        else {
                ti->error = "Invalid IV mode";
                goto bad2;
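
The dm-crypt hunks above carry two independent changes: a new "benbi" IV generator, which just stores a big-endian 64-bit block counter in the last bytes of the IV, and the workqueue API rework merged in this window, in which work handlers take a struct work_struct pointer instead of a void *, INIT_WORK() loses its data argument, and the handler recovers its context with container_of(). A minimal sketch of the new pattern, using hypothetical my_io/my_do_work names rather than the ones in the patch:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_io {
		struct work_struct work;	/* embedded work item */
		int post_process;
	};

	/* New-style handler: the embedded work item is the only argument. */
	static void my_do_work(struct work_struct *work)
	{
		/* Recover the containing structure from the embedded member. */
		struct my_io *io = container_of(work, struct my_io, work);

		if (io->post_process)
			/* ... handle completion ... */ ;
	}

	static void my_queue_io(struct workqueue_struct *wq, struct my_io *io)
	{
		INIT_WORK(&io->work, my_do_work);	/* no third "data" argument any more */
		queue_work(wq, &io->work);
	}

The dm-mpath, dm-raid1, dm-snapshot and kcopyd hunks that follow apply the same container_of() idiom.
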
index d754e0bc6e90c09f8ea73311e509e3d0a88f707d..cf8bf052138e5e9926912ef87098211de510558c 100644 (file)
@@ -101,11 +101,11 @@ typedef int (*action_fn) (struct pgpath *pgpath);
 
 #define MIN_IOS 256    /* Mempool size */
 
-static kmem_cache_t *_mpio_cache;
+static struct kmem_cache *_mpio_cache;
 
 struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                INIT_LIST_HEAD(&m->priority_groups);
                spin_lock_init(&m->lock);
                m->queue_io = 1;
-               INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
-               INIT_WORK(&m->trigger_event, trigger_event, m);
+               INIT_WORK(&m->process_queued_ios, process_queued_ios);
+               INIT_WORK(&m->trigger_event, trigger_event);
                m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                if (!m->mpio_pool) {
                        kfree(m);
@@ -379,9 +379,10 @@ static void dispatch_queued_ios(struct multipath *m)
        }
 }
 
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
 {
-       struct multipath *m = (struct multipath *) data;
+       struct multipath *m =
+               container_of(work, struct multipath, process_queued_ios);
        struct hw_handler *hwh = &m->hw_handler;
        struct pgpath *pgpath = NULL;
        unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@ out:
  * An event is triggered whenever a path is taken out of use.
  * Includes path failure and PG bypass.
  */
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
 {
-       struct multipath *m = (struct multipath *) data;
+       struct multipath *m =
+               container_of(work, struct multipath, trigger_event);
 
        dm_table_event(m->ti->table);
 }
index 48a653b3f518d1a066b598291c50e2ac43e516fa..fc8cbb168e3ed2d5f8ae53bcec7762d0cc53008b 100644 (file)
@@ -883,7 +883,7 @@ static void do_mirror(struct mirror_set *ms)
        do_writes(ms, &writes);
 }
 
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
        struct mirror_set *ms;
 
@@ -1269,7 +1269,7 @@ static int __init dm_mirror_init(void)
                dm_dirty_log_exit();
                return r;
        }
-       INIT_WORK(&_kmirrord_work, do_work, NULL);
+       INIT_WORK(&_kmirrord_work, do_work);
 
        r = dm_register_target(&mirror_target);
        if (r < 0) {
index 5281e0094072b28fd5714ca8227d54c616422352..b0ce2ce82278a0bc93e1ca345ed227f4c9198339 100644 (file)
@@ -40,7 +40,7 @@
 #define SNAPSHOT_PAGES 256
 
 struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
 
 struct pending_exception {
        struct exception e;
@@ -88,8 +88,8 @@ struct pending_exception {
  * Hash table mapping origin volumes to lists of snapshots and
  * a lock to protect it
  */
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
 static mempool_t *pending_pool;
 
 /*
@@ -228,7 +228,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
        return 0;
 }
 
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
 {
        struct list_head *slot;
        struct exception *ex, *next;
@@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        bio_list_init(&s->queued_bios);
-       INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+       INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@ static void flush_bios(struct bio *bio)
        }
 }
 
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
 {
-       struct dm_snapshot *s = (struct dm_snapshot *) data;
+       struct dm_snapshot *s =
+               container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;
 
index fc4f743f3b531e72bdb9c937d6cb2bf09d9ede1d..7ec1b112a6d5f59ab9ebc3007d1e1fca5d23b008 100644 (file)
@@ -121,8 +121,8 @@ struct mapped_device {
 };
 
 #define MIN_IOS 256
-static kmem_cache_t *_io_cache;
-static kmem_cache_t *_tio_cache;
+static struct kmem_cache *_io_cache;
+static struct kmem_cache *_tio_cache;
 
 static int __init local_init(void)
 {
index f1db6eff48574c80d9340960ac3deb0d25831ca0..b46f6c575f7ebd5ee79dbe543525ed5459e173d6 100644 (file)
@@ -203,7 +203,7 @@ struct kcopyd_job {
 /* FIXME: this should scale with the number of pages */
 #define MIN_JOBS 512
 
-static kmem_cache_t *_job_cache;
+static struct kmem_cache *_job_cache;
 static mempool_t *_job_pool;
 
 /*
@@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
 /*
  * kcopyd does this every time it's woken up.
  */
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
 {
        /*
         * The order that these are called is *very* important.
@@ -628,7 +628,7 @@ static int kcopyd_init(void)
        }
 
        kcopyd_clients++;
-       INIT_WORK(&_kcopyd_work, do_work, NULL);
+       INIT_WORK(&_kcopyd_work, do_work);
        mutex_unlock(&kcopyd_init_lock);
        return 0;
 }
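
Another change repeated throughout the device-mapper and md hunks is purely a rename: the kmem_cache_t typedef is being phased out in favour of the bare struct kmem_cache tag. A hedged sketch of a slab cache declared that way, with made-up names and the 2.6.19-era kmem_cache_create() signature (later kernels dropped the destructor argument):

	#include <linux/init.h>
	#include <linux/slab.h>

	static struct kmem_cache *_example_cache;	/* was: static kmem_cache_t *... */

	static int __init example_init(void)
	{
		/* name, object size, alignment, flags, constructor, destructor */
		_example_cache = kmem_cache_create("example_io", 128, 0, 0, NULL, NULL);
		return _example_cache ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		kmem_cache_destroy(_example_cache);
	}
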
index 8cbf9c9df1c3915ce109ba8daae07e5187ea7b21..6c4345bde07e1dad65014f5aed752f41bde414a7 100644 (file)
 #include <linux/raid/bitmap.h>
 #include <linux/sysctl.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include <linux/init.h>
 
index 69c3e201fa3bf0ee1ea51378fd172d4d4461e612..52914d5cec76ce1ded14a0fc37a9d2b98b1afe64 100644 (file)
@@ -348,7 +348,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
 
 static int grow_stripes(raid5_conf_t *conf, int num)
 {
-       kmem_cache_t *sc;
+       struct kmem_cache *sc;
        int devs = conf->raid_disks;
 
        sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@@ -397,7 +397,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
        LIST_HEAD(newstripes);
        struct disk_info *ndisks;
        int err = 0;
-       kmem_cache_t *sc;
+       struct kmem_cache *sc;
        int i;
 
        if (newsize <= conf->pool_size)
index 06893243f3d4c038ccda257778f58531a01133e5..6e166801505d23636014d2a120d479538933a2d7 100644 (file)
@@ -63,7 +63,7 @@ struct flexcop_pci {
 
        unsigned long last_irq;
 
-       struct work_struct irq_check_work;
+       struct delayed_work irq_check_work;
 
        struct flexcop_device *fc_dev;
 };
@@ -97,9 +97,10 @@ static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_regi
        return 0;
 }
 
-static void flexcop_pci_irq_check_work(void *data)
+static void flexcop_pci_irq_check_work(struct work_struct *work)
 {
-       struct flexcop_pci *fc_pci = data;
+       struct flexcop_pci *fc_pci =
+               container_of(work, struct flexcop_pci, irq_check_work.work);
        struct flexcop_device *fc = fc_pci->fc_dev;
 
        flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714);
@@ -371,7 +372,7 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
        if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
                goto err_fc_exit;
 
-       INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci);
+       INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);
 
        return ret;
 
index 8a7dd507cf6e4a98274adb74c9b450f35a3ca422..9123147e376f56c930c02b74e9bbb4d449b4090a 100644 (file)
@@ -128,7 +128,7 @@ struct cinergyt2 {
 
        struct dvbt_set_parameters_msg param;
        struct dvbt_get_status_msg status;
-       struct work_struct query_work;
+       struct delayed_work query_work;
 
        wait_queue_head_t poll_wq;
        int pending_fe_events;
@@ -142,7 +142,7 @@ struct cinergyt2 {
 #ifdef ENABLE_RC
        struct input_dev *rc_input_dev;
        char phys[64];
-       struct work_struct rc_query_work;
+       struct delayed_work rc_query_work;
        int rc_input_event;
        u32 rc_last_code;
        unsigned long last_event_jiffies;
@@ -287,7 +287,7 @@ static int cinergyt2_alloc_stream_urbs (struct cinergyt2 *cinergyt2)
        int i;
 
        cinergyt2->streambuf = usb_buffer_alloc(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE,
-                                             SLAB_KERNEL, &cinergyt2->streambuf_dmahandle);
+                                             GFP_KERNEL, &cinergyt2->streambuf_dmahandle);
        if (!cinergyt2->streambuf) {
                dprintk(1, "failed to alloc consistent stream memory area, bailing out!\n");
                return -ENOMEM;
@@ -723,9 +723,10 @@ static struct dvb_device cinergyt2_fe_template = {
 
 #ifdef ENABLE_RC
 
-static void cinergyt2_query_rc (void *data)
+static void cinergyt2_query_rc (struct work_struct *work)
 {
-       struct cinergyt2 *cinergyt2 = data;
+       struct cinergyt2 *cinergyt2 =
+               container_of(work, struct cinergyt2, rc_query_work.work);
        char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS };
        struct cinergyt2_rc_event rc_events[12];
        int n, len, i;
@@ -806,7 +807,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
        strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys));
        cinergyt2->rc_input_event = KEY_MAX;
        cinergyt2->rc_last_code = ~0;
-       INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2);
+       INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc);
 
        input_dev->name = DRIVER_NAME " remote control";
        input_dev->phys = cinergyt2->phys;
@@ -847,9 +848,10 @@ static inline void cinergyt2_resume_rc(struct cinergyt2 *cinergyt2) { }
 
 #endif /* ENABLE_RC */
 
-static void cinergyt2_query (void *data)
+static void cinergyt2_query (struct work_struct *work)
 {
-       struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data;
+       struct cinergyt2 *cinergyt2 =
+               container_of(work, struct cinergyt2, query_work.work);
        char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS };
        struct dvbt_get_status_msg *s = &cinergyt2->status;
        uint8_t lock_bits;
@@ -893,7 +895,7 @@ static int cinergyt2_probe (struct usb_interface *intf,
 
        mutex_init(&cinergyt2->sem);
        init_waitqueue_head (&cinergyt2->poll_wq);
-       INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2);
+       INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query);
 
        cinergyt2->udev = interface_to_usbdev(intf);
        cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
index a2ab2eebfc685322a640d3c920ddefd1531b1033..e85972222ab42d1f01593941517d810f5cf13c37 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/list.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/jiffies.h>
 #include <asm/processor.h>
 
index 8859ab74f0fe4c65c8e75b9350a2a0b138615525..ebf4dc5190f68bbf2ebcc201b26f59ca43962dcf 100644 (file)
@@ -127,6 +127,7 @@ struct dvb_net_priv {
        int in_use;
        struct net_device_stats stats;
        u16 pid;
+       struct net_device *net;
        struct dvb_net *host;
        struct dmx_demux *demux;
        struct dmx_section_feed *secfeed;
@@ -1123,10 +1124,11 @@ static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc)
 }
 
 
-static void wq_set_multicast_list (void *data)
+static void wq_set_multicast_list (struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct dvb_net_priv *priv = dev->priv;
+       struct dvb_net_priv *priv =
+               container_of(work, struct dvb_net_priv, set_multicast_list_wq);
+       struct net_device *dev = priv->net;
 
        dvb_net_feed_stop(dev);
        priv->rx_mode = RX_MODE_UNI;
@@ -1167,9 +1169,11 @@ static void dvb_net_set_multicast_list (struct net_device *dev)
 }
 
 
-static void wq_restart_net_feed (void *data)
+static void wq_restart_net_feed (struct work_struct *work)
 {
-       struct net_device *dev = data;
+       struct dvb_net_priv *priv =
+               container_of(work, struct dvb_net_priv, restart_net_feed_wq);
+       struct net_device *dev = priv->net;
 
        if (netif_running(dev)) {
                dvb_net_feed_stop(dev);
@@ -1276,6 +1280,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
        dvbnet->device[if_num] = net;
 
        priv = net->priv;
+       priv->net = net;
        priv->demux = dvbnet->demux;
        priv->pid = pid;
        priv->rx_mode = RX_MODE_UNI;
@@ -1284,8 +1289,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
        priv->feedtype = feedtype;
        reset_ule(priv);
 
-       INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net);
-       INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net);
+       INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
+       INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
        mutex_init(&priv->mutex);
 
        net->base_addr = pid;
index 0a3a0b6c23509f8e80c95dae37d595bbd516d50e..794e4471561c3fd7e365769bf242701237ff0059 100644 (file)
  *
  * TODO: Fix the repeat rate of the input device.
  */
-static void dvb_usb_read_remote_control(void *data)
+static void dvb_usb_read_remote_control(struct work_struct *work)
 {
-       struct dvb_usb_device *d = data;
+       struct dvb_usb_device *d =
+               container_of(work, struct dvb_usb_device, rc_query_work.work);
        u32 event;
        int state;
 
@@ -128,7 +129,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d)
 
        input_register_device(d->rc_input_dev);
 
-       INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d);
+       INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control);
 
        info("schedule remote query interval to %d msecs.", d->props.rc_interval);
        schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval));
index 376c45a8e77991712260386717a22c12757930a6..0d721731a5241049c4e5130f63e0d4975458d792 100644 (file)
@@ -369,7 +369,7 @@ struct dvb_usb_device {
        /* remote control */
        struct input_dev *rc_input_dev;
        char rc_phys[64];
-       struct work_struct rc_query_work;
+       struct delayed_work rc_query_work;
        u32 last_event;
        int last_state;
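
The DVB hunks above and below (flexcop-pci, cinergyT2, dvb-usb) convert periodically re-armed work items from struct work_struct to struct delayed_work; the handler must then go through the embedded .work member when calling container_of(). Roughly, with an invented rc_poll structure standing in for the driver state:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct rc_poll {
		struct delayed_work query_work;	/* work item plus its timer */
		unsigned int interval_ms;
	};

	static void rc_query(struct work_struct *work)
	{
		/* delayed_work embeds a work_struct, hence the .work in container_of() */
		struct rc_poll *rc = container_of(work, struct rc_poll, query_work.work);

		/* ... poll the hardware, report key events ... */

		/* re-arm the poll */
		schedule_delayed_work(&rc->query_work, msecs_to_jiffies(rc->interval_ms));
	}

	static void rc_start(struct rc_poll *rc)
	{
		INIT_DELAYED_WORK(&rc->query_work, rc_query);
		schedule_delayed_work(&rc->query_work, msecs_to_jiffies(rc->interval_ms));
	}
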
 
index 78035ee824caf59e26fc50eefaf9fcc899e63da8..397f51a7b2ada50abe6078540f6f37ae25edef85 100644 (file)
@@ -116,7 +116,7 @@ static int usb_allocate_stream_buffers(struct usb_data_stream *stream, int num,
        for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
                deb_mem("allocating buffer %d\n",stream->buf_num);
                if (( stream->buf_list[stream->buf_num] =
-                                       usb_buffer_alloc(stream->udev, size, SLAB_ATOMIC,
+                                       usb_buffer_alloc(stream->udev, size, GFP_ATOMIC,
                                        &stream->dma_addr[stream->buf_num]) ) == NULL) {
                        deb_mem("not enough memory for urb-buffer allocation.\n");
                        usb_free_stream_buffers(stream);
index f3bc82e44a288b2381066c61683d2be93e55039d..1aeacb1c4af7b034ba17c2bd46f43af9fac47bc6 100644 (file)
@@ -36,7 +36,7 @@ struct l64781_state {
        struct dvb_frontend frontend;
 
        /* private demodulator data */
-       int first:1;
+       unsigned int first:1;
 };
 
 #define dprintk(args...) \
index 8135f3e76aeb3e1537e461061c1c014d646d81a4..10b121ada833b403b0ff4ef5140f5698f729532a 100644 (file)
@@ -1244,7 +1244,7 @@ static int ttusb_dec_init_usb(struct ttusb_dec *dec)
                        return -ENOMEM;
                }
                dec->irq_buffer = usb_buffer_alloc(dec->udev,IRQ_PACKET_SIZE,
-                                       SLAB_ATOMIC, &dec->irq_dma_handle);
+                                       GFP_ATOMIC, &dec->irq_dma_handle);
                if(!dec->irq_buffer) {
                        return -ENOMEM;
                }
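
The usb_buffer_alloc() hunks above swap the old SLAB_KERNEL/SLAB_ATOMIC aliases for the plain GFP_KERNEL/GFP_ATOMIC allocation flags; the aliases were defined to the same values and were being removed around this time. The distinction itself, in a generic (non-USB) allocation sketch:

	#include <linux/slab.h>
	#include <linux/types.h>

	static void *alloc_buffer(size_t len, bool in_atomic_context)
	{
		/* GFP_KERNEL may sleep and is only valid in process context;
		 * GFP_ATOMIC never sleeps and is safe from interrupt context. */
		return kmalloc(len, in_atomic_context ? GFP_ATOMIC : GFP_KERNEL);
	}
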
index 6d96b17a7f818b018daacf163f8c8c38dbc26310..920b63f8cf051274d8459d2b828a2dd49263d759 100644 (file)
@@ -173,38 +173,6 @@ config RADIO_MAESTRO
          To compile this driver as a module, choose M here: the
          module will be called radio-maestro.
 
-config RADIO_MIROPCM20
-       tristate "miroSOUND PCM20 radio"
-       depends on ISA && VIDEO_V4L1 && SOUND_ACI_MIXER
-       ---help---
-         Choose Y here if you have this FM radio card. You also need to say Y
-         to "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20 radio)" (in "Sound")
-         for this to work.
-
-         In order to control your radio card, you will need to use programs
-         that are compatible with the Video For Linux API.  Information on
-         this API and pointers to "v4l" programs may be found at
-         <file:Documentation/video4linux/API.html>.
-
-         To compile this driver as a module, choose M here: the
-         module will be called miropcm20.
-
-config RADIO_MIROPCM20_RDS
-       tristate "miroSOUND PCM20 radio RDS user interface (EXPERIMENTAL)"
-       depends on RADIO_MIROPCM20 && EXPERIMENTAL
-       ---help---
-         Choose Y here if you want to see RDS/RBDS information like
-         RadioText, Programme Service name, Clock Time and date, Programme
-         Type and Traffic Announcement/Programme identification.
-
-         It's not possible to read the raw RDS packets from the device, so
-         the driver cant provide an V4L interface for this.  But the
-         availability of RDS is reported over V4L by the basic driver
-         already.  Here RDS can be read from files in /dev/v4l/rds.
-
-         To compile this driver as a module, choose M here: the
-         module will be called miropcm20-rds.
-
 config RADIO_SF16FMI
        tristate "SF16FMI Radio"
        depends on ISA && VIDEO_V4L2
index 41f4b8d17559d085eab1f3c6a3bf0f278eb71264..b12cec94f4cc917af762dd93d459587b80804240 100644 (file)
@@ -82,6 +82,8 @@ struct pp_cam_entry {
        struct pardevice *pdev;
        struct parport *port;
        struct work_struct cb_task;
+       void (*cb_func)(void *cbdata);
+       void *cb_data;
        int open_count;
        wait_queue_head_t wq_stream;
        /* image state flags */
@@ -130,6 +132,20 @@ static void cpia_parport_disable_irq( struct parport *port ) {
 #define PARPORT_CHUNK_SIZE     PAGE_SIZE
 
 
+static void cpia_pp_run_callback(struct work_struct *work)
+{
+       void (*cb_func)(void *cbdata);
+       void *cb_data;
+       struct pp_cam_entry *cam;
+
+       cam = container_of(work, struct pp_cam_entry, cb_task);
+       cb_func = cam->cb_func;
+       cb_data = cam->cb_data;
+       work_release(work);
+
+       cb_func(cb_data);
+}
+
 /****************************************************************************
  *
  *  CPiA-specific  low-level parport functions for nibble uploads
@@ -664,7 +680,9 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo
        int retval = 0;
 
        if(cam->port->irq != PARPORT_IRQ_NONE) {
-               INIT_WORK(&cam->cb_task, cb, cbdata);
+               cam->cb_func = cb;
+               cam->cb_data = cbdata;
+               INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback);
        } else {
                retval = -1;
        }
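
The cpia_pp change shows the conversion when the old data pointer was a truly arbitrary callback argument: the function pointer and its argument are stored in the camera structure, and a small trampoline work handler calls them back. (INIT_WORK_NAR and work_release() belong to the transitional "non-auto-release" workqueue interface of this period, which later kernels dropped again.) A simplified sketch of the same idea using only plain INIT_WORK(), with invented names:

	#include <linux/workqueue.h>

	struct cb_holder {
		void (*cb_func)(void *cbdata);	/* user-supplied callback */
		void *cb_data;			/* its argument */
		struct work_struct task;
	};

	static void run_callback(struct work_struct *work)
	{
		struct cb_holder *h = container_of(work, struct cb_holder, task);

		h->cb_func(h->cb_data);		/* forward to the stored callback */
	}

	static void register_callback(struct cb_holder *h,
				      void (*cb)(void *cbdata), void *cbdata)
	{
		h->cb_func = cb;
		h->cb_data = cbdata;
		INIT_WORK(&h->task, run_callback);
	}
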
index 57e1c024a5478370241dd4abf24b97267613b4e5..e60a0a52e4b2c6c03612fe90a60bf72c6d0924a3 100644 (file)
@@ -145,9 +145,9 @@ static void ir_timer(unsigned long data)
        schedule_work(&ir->work);
 }
 
-static void cx88_ir_work(void *data)
+static void cx88_ir_work(struct work_struct *work)
 {
-       struct cx88_IR *ir = data;
+       struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
        unsigned long timeout;
 
        cx88_ir_handle_key(ir);
@@ -308,7 +308,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
        core->ir = ir;
 
        if (ir->polling) {
-               INIT_WORK(&ir->work, cx88_ir_work, ir);
+               INIT_WORK(&ir->work, cx88_ir_work);
                init_timer(&ir->timer);
                ir->timer.function = ir_timer;
                ir->timer.data = (unsigned long)ir;
index 1457b1602221d9d9d430ae3445c5314dd6472a97..ab87e7bfe84f1c6afc3ab91de80a2be12f211ea3 100644 (file)
@@ -268,9 +268,9 @@ static void ir_timer(unsigned long data)
        schedule_work(&ir->work);
 }
 
-static void ir_work(void *data)
+static void ir_work(struct work_struct *work)
 {
-       struct IR_i2c *ir = data;
+       struct IR_i2c *ir = container_of(work, struct IR_i2c, work);
        ir_key_poll(ir);
        mod_timer(&ir->timer, jiffies+HZ/10);
 }
@@ -400,7 +400,7 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
               ir->input->name,ir->input->phys,adap->name);
 
        /* start polling via eventd */
-       INIT_WORK(&ir->work, ir_work, ir);
+       INIT_WORK(&ir->work, ir_work);
        init_timer(&ir->timer);
        ir->timer.function = ir_timer;
        ir->timer.data     = (unsigned long)ir;
index cf43df3fe708c8eeba0fb4da03ab61b2fe5f7f7d..e1b56dc13c3f4c5f4dfa382d0c5036c54640db63 100644 (file)
@@ -56,7 +56,7 @@
 #include <media/tvaudio.h>
 #include <media/msp3400.h>
 #include <linux/kthread.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include "msp3400-driver.h"
 
 /* ---------------------------------------------------------------------- */
index f129f316d20eb42484ed84ffd67e2544dd5e9887..cf129746205dbae6e1775cd8fb7d0350b9ea5bcd 100644 (file)
@@ -45,16 +45,21 @@ static void pvr2_context_trigger_poll(struct pvr2_context *mp)
 }
 
 
-static void pvr2_context_poll(struct pvr2_context *mp)
+static void pvr2_context_poll(struct work_struct *work)
 {
+       struct pvr2_context *mp =
+               container_of(work, struct pvr2_context, workpoll);
        pvr2_context_enter(mp); do {
                pvr2_hdw_poll(mp->hdw);
        } while (0); pvr2_context_exit(mp);
 }
 
 
-static void pvr2_context_setup(struct pvr2_context *mp)
+static void pvr2_context_setup(struct work_struct *work)
 {
+       struct pvr2_context *mp =
+               container_of(work, struct pvr2_context, workinit);
+
        pvr2_context_enter(mp); do {
                if (!pvr2_hdw_dev_ok(mp->hdw)) break;
                pvr2_hdw_setup(mp->hdw);
@@ -92,8 +97,8 @@ struct pvr2_context *pvr2_context_create(
        }
 
        mp->workqueue = create_singlethread_workqueue("pvrusb2");
-       INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
-       INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+       INIT_WORK(&mp->workinit, pvr2_context_setup);
+       INIT_WORK(&mp->workpoll, pvr2_context_poll);
        queue_work(mp->workqueue,&mp->workinit);
  done:
        return mp;
index 7b9859c33018e41803be47f094626cd6b17409c2..92eabf88a09b72e336ce55ea1b4f4901b432ce18 100644 (file)
@@ -324,9 +324,9 @@ static void saa6588_timer(unsigned long data)
        schedule_work(&s->work);
 }
 
-static void saa6588_work(void *data)
+static void saa6588_work(struct work_struct *work)
 {
-       struct saa6588 *s = (struct saa6588 *)data;
+       struct saa6588 *s = container_of(work, struct saa6588, work);
 
        saa6588_i2c_poll(s);
        mod_timer(&s->timer, jiffies + msecs_to_jiffies(20));
@@ -419,7 +419,7 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind)
        saa6588_configure(s);
 
        /* start polling via eventd */
-       INIT_WORK(&s->work, saa6588_work, s);
+       INIT_WORK(&s->work, saa6588_work);
        init_timer(&s->timer);
        s->timer.function = saa6588_timer;
        s->timer.data = (unsigned long)s;
index 65d044086ce953b9459b377b53c880aef657f29c..daaae870a2c4eacdb9956f292aac459fd4968fe6 100644 (file)
@@ -343,9 +343,10 @@ static struct video_device saa7134_empress_template =
        .minor         = -1,
 };
 
-static void empress_signal_update(void* data)
+static void empress_signal_update(struct work_struct *work)
 {
-       struct saa7134_dev* dev = (struct saa7134_dev*) data;
+       struct saa7134_dev* dev =
+               container_of(work, struct saa7134_dev, empress_workqueue);
 
        if (dev->nosignal) {
                dprintk("no video signal\n");
@@ -378,7 +379,7 @@ static int empress_init(struct saa7134_dev *dev)
                 "%s empress (%s)", dev->name,
                 saa7134_boards[dev->board].name);
 
-       INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev);
+       INIT_WORK(&dev->empress_workqueue, empress_signal_update);
 
        err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
                                    empress_nr[dev->nr]);
@@ -399,7 +400,7 @@ static int empress_init(struct saa7134_dev *dev)
                            sizeof(struct saa7134_buf),
                            dev);
 
-       empress_signal_update(dev);
+       empress_signal_update(&dev->empress_workqueue);
        return 0;
 }
 
index fcaef4bf82896da5e5c20d463fca436835cc75b1..d506dfaa45a991c4c895d23625c68ef7281b0778 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <media/tvaudio.h>
 #include <media/v4l2-common.h>
index f53edf1923b7de3fa558c2adf4c42980f97ad608..fcc5467e76364f982168b6d3d49077707a8edbde 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/fs.h>
 #include <linux/kthread.h>
 #include <linux/file.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 
 #include <media/video-buf.h>
 #include <media/video-buf-dvb.h>
index 3c8dc72dc8e971662b7267d545ee71e8e98a22b9..9986de5cb3d6d46860ee9ad6e12fdf6e498683fe 100644 (file)
@@ -36,6 +36,7 @@
 #include <media/v4l2-common.h>
 #include <linux/kthread.h>
 #include <linux/highmem.h>
+#include <linux/freezer.h>
 
 /* Wake up at about 30 fps */
 #define WAKE_NUMERATOR 30
index 051b7c5b8f03a6da69bef8edde3488f841638b46..6e068cf1049beed6ef94ff42d79e88c5a8809831 100644 (file)
@@ -347,7 +347,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
  *     @irq: irq number (not used)
  *     @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
@@ -387,14 +387,16 @@ mpt_interrupt(int irq, void *bus_id)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     mpt_base_reply - MPT base driver's callback routine; all base driver
- *     "internal" request/reply processing is routed here.
- *     Currently used for EventNotification and EventAck handling.
+/**
+ *     mpt_base_reply - MPT base driver's callback routine
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @mf: Pointer to original MPT request frame
  *     @reply: Pointer to MPT reply frame (NULL if TurboReply)
  *
+ *     MPT base driver's callback routine; all base driver
+ *     "internal" request/reply processing is routed here.
+ *     Currently used for EventNotification and EventAck handling.
+ *
  *     Returns 1 indicating original alloc'd request frame ptr
  *     should be freed, or 0 if it shouldn't.
  */
@@ -530,7 +532,7 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
  *     @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
  *
  *     This routine is called by a protocol-specific driver (SCSI host,
- *     LAN, SCSI target) to register it's reply callback routine.  Each
+ *     LAN, SCSI target) to register its reply callback routine.  Each
  *     protocol-specific driver must do this before it will be able to
  *     use any IOC resources, such as obtaining request frames.
  *
@@ -572,7 +574,7 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass)
  *     mpt_deregister - Deregister a protocol drivers resources.
  *     @cb_idx: previously registered callback handle
  *
- *     Each protocol-specific driver should call this routine when it's
+ *     Each protocol-specific driver should call this routine when its
  *     module is unloaded.
  */
 void
@@ -617,7 +619,7 @@ mpt_event_register(int cb_idx, MPT_EVHANDLER ev_cbfunc)
  *
  *     Each protocol-specific driver should call this routine
  *     when it does not (or can no longer) handle events,
- *     or when it's module is unloaded.
+ *     or when its module is unloaded.
  */
 void
 mpt_event_deregister(int cb_idx)
@@ -656,7 +658,7 @@ mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func)
  *
  *     Each protocol-specific driver should call this routine
  *     when it does not (or can no longer) handle IOC reset handling,
- *     or when it's module is unloaded.
+ *     or when its module is unloaded.
  */
 void
 mpt_reset_deregister(int cb_idx)
@@ -670,6 +672,8 @@ mpt_reset_deregister(int cb_idx)
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mpt_device_driver_register - Register device driver hooks
+ *     @dd_cbfunc: driver callbacks struct
+ *     @cb_idx: MPT protocol driver index
  */
 int
 mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
@@ -696,6 +700,7 @@ mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mpt_device_driver_deregister - DeRegister device driver hooks
+ *     @cb_idx: MPT protocol driver index
  */
 void
 mpt_device_driver_deregister(int cb_idx)
@@ -887,8 +892,7 @@ mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_send_handshake_request - Send MPT request via doorbell
- *     handshake method.
+ *     mpt_send_handshake_request - Send MPT request via doorbell handshake method.
  *     @handle: Handle of registered MPT protocol driver
  *     @ioc: Pointer to MPT adapter structure
  *     @reqBytes: Size of the request in bytes
@@ -981,10 +985,13 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- * mpt_host_page_access_control - provides mechanism for the host
- * driver to control the IOC's Host Page Buffer access.
+ * mpt_host_page_access_control - control the IOC's Host Page Buffer access
  * @ioc: Pointer to MPT adapter structure
  * @access_control_value: define bits below
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Provides mechanism for the host driver to control the IOC's
+ * Host Page Buffer access.
  *
  * Access Control Value - bits[15:12]
  * 0h Reserved
@@ -1022,10 +1029,10 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mpt_host_page_alloc - allocate system memory for the fw
- *     If we already allocated memory in past, then resend the same pointer.
- *     ioc@: Pointer to pointer to IOC adapter
- *     ioc_init@: Pointer to ioc init config page
+ *     @ioc: Pointer to pointer to IOC adapter
+ *     @ioc_init: Pointer to ioc init config page
  *
+ *     If we already allocated memory in past, then resend the same pointer.
  *     Returns 0 for success, non-zero for failure.
  */
 static int
@@ -1091,12 +1098,15 @@ return 0;
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_verify_adapter - Given a unique IOC identifier, set pointer to
- *     the associated MPT adapter structure.
+ *     mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
  *     @iocid: IOC unique identifier (integer)
  *     @iocpp: Pointer to pointer to IOC adapter
  *
- *     Returns iocid and sets iocpp.
+ *     Given a unique IOC identifier, set pointer to the associated MPT
+ *     adapter structure.
+ *
+ *     Returns iocid and sets iocpp if iocid is found.
+ *     Returns -1 if iocid is not found.
  */
 int
 mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
@@ -1115,9 +1125,10 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_attach - Install a PCI intelligent MPT adapter.
  *     @pdev: Pointer to pci_dev structure
+ *     @id: PCI device ID information
  *
  *     This routine performs all the steps necessary to bring the IOC of
  *     a MPT adapter to a OPERATIONAL state.  This includes registering
@@ -1417,10 +1428,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_detach - Remove a PCI intelligent MPT adapter.
  *     @pdev: Pointer to pci_dev structure
- *
  */
 
 void
@@ -1466,10 +1476,10 @@ mpt_detach(struct pci_dev *pdev)
  */
 #ifdef CONFIG_PM
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_suspend - Fusion MPT base driver suspend routine.
- *
- *
+ *     @pdev: Pointer to pci_dev structure
+ *     @state: new state to enter
  */
 int
 mpt_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -1505,10 +1515,9 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_resume - Fusion MPT base driver resume routine.
- *
- *
+ *     @pdev: Pointer to pci_dev structure
  */
 int
 mpt_resume(struct pci_dev *pdev)
@@ -1566,7 +1575,7 @@ mpt_signal_reset(int index, MPT_ADAPTER *ioc, int reset_phase)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_do_ioc_recovery - Initialize or recover MPT adapter.
  *     @ioc: Pointer to MPT adapter structure
  *     @reason: Event word / reason
@@ -1892,13 +1901,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     mpt_detect_bound_ports - Search for PCI bus/dev_function
- *     which matches PCI bus/dev_function (+/-1) for newly discovered 929,
- *     929X, 1030 or 1035.
+/**
+ *     mpt_detect_bound_ports - Search for matching PCI bus/dev_function
  *     @ioc: Pointer to MPT adapter structure
  *     @pdev: Pointer to (struct pci_dev) structure
  *
+ *     Search for PCI bus/dev_function which matches
+ *     PCI bus/dev_function (+/-1) for newly discovered 929,
+ *     929X, 1030 or 1035.
+ *
  *     If match on PCI dev_function +/-1 is found, bind the two MPT adapters
  *     using alt_ioc pointer fields in their %MPT_ADAPTER structures.
  */
@@ -1945,9 +1956,9 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_adapter_disable - Disable misbehaving MPT adapter.
- *     @this: Pointer to MPT adapter structure
+ *     @ioc: Pointer to MPT adapter structure
  */
 static void
 mpt_adapter_disable(MPT_ADAPTER *ioc)
@@ -2046,9 +2057,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     mpt_adapter_dispose - Free all resources associated with a MPT
- *     adapter.
+/**
+ *     mpt_adapter_dispose - Free all resources associated with an MPT adapter
  *     @ioc: Pointer to MPT adapter structure
  *
  *     This routine unregisters h/w resources and frees all alloc'd memory
@@ -2099,8 +2109,8 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     MptDisplayIocCapabilities - Disply IOC's capacilities.
+/**
+ *     MptDisplayIocCapabilities - Disply IOC's capabilities.
  *     @ioc: Pointer to MPT adapter structure
  */
 static void
@@ -2142,7 +2152,7 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     MakeIocReady - Get IOC to a READY state, using KickStart if needed.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @force: Force hard KickStart of IOC
@@ -2279,7 +2289,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_GetIocState - Get the current state of a MPT adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @cooked: Request raw or cooked IOC state
@@ -2304,7 +2314,7 @@ mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     GetIocFacts - Send IOCFacts request to MPT adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @sleepFlag: Specifies whether the process can sleep
@@ -2478,7 +2488,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     GetPortFacts - Send PortFacts request to MPT adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @portnum: Port number
@@ -2545,7 +2555,7 @@ GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     SendIocInit - Send IOCInit request to MPT adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @sleepFlag: Specifies whether the process can sleep
@@ -2630,7 +2640,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
        }
 
        /* No need to byte swap the multibyte fields in the reply
-        * since we don't even look at it's contents.
+        * since we don't even look at its contents.
         */
 
        dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
@@ -2672,7 +2682,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     SendPortEnable - Send PortEnable request to MPT adapter port.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @portnum: Port number to enable
@@ -2723,9 +2733,13 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
        return rc;
 }
 
-/*
- *     ioc: Pointer to MPT_ADAPTER structure
- *      size - total FW bytes
+/**
+ *     mpt_alloc_fw_memory - allocate firmware memory
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *      @size: total FW bytes
+ *
+ *     If memory has already been allocated, the same (cached) value
+ *     is returned.
  */
 void
 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
@@ -2742,9 +2756,12 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
                        ioc->alloc_total += size;
        }
 }
-/*
- * If alt_img is NULL, delete from ioc structure.
- * Else, delete a secondary image in same format.
+/**
+ *     mpt_free_fw_memory - free firmware memory
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ *     If alt_img is NULL, delete from ioc structure.
+ *     Else, delete a secondary image in same format.
  */
 void
 mpt_free_fw_memory(MPT_ADAPTER *ioc)
@@ -2763,7 +2780,7 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
 
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @sleepFlag: Specifies whether the process can sleep
@@ -2865,10 +2882,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_downloadboot - DownloadBoot code
  *     @ioc: Pointer to MPT_ADAPTER structure
- *     @flag: Specify which part of IOC memory is to be uploaded.
+ *     @pFwHeader: Pointer to firmware header info
  *     @sleepFlag: Specifies whether the process can sleep
  *
  *     FwDownloadBoot requires Programmed IO access.
@@ -3071,7 +3088,7 @@ mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     KickStart - Perform hard reset of MPT adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @force: Force hard reset
@@ -3145,12 +3162,12 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_diag_reset - Perform hard reset of the adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @ignore: Set if to honor and clear to ignore
  *             the reset history bit
- *     @sleepflag: CAN_SLEEP if called in a non-interrupt thread,
+ *     @sleepFlag: CAN_SLEEP if called in a non-interrupt thread,
  *             else set to NO_SLEEP (use mdelay instead)
  *
  *     This routine places the adapter in diagnostic mode via the
@@ -3436,11 +3453,12 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     SendIocReset - Send IOCReset request to MPT adapter.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @reset_type: reset type, expected values are
  *     %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
+ *     @sleepFlag: Specifies whether the process can sleep
  *
  *     Send IOCReset request to the MPT adapter.
  *
@@ -3494,11 +3512,12 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     initChainBuffers - Allocate memory for and initialize
- *     chain buffers, chain buffer control arrays and spinlock.
- *     @hd: Pointer to MPT_SCSI_HOST structure
- *     @init: If set, initialize the spin lock.
+/**
+ *     initChainBuffers - Allocate memory for and initialize chain buffers
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ *     Allocates memory for and initializes chain buffers,
+ *     chain buffer control arrays and spinlock.
  */
 static int
 initChainBuffers(MPT_ADAPTER *ioc)
@@ -3594,7 +3613,7 @@ initChainBuffers(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     PrimeIocFifos - Initialize IOC request and reply FIFOs.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *
@@ -3891,15 +3910,15 @@ mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     WaitForDoorbellAck - Wait for IOC to clear the IOP_DOORBELL_STATUS bit
- *     in it's IntStatus register.
+/**
+ *     WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @howlong: How long to wait (in seconds)
  *     @sleepFlag: Specifies whether the process can sleep
  *
  *     This routine waits (up to ~2 seconds max) for IOC doorbell
- *     handshake ACKnowledge.
+ *     handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS
+ *     bit in its IntStatus register being clear.
  *
  *     Returns a negative value on failure, else wait loop count.
  */
@@ -3942,14 +3961,14 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     WaitForDoorbellInt - Wait for IOC to set the HIS_DOORBELL_INTERRUPT bit
- *     in it's IntStatus register.
+/**
+ *     WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @howlong: How long to wait (in seconds)
  *     @sleepFlag: Specifies whether the process can sleep
  *
- *     This routine waits (up to ~2 seconds max) for IOC doorbell interrupt.
+ *     This routine waits (up to ~2 seconds max) for IOC doorbell interrupt
+ *     (MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register.
  *
  *     Returns a negative value on failure, else wait loop count.
  */
@@ -3991,8 +4010,8 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     WaitForDoorbellReply - Wait for and capture a IOC handshake reply.
+/**
+ *     WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @howlong: How long to wait (in seconds)
  *     @sleepFlag: Specifies whether the process can sleep
@@ -4077,7 +4096,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     GetLanConfigPages - Fetch LANConfig pages.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *
@@ -4188,12 +4207,9 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     mptbase_sas_persist_operation - Perform operation on SAS Persitent Table
+/**
+ *     mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
  *     @ioc: Pointer to MPT_ADAPTER structure
- *     @sas_address: 64bit SAS Address for operation.
- *     @target_id: specified target for operation
- *     @bus: specified bus for operation
  *     @persist_opcode: see below
  *
  *     MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
@@ -4202,7 +4218,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
  *
  *     NOTE: Don't use not this function during interrupt time.
  *
- *     Returns: 0 for success, non-zero error
+ *     Returns 0 for success, non-zero error
  */
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -4399,7 +4415,7 @@ mptbase_raid_process_event_data(MPT_ADAPTER *ioc,
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     GetIoUnitPage2 - Retrieve BIOS version and boot order information.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *
@@ -4457,7 +4473,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*     mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
+/**
+ *     mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
  *     @ioc: Pointer to a Adapter Strucutre
  *     @portnum: IOC port number
  *
@@ -4644,7 +4661,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*     mpt_readScsiDevicePageHeaders - save version and length of SDP1
+/**
+ *     mpt_readScsiDevicePageHeaders - save version and length of SDP1
  *     @ioc: Pointer to a Adapter Strucutre
  *     @portnum: IOC port number
  *
@@ -4996,9 +5014,8 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     SendEventNotification - Send EventNotification (on or off) request
- *     to MPT adapter.
+/**
+ *     SendEventNotification - Send EventNotification (on or off) request to adapter
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @EvSwitch: Event switch flags
  */
@@ -5062,8 +5079,8 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mpt_config - Generic function to issue config message
- *     @ioc - Pointer to an adapter structure
- *     @cfg - Pointer to a configuration structure. Struct contains
+ *     @ioc:   Pointer to an adapter structure
+ *     @pCfg:  Pointer to a configuration structure. Struct contains
  *             action, page address, direction, physical address
  *             and pointer to a configuration page header
  *             Page header is updated.
@@ -5188,8 +5205,8 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     mpt_timer_expired - Call back for timer process.
+/**
+ *     mpt_timer_expired - Callback for timer process.
  *     Used only internal config functionality.
  *     @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
  */
@@ -5214,12 +5231,12 @@ mpt_timer_expired(unsigned long data)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_ioc_reset - Base cleanup for hard reset
  *     @ioc: Pointer to the adapter structure
  *     @reset_phase: Indicates pre- or post-reset functionality
  *
- *     Remark: Free's resources with internally generated commands.
+ *     Remark: Frees resources with internally generated commands.
  */
 static int
 mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
@@ -5271,7 +5288,7 @@ mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
  *     procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
  *
  *     Returns 0 for success, non-zero for failure.
@@ -5297,7 +5314,7 @@ procmpt_create(void)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
  *
  *     Returns 0 for success, non-zero for failure.
@@ -5311,16 +5328,16 @@ procmpt_destroy(void)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     procmpt_summary_read - Handle read request from /proc/mpt/summary
- *     or from /proc/mpt/iocN/summary.
+/**
+ *     procmpt_summary_read - Handle read request of a summary file
  *     @buf: Pointer to area to write information
  *     @start: Pointer to start pointer
  *     @offset: Offset to start writing
- *     @request:
+ *     @request: Amount of read data requested
  *     @eof: Pointer to EOF integer
  *     @data: Pointer
  *
+ *     Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
  *     Returns number of characters written to process performing the read.
  */
 static int
@@ -5355,12 +5372,12 @@ procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eo
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     procmpt_version_read - Handle read request from /proc/mpt/version.
  *     @buf: Pointer to area to write information
  *     @start: Pointer to start pointer
  *     @offset: Offset to start writing
- *     @request:
+ *     @request: Amount of read data requested
  *     @eof: Pointer to EOF integer
  *     @data: Pointer
  *
@@ -5411,12 +5428,12 @@ procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eo
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info.
  *     @buf: Pointer to area to write information
  *     @start: Pointer to start pointer
  *     @offset: Offset to start writing
- *     @request:
+ *     @request: Amount of read data requested
  *     @eof: Pointer to EOF integer
  *     @data: Pointer
  *
@@ -5577,16 +5594,17 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
  */
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_HardResetHandler - Generic reset handler, issue SCSI Task
- *     Management call based on input arg values.  If TaskMgmt fails,
- *     return associated SCSI request.
+ *     mpt_HardResetHandler - Generic reset handler
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @sleepFlag: Indicates if sleep or schedule must be called.
  *
+ *     Issues SCSI Task Management call based on input arg values.
+ *     If TaskMgmt fails, returns associated SCSI request.
+ *
  *     Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
  *     or a non-interrupt thread.  In the former, must not call schedule().
  *
- *     Remark: A return of -1 is a FATAL error case, as it means a
+ *     Note: A return of -1 is a FATAL error case, as it means a
  *     FW reload/initialization failed.
  *
  *     Returns 0 for SUCCESS or -1 if FAILED.
@@ -5935,13 +5953,14 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
- *     ProcessEventNotification - Route a received EventNotificationReply to
- *     all currently regeistered event handlers.
+/**
+ *     ProcessEventNotification - Route EventNotificationReply to all event handlers
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @pEventReply: Pointer to EventNotification reply frame
  *     @evHandlers: Pointer to integer, number of event handlers
  *
+ *     Routes a received EventNotificationReply to all currently registered
+ *     event handlers.
  *     Returns sum of event handlers return values.
  */
 static int
@@ -6056,7 +6075,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_fc_log_info - Log information returned from Fibre Channel IOC.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @log_info: U32 LogInfo reply word from the IOC
@@ -6077,7 +6096,7 @@ mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @mr: Pointer to MPT reply frame
@@ -6200,7 +6219,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
        };
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_sas_log_info - Log information returned from SAS IOC.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @log_info: U32 LogInfo reply word from the IOC
@@ -6255,7 +6274,7 @@ union loginfo_type {
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @ioc_status: U32 IOCStatus word from IOC
@@ -6416,7 +6435,7 @@ EXPORT_SYMBOL(mpt_free_fw_memory);
 EXPORT_SYMBOL(mptbase_sas_persist_operation);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     fusion_init - Fusion MPT base driver initialization routine.
  *
  *     Returns 0 for success, non-zero for failure.
@@ -6456,7 +6475,7 @@ fusion_init(void)
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*
+/**
  *     fusion_exit - Perform driver unload cleanup.
  *
  *     This routine frees all resources associated with each MPT adapter
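
The long mptbase.c section is mostly documentation work: comment blocks are promoted from /* to /** so the kernel-doc tooling picks them up, parameter lists are completed, and the summary is condensed onto the opening line. The expected layout, shown for a hypothetical function:

	/**
	 *	my_func - one-line summary of the function
	 *	@ioc: Pointer to MPT_ADAPTER structure
	 *	@sleepFlag: Specifies whether the process can sleep
	 *
	 *	Longer description, separated from the summary and the
	 *	parameter list by blank comment lines.
	 *
	 *	Returns 0 for success, non-zero for failure.
	 */
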
index 1dd491773150f9f63089923f8eb40f5caa95ee4f..ca2f9107f1459ff465b6ea7244d7ce24d5038117 100644 (file)
@@ -1018,9 +1018,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
 }
 
 static void
-mptfc_setup_reset(void *arg)
+mptfc_setup_reset(struct work_struct *work)
 {
-       MPT_ADAPTER             *ioc = (MPT_ADAPTER *)arg;
+       MPT_ADAPTER             *ioc =
+               container_of(work, MPT_ADAPTER, fc_setup_reset_work);
        u64                     pn;
        struct mptfc_rport_info *ri;
 
@@ -1043,9 +1044,10 @@ mptfc_setup_reset(void *arg)
 }
 
 static void
-mptfc_rescan_devices(void *arg)
+mptfc_rescan_devices(struct work_struct *work)
 {
-       MPT_ADAPTER             *ioc = (MPT_ADAPTER *)arg;
+       MPT_ADAPTER             *ioc =
+               container_of(work, MPT_ADAPTER, fc_rescan_work);
        int                     ii;
        u64                     pn;
        struct mptfc_rport_info *ri;
@@ -1154,8 +1156,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         }
 
        spin_lock_init(&ioc->fc_rescan_work_lock);
-       INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc);
-       INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc);
+       INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+       INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
 
        spin_lock_irqsave(&ioc->FreeQlock, flags);
 
@@ -1393,8 +1395,7 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptfc_init - Register MPT adapter(s) as SCSI host(s) with
- *     linux scsi mid-layer.
+ *     mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
  *
  *     Returns 0 for success, non-zero for failure.
  */
@@ -1438,7 +1439,7 @@ mptfc_init(void)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptfc_remove - Removed fc infrastructure for devices
+ *     mptfc_remove - Remove fc infrastructure for devices
  *     @pdev: Pointer to pci_dev structure
  *
  */
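
The mptfc hunks above follow the work_struct API conversion that runs through this whole merge: handlers now take a struct work_struct * and recover their context with container_of(), and INIT_WORK() loses its third data argument. A minimal sketch of the pattern, with hypothetical example_* names that are not taken from the driver:

#include <linux/workqueue.h>

struct example_ctx {
        struct work_struct work;
        int pending;                    /* driver-private state */
};

static void example_handler(struct work_struct *work)
{
        /* recover the enclosing object from its embedded work_struct */
        struct example_ctx *ctx = container_of(work, struct example_ctx, work);

        ctx->pending = 0;
}

static void example_setup(struct example_ctx *ctx)
{
        INIT_WORK(&ctx->work, example_handler); /* no third 'data' argument */
        schedule_work(&ctx->work);
}
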
index 314c3a27585d8cc686a01243f930fe7489f6a2ed..b7c4407c5e3f2d5f3d5c6b6b4ae89bb61fc28154 100644 (file)
@@ -111,7 +111,8 @@ struct mpt_lan_priv {
        u32 total_received;
        struct net_device_stats stats;  /* Per device statistics */
 
-       struct work_struct post_buckets_task;
+       struct delayed_work post_buckets_task;
+       struct net_device *dev;
        unsigned long post_buckets_active;
 };
 
@@ -132,7 +133,7 @@ static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
 static int  mpt_lan_open(struct net_device *dev);
 static int  mpt_lan_reset(struct net_device *dev);
 static int  mpt_lan_close(struct net_device *dev);
-static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
                                           int priority);
 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
@@ -345,7 +346,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
        } else {
-               mpt_lan_post_receive_buckets(dev);
+               mpt_lan_post_receive_buckets(priv);
                netif_wake_queue(dev);
        }
 
@@ -441,7 +442,7 @@ mpt_lan_open(struct net_device *dev)
 
        dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
 
-       mpt_lan_post_receive_buckets(dev);
+       mpt_lan_post_receive_buckets(priv);
        printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev));
 
@@ -854,7 +855,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
        
        if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
                if (priority) {
-                       schedule_work(&priv->post_buckets_task);
+                       schedule_delayed_work(&priv->post_buckets_task, 0);
                } else {
                        schedule_delayed_work(&priv->post_buckets_task, 1);
                        dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
@@ -1188,10 +1189,9 @@ mpt_lan_receive_post_reply(struct net_device *dev,
 /* Simple SGE's only at the moment */
 
 static void
-mpt_lan_post_receive_buckets(void *dev_id)
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
 {
-       struct net_device *dev = dev_id;
-       struct mpt_lan_priv *priv = dev->priv;
+       struct net_device *dev = priv->dev;
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        MPT_FRAME_HDR *mf;
        LANReceivePostRequest_t *pRecvReq;
@@ -1335,6 +1335,13 @@ out:
        clear_bit(0, &priv->post_buckets_active);
 }
 
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+       mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+                                                 post_buckets_task.work));
+}
+
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 static struct net_device *
 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
@@ -1350,11 +1357,13 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
 
        priv = netdev_priv(dev);
 
+       priv->dev = dev;
        priv->mpt_dev = mpt_dev;
        priv->pnum = pnum;
 
-       memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
-       INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+       memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task));
+       INIT_DELAYED_WORK(&priv->post_buckets_task,
+                         mpt_lan_post_receive_buckets_work);
        priv->post_buckets_active = 0;
 
        dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
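
mpt_lan takes the same conversion a step further: post_buckets_task becomes a struct delayed_work, and since the handler can no longer be handed the net_device as an argument, a dev back-pointer is added to the private structure. Roughly, under the same assumptions as the previous sketch (illustrative names only):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct example_priv {
        struct delayed_work task;
        struct net_device *dev;         /* back-pointer, like priv->dev above */
};

static void example_task(struct work_struct *work)
{
        /* the work_struct is embedded in the delayed_work, hence 'task.work' */
        struct example_priv *priv =
                container_of(work, struct example_priv, task.work);

        netif_wake_queue(priv->dev);
}

static void example_arm(struct example_priv *priv, struct net_device *dev)
{
        priv->dev = dev;
        INIT_DELAYED_WORK(&priv->task, example_task);
        schedule_delayed_work(&priv->task, 0);  /* 0 = run as soon as possible */
}
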
index b752a479f6dbb01a43cc31d5b7d62f736c9f2d84..4f0c530e47b05fc3c77cc25b2a42cf561fb37923 100644 (file)
@@ -2006,9 +2006,10 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc)
  *(Mutex LOCKED)
  */
 static void
-mptsas_discovery_work(void * arg)
+mptsas_discovery_work(struct work_struct *work)
 {
-       struct mptsas_discovery_event *ev = arg;
+       struct mptsas_discovery_event *ev =
+               container_of(work, struct mptsas_discovery_event, work);
        MPT_ADAPTER *ioc = ev->ioc;
 
        mutex_lock(&ioc->sas_discovery_mutex);
@@ -2068,9 +2069,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id)
 * Work queue thread to clear the persistent table
  */
 static void
-mptsas_persist_clear_table(void * arg)
+mptsas_persist_clear_table(struct work_struct *work)
 {
-       MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+       MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
 
        mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
 }
@@ -2093,9 +2094,10 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(void *arg)
+mptsas_hotplug_work(struct work_struct *work)
 {
-       struct mptsas_hotplug_event *ev = arg;
+       struct mptsas_hotplug_event *ev =
+               container_of(work, struct mptsas_hotplug_event, work);
        MPT_ADAPTER *ioc = ev->ioc;
        struct mptsas_phyinfo *phy_info;
        struct sas_rphy *rphy;
@@ -2341,7 +2343,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
                        break;
                }
 
-               INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+               INIT_WORK(&ev->work, mptsas_hotplug_work);
                ev->ioc = ioc;
                ev->handle = le16_to_cpu(sas_event_data->DevHandle);
                ev->parent_handle =
@@ -2366,7 +2368,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc,
         * Persistent table is full.
         */
                INIT_WORK(&ioc->sas_persist_task,
-                   mptsas_persist_clear_table, (void *)ioc);
+                   mptsas_persist_clear_table);
                schedule_work(&ioc->sas_persist_task);
                break;
        case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
@@ -2395,7 +2397,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
                return;
        }
 
-       INIT_WORK(&ev->work, mptsas_hotplug_work, ev);
+       INIT_WORK(&ev->work, mptsas_hotplug_work);
        ev->ioc = ioc;
        ev->id = raid_event_data->VolumeID;
        ev->event_type = MPTSAS_IGNORE_EVENT;
@@ -2474,7 +2476,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
        ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
        if (!ev)
                return;
-       INIT_WORK(&ev->work, mptsas_discovery_work, ev);
+       INIT_WORK(&ev->work, mptsas_discovery_work);
        ev->ioc = ioc;
        schedule_work(&ev->work);
 };
@@ -2511,8 +2513,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
                break;
        case MPI_EVENT_PERSISTENT_TABLE_FULL:
                INIT_WORK(&ioc->sas_persist_task,
-                   mptsas_persist_clear_table,
-                   (void *)ioc);
+                   mptsas_persist_clear_table);
                schedule_work(&ioc->sas_persist_task);
                break;
         case MPI_EVENT_SAS_DISCOVERY:
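
mptsas shows a second variant of the pattern: each hotplug or discovery event gets its own small, dynamically allocated structure with an embedded work item, and the handler recovers (and is then responsible for freeing) that structure via container_of(). A hedged sketch of that shape; the names and the kfree() placement are illustrative, not lifted from the driver:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_event {
        struct work_struct work;
        u32 handle;                     /* whatever the event describes */
};

static void example_event_work(struct work_struct *work)
{
        struct example_event *ev =
                container_of(work, struct example_event, work);

        /* ... act on ev->handle ... */
        kfree(ev);                      /* one-shot event, handler frees it */
}

static void example_queue_event(u32 handle)
{
        struct example_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

        if (!ev)
                return;
        ev->handle = handle;
        INIT_WORK(&ev->work, example_event_work);
        schedule_work(&ev->work);
}
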
index 30524dc54b16b720dea158f45c5f477ff446cf19..2c72c36b8171cd67f26114c9c2f97b4d70b0b2d6 100644 (file)
@@ -1230,15 +1230,15 @@ mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len)
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mptscsih_proc_info - Return information about MPT adapter
+ *     @host:   scsi host struct
+ *     @buffer: if write, user data; if read, buffer for user
+ *     @start: returns the buffer address
+ *     @offset: if write, 0; if read, the current offset into the buffer from
+ *              the previous read.
+ *     @length: if write, return length;
+ *     @func:   write = 1; read = 0
  *
  *     (linux scsi_host_template.info routine)
- *
- *     buffer: if write, user data; if read, buffer for user
- *     length: if write, return length;
- *     offset: if write, 0; if read, the current offset into the buffer from
- *             the previous read.
- *     hostno: scsi host number
- *     func:   if write = 1; if read = 0
  */
 int
 mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
@@ -1902,8 +1902,7 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptscsih_host_reset - Perform a SCSI host adapter RESET!
- *     new_eh variant
+ *     mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant)
  *     @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
  *
  *     (linux scsi_host_template.eh_host_reset_handler routine)
@@ -1949,8 +1948,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptscsih_tm_pending_wait - wait for pending task management request to
- *             complete.
+ *     mptscsih_tm_pending_wait - wait for pending task management request to complete
  *     @hd: Pointer to MPT host structure.
  *
  *     Returns {SUCCESS,FAILED}.
@@ -1982,6 +1980,7 @@ mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
 /**
  *     mptscsih_tm_wait_for_completion - wait for completion of TM task
  *     @hd: Pointer to MPT host structure.
+ *     @timeout: timeout in seconds
  *
  *     Returns {SUCCESS,FAILED}.
  */
@@ -3429,8 +3428,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 /**
  *     mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
  *     @hd: Pointer to a SCSI HOST structure
- *     @vtarget: per device private data
- *     @lun: lun
+ *     @vdevice: virtual target device
  *
  *     Uses the ISR, but with special processing.
  *     MUST be single-threaded.
index e4cc3dd5fc9fbeb17f2154ed544bd6e39306b074..36641da59289eea7a4468892d1a85a48503737b5 100644 (file)
@@ -646,9 +646,10 @@ struct work_queue_wrapper {
        int                     disk;
 };
 
-static void mpt_work_wrapper(void *data)
+static void mpt_work_wrapper(struct work_struct *work)
 {
-       struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+       struct work_queue_wrapper *wqw =
+               container_of(work, struct work_queue_wrapper, work);
        struct _MPT_SCSI_HOST *hd = wqw->hd;
        struct Scsi_Host *shost = hd->ioc->sh;
        struct scsi_device *sdev;
@@ -695,7 +696,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
                           disk);
                return;
        }
-       INIT_WORK(&wqw->work, mpt_work_wrapper, wqw);
+       INIT_WORK(&wqw->work, mpt_work_wrapper);
        wqw->hd = hd;
        wqw->disk = disk;
 
@@ -784,9 +785,10 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
  * renegotiate for a given target
  */
 static void
-mptspi_dv_renegotiate_work(void *data)
+mptspi_dv_renegotiate_work(struct work_struct *work)
 {
-       struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+       struct work_queue_wrapper *wqw =
+               container_of(work, struct work_queue_wrapper, work);
        struct _MPT_SCSI_HOST *hd = wqw->hd;
        struct scsi_device *sdev;
 
@@ -804,7 +806,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
        if (!wqw)
                return;
 
-       INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw);
+       INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
        wqw->hd = hd;
 
        schedule_work(&wqw->work);
@@ -1098,8 +1100,7 @@ static struct pci_driver mptspi_driver = {
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptspi_init - Register MPT adapter(s) as SCSI host(s) with
- *     linux scsi mid-layer.
+ *     mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
  *
  *     Returns 0 for success, non-zero for failure.
  */
@@ -1133,7 +1134,6 @@ mptspi_init(void)
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mptspi_exit - Unregisters MPT adapter(s)
- *
  */
 static void __exit
 mptspi_exit(void)
index d96c687aee9373fe7638fa61bb88e335b58b65ea..c463dc2efc09490b26bdc6e2d0e46fdd694dce74 100644 (file)
@@ -56,6 +56,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
 /**
  *     i2o_bus_store_scan - Scan the I2O Bus Adapter
  *     @d: device which should be scanned
+ *     @attr: device_attribute
+ *     @buf: output buffer
+ *     @count: buffer size
  *
  *     Returns count.
  */
index ee183053fa23a54d23785446b4ba7585e3beb7ed..b9df143e4ff1962cb049ba11e8166425ffdd8b9f 100644 (file)
@@ -54,8 +54,8 @@ static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
  *     @dev: I2O device to claim
  *     @drv: I2O driver which wants to claim the device
  *
- *     Do the leg work to assign a device to a given OSM. If the claim succeed
- *     the owner of the rimary. If the attempt fails a negative errno code
+ *     Do the leg work to assign a device to a given OSM. If the claim succeeds,
+ *     the owner is the primary. If the attempt fails a negative errno code
  *     is returned. On success zero is returned.
  */
 int i2o_device_claim(struct i2o_device *dev)
@@ -208,24 +208,23 @@ static struct i2o_device *i2o_device_alloc(void)
 
 /**
  *     i2o_device_add - allocate a new I2O device and add it to the IOP
- *     @iop: I2O controller where the device is on
+ *     @c: I2O controller that the device is on
  *     @entry: LCT entry of the I2O device
  *
  *     Allocate a new I2O device and initialize it with the LCT entry. The
  *     device is appended to the device list of the controller.
  *
- *     Returns a pointer to the I2O device on success or negative error code
- *     on failure.
+ *     Returns zero on success, or a -ve errno.
  */
-static struct i2o_device *i2o_device_add(struct i2o_controller *c,
-                                        i2o_lct_entry * entry)
+static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
 {
        struct i2o_device *i2o_dev, *tmp;
+       int rc;
 
        i2o_dev = i2o_device_alloc();
        if (IS_ERR(i2o_dev)) {
                printk(KERN_ERR "i2o: unable to allocate i2o device\n");
-               return i2o_dev;
+               return PTR_ERR(i2o_dev);
        }
 
        i2o_dev->lct_data = *entry;
@@ -236,7 +235,9 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
        i2o_dev->iop = c;
        i2o_dev->device.parent = &c->device;
 
-       device_register(&i2o_dev->device);
+       rc = device_register(&i2o_dev->device);
+       if (rc)
+               goto err;
 
        list_add_tail(&i2o_dev->list, &c->devices);
 
@@ -270,12 +271,16 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
 
        pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
 
-       return i2o_dev;
+       return 0;
+
+err:
+       kfree(i2o_dev);
+       return rc;
 }
 
 /**
  *     i2o_device_remove - remove an I2O device from the I2O core
- *     @dev: I2O device which should be released
+ *     @i2o_dev: I2O device which should be released
  *
  *     Is used on I2O controller removal or LCT modification, when the device
  *     is removed from the system. Note that the device could still hang
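
Beyond the kernel-doc fixes, i2o_device_add() above now checks device_register() and, since its callers only need success or failure, returns an int instead of the device pointer. A minimal sketch of that error-handling shape, mirroring the hunk (struct example_obj and example_add are hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

struct example_obj {
        struct device dev;
        /* ... payload ... */
};

static int example_add(struct device *parent, struct example_obj *obj)
{
        int rc;

        obj->dev.parent = parent;
        rc = device_register(&obj->dev);
        if (rc) {
                kfree(obj);     /* undo the allocation on failure, as the hunk above does */
                return rc;
        }
        return 0;
}
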
index 64130227574f8cf32aec2113139f99eb6b5de699..9104b65ff70f0e85de7522deddd017313159edf6 100644 (file)
@@ -34,9 +34,7 @@ static spinlock_t i2o_drivers_lock;
 static struct i2o_driver **i2o_drivers;
 
 /**
- *     i2o_bus_match - Tell if a I2O device class id match the class ids of
- *                     the I2O driver (OSM)
- *
+ *     i2o_bus_match - Tell if I2O device class id matches the class ids of the I2O driver (OSM)
  *     @dev: device which should be verified
  *     @drv: the driver to match against
  *
@@ -232,7 +230,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
                        break;
                }
 
-               INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt);
+               INIT_WORK(&evt->work, drv->event);
                queue_work(drv->event_queue, &evt->work);
                return 1;
        }
@@ -248,7 +246,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
 
 /**
  *     i2o_driver_notify_controller_add_all - Send notify of added controller
- *                                            to all I2O drivers
+ *     @c: newly added controller
  *
  *     Send notifications to all registered drivers that a new controller was
  *     added.
@@ -267,8 +265,8 @@ void i2o_driver_notify_controller_add_all(struct i2o_controller *c)
 }
 
 /**
- *     i2o_driver_notify_controller_remove_all - Send notify of removed
- *                                               controller to all I2O drivers
+ *     i2o_driver_notify_controller_remove_all - Send notify of removed controller
+ *     @c: controller that is being removed
  *
  *     Send notifications to all registered drivers that a controller was
  *     removed.
@@ -287,8 +285,8 @@ void i2o_driver_notify_controller_remove_all(struct i2o_controller *c)
 }
 
 /**
- *     i2o_driver_notify_device_add_all - Send notify of added device to all
- *                                        I2O drivers
+ *     i2o_driver_notify_device_add_all - Send notify of added device
+ *     @i2o_dev: newly added I2O device
  *
  *     Send notifications to all registered drivers that a device was added.
  */
@@ -306,8 +304,8 @@ void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev)
 }
 
 /**
- *     i2o_driver_notify_device_remove_all - Send notify of removed device to
- *                                           all I2O drivers
+ *     i2o_driver_notify_device_remove_all - Send notify of removed device
+ *     @i2o_dev: device that is being removed
  *
  *     Send notifications to all registered drivers that a device was removed.
  */
@@ -362,7 +360,7 @@ int __init i2o_driver_init(void)
 /**
  *     i2o_driver_exit - clean up I2O drivers (OSMs)
  *
- *     Unregisters the I2O bus and free driver array.
+ *     Unregisters the I2O bus and frees driver array.
  */
 void __exit i2o_driver_exit(void)
 {
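
In i2o_driver_dispatch() the (void (*)(void *))drv->event cast disappears because OSM event callbacks are now declared with the standard work-handler prototype, so they can be handed to INIT_WORK() directly. A short sketch of the idea; the structure layout here is assumed for illustration, not copied from the I2O headers:

#include <linux/workqueue.h>

struct example_evt {
        struct work_struct work;
        /* ... event payload ... */
};

struct example_osm {
        void (*event)(struct work_struct *work);        /* new-style prototype */
        struct workqueue_struct *event_queue;
};

static void example_dispatch(struct example_osm *drv, struct example_evt *evt)
{
        INIT_WORK(&evt->work, drv->event);      /* no function-pointer cast needed */
        queue_work(drv->event_queue, &evt->work);
}
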
index a2350640384b5a75c09fc17a5f744c9a7fac9462..902753b2c66101858fe295e7bc97c24cff674389 100644 (file)
@@ -94,8 +94,8 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
 };
 
 /**
- *     i2o_exec_wait_free - Free a i2o_exec_wait struct
- *     @i2o_exec_wait: I2O wait data which should be cleaned up
+ *     i2o_exec_wait_free - Free an i2o_exec_wait struct
+ *     @wait: I2O wait data which should be cleaned up
  */
 static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
 {
@@ -105,7 +105,7 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
 /**
  *     i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
  *     @c: controller
- *     @m: message to post
+ *     @msg: message to post
  *     @timeout: time in seconds to wait
  *     @dma: i2o_dma struct of the DMA buffer to free on failure
  *
@@ -269,6 +269,7 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 /**
  *     i2o_exec_show_vendor_id - Displays Vendor ID of controller
  *     @d: device of which the Vendor ID should be displayed
+ *     @attr: device_attribute to display
  *     @buf: buffer into which the Vendor ID should be printed
  *
  *     Returns number of bytes printed into buffer.
@@ -290,6 +291,7 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d,
 /**
  *     i2o_exec_show_product_id - Displays Product ID of controller
  *     @d: device of which the Product ID should be displayed
+ *     @attr: device_attribute to display
  *     @buf: buffer into which the Product ID should be printed
  *
  *     Returns number of bytes printed into buffer.
@@ -365,14 +367,16 @@ static int i2o_exec_remove(struct device *dev)
 
 /**
  *     i2o_exec_lct_modified - Called on LCT NOTIFY reply
- *     @c: I2O controller on which the LCT has modified
+ *     @work: work struct for a specific controller
  *
 *     This function handles asynchronous LCT NOTIFY replies. It parses the
 *     new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY
  *     again, otherwise send LCT NOTIFY to get informed on next LCT change.
  */
-static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work)
+static void i2o_exec_lct_modified(struct work_struct *_work)
 {
+       struct i2o_exec_lct_notify_work *work =
+               container_of(_work, struct i2o_exec_lct_notify_work, work);
        u32 change_ind = 0;
        struct i2o_controller *c = work->c;
 
@@ -439,8 +443,7 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
 
                work->c = c;
 
-               INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified,
-                         work);
+               INIT_WORK(&work->work, i2o_exec_lct_modified);
                queue_work(i2o_exec_driver.event_queue, &work->work);
                return 1;
        }
@@ -460,13 +463,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
 
 /**
  *     i2o_exec_event - Event handling function
- *     @evt: Event which occurs
+ *     @work: Work item in occurring event
  *
 *     Handles events sent by the Executive device. At the moment it does not do
  *     anything useful.
  */
-static void i2o_exec_event(struct i2o_event *evt)
+static void i2o_exec_event(struct work_struct *work)
 {
+       struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
        if (likely(evt->i2o_dev))
                osm_debug("Event received from device: %d\n",
                          evt->i2o_dev->lct_data.tid);
index eaba81bf2ecad7a023d6e1ae22563ed9900bfcab..da9859f2caf2b3e083ebbc665d7e67a1268e5cca 100644 (file)
@@ -259,7 +259,7 @@ static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
 /**
  *     i2o_block_device_power - Power management for device dev
  *     @dev: I2O device which should receive the power management request
- *     @operation: Operation which should be send
+ *     @op: Operation to send
  *
  *     Send a power management request to the device dev.
  *
@@ -315,7 +315,7 @@ static inline struct i2o_block_request *i2o_block_request_alloc(void)
  *     i2o_block_request_free - Frees a I2O block request
  *     @ireq: I2O block request which should be freed
  *
- *     Fres the allocated memory (give it back to the request mempool).
+ *     Frees the allocated memory (gives it back to the request mempool).
  */
 static inline void i2o_block_request_free(struct i2o_block_request *ireq)
 {
@@ -326,6 +326,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
  *     i2o_block_sglist_alloc - Allocate the SG list and map it
  *     @c: I2O controller to which the request belongs
  *     @ireq: I2O block request
+ *     @mptr: message body pointer
  *
 *     Builds the SG list and maps it to be accessible by the controller.
  *
@@ -419,16 +420,18 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
 
 /**
  *     i2o_block_delayed_request_fn - delayed request queue function
- *     delayed_request: the delayed request with the queue to start
+ *     @work: the delayed request with the queue to start
  *
  *     If the request queue is stopped for a disk, and there is no open
  *     request, a new event is created, which calls this function to start
  *     the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
  *     be started again.
  */
-static void i2o_block_delayed_request_fn(void *delayed_request)
+static void i2o_block_delayed_request_fn(struct work_struct *work)
 {
-       struct i2o_block_delayed_request *dreq = delayed_request;
+       struct i2o_block_delayed_request *dreq =
+               container_of(work, struct i2o_block_delayed_request,
+                            work.work);
        struct request_queue *q = dreq->queue;
        unsigned long flags;
 
@@ -488,7 +491,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
  *     i2o_block_reply - Block OSM reply handler.
  *     @c: I2O controller from which the message arrives
  *     @m: message id of reply
- *     qmsg: the actuall I2O message reply
+ *     @msg: the actual I2O message reply
  *
  *     This function gets all the message replies.
  *
@@ -538,8 +541,9 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
        return 1;
 };
 
-static void i2o_block_event(struct i2o_event *evt)
+static void i2o_block_event(struct work_struct *work)
 {
+       struct i2o_event *evt = container_of(work, struct i2o_event, work);
        osm_debug("event received\n");
        kfree(evt);
 };
@@ -599,6 +603,8 @@ static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
 
 /**
  *     i2o_block_open - Open the block device
+ *     @inode: inode for block device being opened
+ *     @file: file to open
  *
  *     Power up the device, mount and lock the media. This function is called,
  *     if the block device is opened for access.
@@ -626,6 +632,8 @@ static int i2o_block_open(struct inode *inode, struct file *file)
 
 /**
  *     i2o_block_release - Release the I2O block device
+ *     @inode: inode for block device being released
+ *     @file: file to close
  *
  *     Unlock and unmount the media, and power down the device. Gets called if
  *     the block device is closed.
@@ -672,6 +680,8 @@ static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 
 /**
  *     i2o_block_ioctl - Issue device specific ioctl calls.
+ *     @inode: inode for block device ioctl
+ *     @file: file for ioctl
  *     @cmd: ioctl command
  *     @arg: arg
  *
@@ -899,7 +909,7 @@ static int i2o_block_transfer(struct request *req)
 
 /**
  *     i2o_block_request_fn - request queue handling function
- *     q: request queue from which the request could be fetched
+ *     @q: request queue from which the request could be fetched
  *
  *     Takes the next request from the queue, transfers it and if no error
  *     occurs dequeue it from the queue. On arrival of the reply the message
@@ -938,8 +948,8 @@ static void i2o_block_request_fn(struct request_queue *q)
                                continue;
 
                        dreq->queue = q;
-                       INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
-                                 dreq);
+                       INIT_DELAYED_WORK(&dreq->work,
+                                         i2o_block_delayed_request_fn);
 
                        if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                &dreq->work,
index 4fdaa5bda4125d57b7591bd89f9e9054fc47ddcb..67f921b4419b687b898e9655a52e70a08c289f30 100644 (file)
@@ -64,7 +64,7 @@
 
 /* I2O Block OSM mempool struct */
 struct i2o_block_mempool {
-       kmem_cache_t *slab;
+       struct kmem_cache *slab;
        mempool_t *pool;
 };
 
@@ -96,7 +96,7 @@ struct i2o_block_request {
 
 /* I2O Block device delayed request */
 struct i2o_block_delayed_request {
-       struct work_struct work;
+       struct delayed_work work;
        struct request_queue *queue;
 };
 
index 7d23e082bf26712ef886e401ed89ed2fd56c0f50..1de30d711671ce91ae354027fe36f9b2e2e508fb 100644 (file)
@@ -265,7 +265,11 @@ static int i2o_cfg_swdl(unsigned long arg)
                return -ENOMEM;
        }
 
-       __copy_from_user(buffer.virt, kxfer.buf, fragsize);
+       if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
+               i2o_msg_nop(c, msg);
+               i2o_dma_free(&c->pdev->dev, &buffer);
+               return -EFAULT;
+       }
 
        msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
        msg->u.head[1] =
@@ -516,7 +520,6 @@ static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
        return 0;
 }
 
-#ifdef CONFIG_I2O_EXT_ADAPTEC
 #ifdef CONFIG_COMPAT
 static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
                              unsigned long arg)
@@ -759,6 +762,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
 
 #endif
 
+#ifdef CONFIG_I2O_EXT_ADAPTEC
 static int i2o_cfg_passthru(unsigned long arg)
 {
        struct i2o_cmd_passthru __user *cmd =
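
The i2o_cfg_swdl() hunk above stops ignoring __copy_from_user(): a non-zero return means part of the user buffer could not be copied, so the function now unwinds (NOP-ing the message and freeing the DMA buffer) and returns -EFAULT. The general shape, as a small illustrative helper rather than the driver's actual code:

#include <asm/uaccess.h>
#include <linux/errno.h>

static int example_copy_in(void *dst, const void __user *src, unsigned long len)
{
        /* copy_from_user() returns the number of bytes it could NOT copy */
        if (copy_from_user(dst, src, len))
                return -EFAULT;
        return 0;
}
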
index 3d2e76eea93e9d8fb0779bd66462bc497d1237a2..a61cb17c5c12646cab01c5e0dc6026b5d76b6b05 100644 (file)
@@ -163,7 +163,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
  *     i2o_get_class_name -    do i2o class name lookup
  *     @class: class number
  *
- *     Return a descriptive string for an i2o class
+ *     Return a descriptive string for an i2o class.
  */
 static const char *i2o_get_class_name(int class)
 {
index 6ebf38213f9f87f336e77ce65134c9371ce612d6..1045c8a518bbbdb453ba9e40a951646b48e26bf2 100644 (file)
@@ -220,7 +220,7 @@ static int i2o_scsi_probe(struct device *dev)
        u32 id = -1;
        u64 lun = -1;
        int channel = -1;
-       int i;
+       int i, rc;
 
        i2o_shost = i2o_scsi_get_host(c);
        if (!i2o_shost)
@@ -304,14 +304,20 @@ static int i2o_scsi_probe(struct device *dev)
                return PTR_ERR(scsi_dev);
        }
 
-       sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
-                         "scsi");
+       rc = sysfs_create_link(&i2o_dev->device.kobj,
+                              &scsi_dev->sdev_gendev.kobj, "scsi");
+       if (rc)
+               goto err;
 
        osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
                 i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
                 (long unsigned int)le64_to_cpu(lun));
 
        return 0;
+
+err:
+       scsi_remove_device(scsi_dev);
+       return rc;
 };
 
 static const char *i2o_scsi_info(struct Scsi_Host *SChost)
@@ -405,8 +411,7 @@ static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev)
 };
 
 /**
- *     i2o_scsi_notify_device_remove - Retrieve notifications of removed
- *                                     devices
+ *     i2o_scsi_notify_device_remove - Retrieve notifications of removed devices
  *     @i2o_dev: the I2O device which was removed
  *
  *     If a I2O device is removed, we catch the notification to remove the
@@ -426,8 +431,7 @@ static void i2o_scsi_notify_device_remove(struct i2o_device *i2o_dev)
 };
 
 /**
- *     i2o_scsi_notify_controller_add - Retrieve notifications of added
- *                                      controllers
+ *     i2o_scsi_notify_controller_add - Retrieve notifications of added controllers
  *     @c: the controller which was added
  *
  *     If a I2O controller is added, we catch the notification to add a
@@ -457,8 +461,7 @@ static void i2o_scsi_notify_controller_add(struct i2o_controller *c)
 };
 
 /**
- *     i2o_scsi_notify_controller_remove - Retrieve notifications of removed
- *                                         controllers
+ *     i2o_scsi_notify_controller_remove - Retrieve notifications of removed controllers
  *     @c: the controller which was removed
  *
  *     If a I2O controller is removed, we catch the notification to remove the
@@ -745,7 +748,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
  *     @capacity: size in sectors
  *     @ip: geometry array
  *
- *     This is anyones guess quite frankly. We use the same rules everyone
+ *     This is anyone's guess quite frankly. We use the same rules everyone
  *     else appears to and hope. It seems to work.
  */
 
index 8287f95c8c422d85d6107c162c158fe22165e117..3661e6e065d27c705c86d0e5d215b24523939adc 100644 (file)
@@ -259,6 +259,7 @@ static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id)
 
 /**
  *     i2o_pci_irq_enable - Allocate interrupt for I2O controller
+ *     @c: i2o_controller that the request is for
  *
  *     Allocate an interrupt for the I2O controller, and activate interrupts
  *     on the I2O controller.
@@ -305,7 +306,7 @@ static void i2o_pci_irq_disable(struct i2o_controller *c)
 
 /**
  *     i2o_pci_probe - Probe the PCI device for an I2O controller
- *     @dev: PCI device to test
+ *     @pdev: PCI device to test
  *     @id: id which matched with the PCI device id table
  *
 *     Probe the PCI device for any device which is a member of the
@@ -447,7 +448,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 
 /**
  *     i2o_pci_remove - Removes a I2O controller from the system
- *     pdev: I2O controller which should be removed
+ *     @pdev: I2O controller which should be removed
  *
  *     Reset the I2O controller, disable interrupts and remove all allocated
  *     resources.
index 82938ad6ddbd8282752af18c91dcdf37e33b392a..ce1a48108210d38f645d34caa1273ad4d0acd60d 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/string.h>
 #include <linux/input.h>
 #include <linux/device.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
 
index 1ba8754e93837e417fb88155aeb0bcd968ca9e79..2ab7add78f94259c79106dceb6eeeeacbcf952c2 100644 (file)
@@ -33,9 +33,10 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
        spin_unlock_irqrestore(&fm->lock, flags);
 }
 
-static void tifm_7xx1_remove_media(void *adapter)
+static void tifm_7xx1_remove_media(struct work_struct *work)
 {
-       struct tifm_adapter *fm = adapter;
+       struct tifm_adapter *fm =
+               container_of(work, struct tifm_adapter, media_remover);
        unsigned long flags;
        int cnt;
        struct tifm_dev *sock;
@@ -169,9 +170,10 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
        return base_addr + ((sock_num + 1) << 10);
 }
 
-static void tifm_7xx1_insert_media(void *adapter)
+static void tifm_7xx1_insert_media(struct work_struct *work)
 {
-       struct tifm_adapter *fm = adapter;
+       struct tifm_adapter *fm =
+               container_of(work, struct tifm_adapter, media_inserter);
        unsigned long flags;
        tifm_media_id media_id;
        char *card_name = "xx";
@@ -261,7 +263,7 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
        spin_unlock_irqrestore(&fm->lock, flags);
        flush_workqueue(fm->wq);
 
-       tifm_7xx1_remove_media(fm);
+       tifm_7xx1_remove_media(&fm->media_remover);
 
        pci_set_power_state(dev, PCI_D3hot);
         pci_disable_device(dev);
@@ -328,8 +330,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
        if (!fm->sockets)
                goto err_out_free;
 
-       INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
-       INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
+       INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media);
+       INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media);
        fm->eject = tifm_7xx1_eject;
        pci_set_drvdata(dev, fm);
 
@@ -384,7 +386,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev)
 
        flush_workqueue(fm->wq);
 
-       tifm_7xx1_remove_media(fm);
+       tifm_7xx1_remove_media(&fm->media_remover);
 
        writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
        free_irq(dev->irq, fm);
index ee326136d03bf409d5604229255a9a9591396792..d61df5c3ac367cd8e38650c1c44c19db99f3785d 100644 (file)
@@ -219,8 +219,9 @@ static int tifm_device_remove(struct device *dev)
        struct tifm_driver *drv = fm_dev->drv;
 
        if (drv) {
-               if (drv->remove) drv->remove(fm_dev);
-               fm_dev->drv = 0;
+               if (drv->remove)
+                       drv->remove(fm_dev);
+               fm_dev->drv = NULL;
        }
 
        put_device(dev);
index 9d190022a4905f8d3682699ab2c87ec67e693ff6..6f2a282e2b9759c0511cf5501a0bd4e9bd501e3f 100644 (file)
@@ -1419,18 +1419,16 @@ static void mmc_setup(struct mmc_host *host)
  */
 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
 {
-       if (delay)
-               mmc_schedule_delayed_work(&host->detect, delay);
-       else
-               mmc_schedule_work(&host->detect);
+       mmc_schedule_delayed_work(&host->detect, delay);
 }
 
 EXPORT_SYMBOL(mmc_detect_change);
 
 
-static void mmc_rescan(void *data)
+static void mmc_rescan(struct work_struct *work)
 {
-       struct mmc_host *host = data;
+       struct mmc_host *host =
+               container_of(work, struct mmc_host, detect.work);
        struct list_head *l, *n;
        unsigned char power_mode;
 
@@ -1513,7 +1511,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
                spin_lock_init(&host->lock);
                init_waitqueue_head(&host->wq);
                INIT_LIST_HEAD(&host->cards);
-               INIT_WORK(&host->detect, mmc_rescan, host);
+               INIT_DELAYED_WORK(&host->detect, mmc_rescan);
 
                /*
                 * By default, hosts do not support SGIO or large requests.
@@ -1611,7 +1609,7 @@ EXPORT_SYMBOL(mmc_suspend_host);
  */
 int mmc_resume_host(struct mmc_host *host)
 {
-       mmc_rescan(host);
+       mmc_rescan(&host->detect.work);
 
        return 0;
 }
index cd5e0ab3d84b46c62018d4ac0fa3930dd704953e..149affe0b6860772c75ff0eb9c87dc03f69b7f52 100644 (file)
@@ -20,6 +20,6 @@ void mmc_remove_host_sysfs(struct mmc_host *host);
 void mmc_free_host_sysfs(struct mmc_host *host);
 
 int mmc_schedule_work(struct work_struct *work);
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay);
 void mmc_flush_scheduled_work(void);
 #endif
index ac53296360453c6067f6c565ee0b2857b7fc049d..e334acd045bced56ac88edf5850bde81297d8087 100644 (file)
@@ -320,18 +320,10 @@ void mmc_free_host_sysfs(struct mmc_host *host)
 
 static struct workqueue_struct *workqueue;
 
-/*
- * Internal function. Schedule work in the MMC work queue.
- */
-int mmc_schedule_work(struct work_struct *work)
-{
-       return queue_work(workqueue, work);
-}
-
 /*
  * Internal function. Schedule delayed work in the MMC work queue.
  */
-int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 {
        return queue_delayed_work(workqueue, work, delay);
 }
index 0fdc55b08a6daa72cb60ccdd8fc981e0cc648f7b..e846499a004c676751659b40dc03e5c3e6ed509d 100644 (file)
@@ -99,7 +99,7 @@ struct tifm_sd {
 
        struct mmc_request    *req;
        struct work_struct    cmd_handler;
-       struct work_struct    abort_handler;
+       struct delayed_work   abort_handler;
        wait_queue_head_t     can_eject;
 
        size_t                written_blocks;
@@ -496,9 +496,9 @@ err_out:
        mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd(void *data)
+static void tifm_sd_end_cmd(struct work_struct *work)
 {
-       struct tifm_sd *host = data;
+       struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
        struct tifm_dev *sock = host->dev;
        struct mmc_host *mmc = tifm_get_drvdata(sock);
        struct mmc_request *mrq;
@@ -608,9 +608,9 @@ err_out:
        mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_end_cmd_nodma(void *data)
+static void tifm_sd_end_cmd_nodma(struct work_struct *work)
 {
-       struct tifm_sd *host = (struct tifm_sd*)data;
+       struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
        struct tifm_dev *sock = host->dev;
        struct mmc_host *mmc = tifm_get_drvdata(sock);
        struct mmc_request *mrq;
@@ -661,11 +661,14 @@ static void tifm_sd_end_cmd_nodma(void *data)
        mmc_request_done(mmc, mrq);
 }
 
-static void tifm_sd_abort(void *data)
+static void tifm_sd_abort(struct work_struct *work)
 {
+       struct tifm_sd *host =
+               container_of(work, struct tifm_sd, abort_handler.work);
+
        printk(KERN_ERR DRIVER_NAME
                ": card failed to respond for a long period of time");
-       tifm_eject(((struct tifm_sd*)data)->dev);
+       tifm_eject(host->dev);
 }
 
 static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -762,9 +765,9 @@ static struct mmc_host_ops tifm_sd_ops = {
        .get_ro  = tifm_sd_ro
 };
 
-static void tifm_sd_register_host(void *data)
+static void tifm_sd_register_host(struct work_struct *work)
 {
-       struct tifm_sd *host = (struct tifm_sd*)data;
+       struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler);
        struct tifm_dev *sock = host->dev;
        struct mmc_host *mmc = tifm_get_drvdata(sock);
        unsigned long flags;
@@ -772,8 +775,7 @@ static void tifm_sd_register_host(void *data)
        spin_lock_irqsave(&sock->lock, flags);
        host->flags |= HOST_REG;
        PREPARE_WORK(&host->cmd_handler,
-                       no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
-                       data);
+                       no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd);
        spin_unlock_irqrestore(&sock->lock, flags);
        dev_dbg(&sock->dev, "adding host\n");
        mmc_add_host(mmc);
@@ -799,8 +801,8 @@ static int tifm_sd_probe(struct tifm_dev *sock)
        host->dev = sock;
        host->clk_div = 61;
        init_waitqueue_head(&host->can_eject);
-       INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
-       INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
+       INIT_WORK(&host->cmd_handler, tifm_sd_register_host);
+       INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort);
 
        tifm_set_drvdata(sock, mmc);
        sock->signal_irq = tifm_sd_signal_irq;
index ef4a731ca5c2d455468dbeb328eb93d661e6e67d..334e078ffafffcd07b0453a6b3458eea0e56f484 100644 (file)
@@ -451,7 +451,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
                return -ENODEV;
        }
 
-       flash = kzalloc(sizeof *flash, SLAB_KERNEL);
+       flash = kzalloc(sizeof *flash, GFP_KERNEL);
        if (!flash)
                return -ENOMEM;
 
index 11d170afa9c398de778eb643a84ae6db843f700c..06e33786078d3ed937801a430fee1c6277e6bb26 100644 (file)
@@ -922,7 +922,7 @@ int __init init_module(void)
  * and then free up the resources we took when the card was found.
  */
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        struct net_device *dev = dev_3c501;
        unregister_netdev(dev);
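
This hunk and the similar net-driver hunks below add __init/__exit annotations to the legacy init_module()/cleanup_module() entry points, so the init code can be discarded after loading and the exit code dropped entirely when module unloading is configured out. The convention in skeleton form (the bodies are placeholders):

#include <linux/init.h>
#include <linux/module.h>

int __init init_module(void)
{
        /* probe the hardware and register the device here */
        return 0;
}

void __exit cleanup_module(void)
{
        /* unregister the device and release its resources */
}
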
index a34b2206132d2fb6d716419eddc0b20ee50b8a8f..7e34c4f07b70723668d27a4baa2c48d1a1c8a011 100644 (file)
@@ -726,7 +726,7 @@ static void cleanup_card(struct net_device *dev)
                iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index 458cb9cbe9157dcda0d458b920e1e6622446bf1f..702bfb2a5e99d047f82e6d2b80e46c48396108e2 100644 (file)
@@ -1670,7 +1670,7 @@ int __init init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index aa43563610ae552df0cc25ea6051d9f1d44faf71..54e1d5aebed3af51a0ade5eedd3c032a3377428f 100644 (file)
@@ -940,7 +940,7 @@ int __init init_module(void)
        return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        struct net_device *dev = dev_3c507;
index 91849469b4f49f0118f935b33c4caeb0c1afb683..17d61eb0a7e5f1bfbf11580f43f40edc6caebe76 100644 (file)
@@ -1302,7 +1302,7 @@ int __init init_module(void)
        } else return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
        for (this_dev=0; this_dev<MAX_3C523_CARDS; this_dev++) {
index f4aca5386add1c94238d23205e408ef19e8b12e1..6c7437e60bd212e72486d42b16fc6c4e72396580 100644 (file)
@@ -1659,7 +1659,7 @@ int __init init_module(void)
  *     transmit operations are allowed to start scribbling into memory.
  */
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(this_device);
        cleanup_card(this_device);
index d02ed51abfccab65e2397cc063e4eb53a169bcee..931028f672de0eb610622895927db75b3bc23d57 100644 (file)
@@ -594,7 +594,7 @@ struct rtl8139_private {
        u32 rx_config;
        struct rtl_extra_stats xstats;
 
-       struct work_struct thread;
+       struct delayed_work thread;
 
        struct mii_if_info mii;
        unsigned int regs_len;
@@ -636,8 +636,8 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev);
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
-static void rtl8139_thread (void *_data);
-static void rtl8139_tx_timeout_task(void *_data);
+static void rtl8139_thread (struct work_struct *work);
+static void rtl8139_tx_timeout_task(struct work_struct *work);
 static const struct ethtool_ops rtl8139_ethtool_ops;
 
 /* write MMIO register, with flush */
@@ -1010,7 +1010,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
                (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
        spin_lock_init (&tp->lock);
        spin_lock_init (&tp->rx_lock);
-       INIT_WORK(&tp->thread, rtl8139_thread, dev);
+       INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
        tp->mii.dev = dev;
        tp->mii.mdio_read = mdio_read;
        tp->mii.mdio_write = mdio_write;
@@ -1596,15 +1596,16 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
                 RTL_R8 (Config1));
 }
 
-static void rtl8139_thread (void *_data)
+static void rtl8139_thread (struct work_struct *work)
 {
-       struct net_device *dev = _data;
-       struct rtl8139_private *tp = netdev_priv(dev);
+       struct rtl8139_private *tp =
+               container_of(work, struct rtl8139_private, thread.work);
+       struct net_device *dev = tp->mii.dev;
        unsigned long thr_delay = next_tick;
 
        if (tp->watchdog_fired) {
                tp->watchdog_fired = 0;
-               rtl8139_tx_timeout_task(_data);
+               rtl8139_tx_timeout_task(work);
        } else if (rtnl_trylock()) {
                rtl8139_thread_iter (dev, tp, tp->mmio_addr);
                rtnl_unlock ();
@@ -1646,10 +1647,11 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
        /* XXX account for unsent Tx packets in tp->stats.tx_dropped */
 }
 
-static void rtl8139_tx_timeout_task (void *_data)
+static void rtl8139_tx_timeout_task (struct work_struct *work)
 {
-       struct net_device *dev = _data;
-       struct rtl8139_private *tp = netdev_priv(dev);
+       struct rtl8139_private *tp =
+               container_of(work, struct rtl8139_private, thread.work);
+       struct net_device *dev = tp->mii.dev;
        void __iomem *ioaddr = tp->mmio_addr;
        int i;
        u8 tmp8;
@@ -1695,7 +1697,7 @@ static void rtl8139_tx_timeout (struct net_device *dev)
        struct rtl8139_private *tp = netdev_priv(dev);
 
        if (!tp->have_thread) {
-               INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev);
+               INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task);
                schedule_delayed_work(&tp->thread, next_tick);
        } else
                tp->watchdog_fired = 1;
index 0dca8bb9d2c724254947964abd642bc39647fc17..c01f87f5bed77b4f6d93cb5cf013a73f6f9e7122 100644 (file)
@@ -405,7 +405,7 @@ static void cleanup_card(struct net_device *dev)
        iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index 9164d8cd670e4cb6e6054abb9add4cf3e30c38f7..d4e4081690737bb945d51c6bcc03e9f6097a8596 100644 (file)
@@ -568,7 +568,7 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
 #ifdef MODULE
 static struct net_device *apne_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
        apne_dev = apne_probe(-1);
        if (IS_ERR(apne_dev))
@@ -576,7 +576,7 @@ int init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(apne_dev);
 
index cc1a27ed197fd93f0b360f0545e40610fb823557..dba5e5165452ba6a72bd250b4bd3d63b8bb7f250 100644 (file)
@@ -1041,7 +1041,7 @@ int __init init_module(void)
         return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(cops_dev);
        cleanup_card(cops_dev);
index b54b857e357e5318d7656b7b581ad5e1cd827111..fada15d959dec6fd6a7e8f72c8c32ed50afdb3df 100644 (file)
@@ -41,9 +41,6 @@
 #define DRV_NAME       "at91_ether"
 #define DRV_VERSION    "1.0"
 
-static struct net_device *at91_dev;
-
-static struct timer_list check_timer;
 #define LINK_POLL_INTERVAL     (HZ)
 
 /* ..................................................................... */
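
As the hunks below show, this driver also drops its file-scope check_timer in favour of a timer kept in the private structure, and replaces the open-coded (struct at91_private *) dev->priv casts with netdev_priv(dev), which returns the private area that alloc_etherdev(sizeof(struct at91_private)) reserves behind the net_device. A one-line illustration, assuming the driver's struct at91_private declaration is in scope (the wrapper itself is hypothetical):

#include <linux/netdevice.h>

static struct at91_private *example_lp(struct net_device *dev)
{
        /* points into the memory allocated together with the net_device */
        return netdev_priv(dev);
}
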
@@ -146,7 +143,7 @@ static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int
  */
 static void update_linkspeed(struct net_device *dev, int silent)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned int bmsr, bmcr, lpa, mac_cfg;
        unsigned int speed, duplex;
 
@@ -199,7 +196,7 @@ static void update_linkspeed(struct net_device *dev, int silent)
 static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = (struct net_device *) dev_id;
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned int phy;
 
        /*
@@ -242,7 +239,7 @@ done:
  */
 static void enable_phyirq(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned int dsintr, irq_number;
        int status;
 
@@ -252,8 +249,7 @@ static void enable_phyirq(struct net_device *dev)
                 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
                 * or board does not have it connected.
                 */
-               check_timer.expires = jiffies + LINK_POLL_INTERVAL;
-               add_timer(&check_timer);
+               mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
                return;
        }
 
@@ -294,13 +290,13 @@ static void enable_phyirq(struct net_device *dev)
  */
 static void disable_phyirq(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned int dsintr;
        unsigned int irq_number;
 
        irq_number = lp->board_data.phy_irq_pin;
        if (!irq_number) {
-               del_timer_sync(&check_timer);
+               del_timer_sync(&lp->check_timer);
                return;
        }
 
@@ -340,7 +336,7 @@ static void disable_phyirq(struct net_device *dev)
 #if 0
 static void reset_phy(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned int bmcr;
 
        spin_lock_irq(&lp->lock);
@@ -362,13 +358,13 @@ static void reset_phy(struct net_device *dev)
 static void at91ether_check_link(unsigned long dev_id)
 {
        struct net_device *dev = (struct net_device *) dev_id;
+       struct at91_private *lp = netdev_priv(dev);
 
        enable_mdi();
        update_linkspeed(dev, 1);
        disable_mdi();
 
-       check_timer.expires = jiffies + LINK_POLL_INTERVAL;
-       add_timer(&check_timer);
+       mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
 }
 
 /* ......................... ADDRESS MANAGEMENT ........................ */
@@ -590,7 +586,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
 
 static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        int ret;
 
        spin_lock_irq(&lp->lock);
@@ -611,7 +607,7 @@ static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cm
 
 static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        int ret;
 
        spin_lock_irq(&lp->lock);
@@ -627,7 +623,7 @@ static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cm
 
 static int at91ether_nwayreset(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        int ret;
 
        spin_lock_irq(&lp->lock);
@@ -658,7 +654,7 @@ static const struct ethtool_ops at91ether_ethtool_ops = {
 
 static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        int res;
 
        if (!netif_running(dev))
@@ -680,7 +676,7 @@ static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  */
 static void at91ether_start(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        struct recv_desc_bufs *dlist, *dlist_phys;
        int i;
        unsigned long ctl;
@@ -712,7 +708,7 @@ static void at91ether_start(struct net_device *dev)
  */
 static int at91ether_open(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned long ctl;
 
        if (!is_valid_ether_addr(dev->dev_addr))
@@ -752,7 +748,7 @@ static int at91ether_open(struct net_device *dev)
  */
 static int at91ether_close(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned long ctl;
 
        /* Disable Receiver and Transmitter */
@@ -779,7 +775,7 @@ static int at91ether_close(struct net_device *dev)
  */
 static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
 
        if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
                netif_stop_queue(dev);
@@ -811,7 +807,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev)
  */
 static struct net_device_stats *at91ether_stats(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        int ale, lenerr, seqe, lcol, ecol;
 
        if (netif_running(dev)) {
@@ -847,7 +843,7 @@ static struct net_device_stats *at91ether_stats(struct net_device *dev)
  */
 static void at91ether_rx(struct net_device *dev)
 {
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        struct recv_desc_bufs *dlist;
        unsigned char *p_recv;
        struct sk_buff *skb;
@@ -857,14 +853,13 @@ static void at91ether_rx(struct net_device *dev)
        while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) {
                p_recv = dlist->recv_buf[lp->rxBuffIndex];
                pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff;      /* Length of frame including FCS */
-               skb = alloc_skb(pktlen + 2, GFP_ATOMIC);
+               skb = dev_alloc_skb(pktlen + 2);
                if (skb != NULL) {
                        skb_reserve(skb, 2);
                        memcpy(skb_put(skb, pktlen), p_recv, pktlen);
 
                        skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
-                       skb->len = pktlen;
                        dev->last_rx = jiffies;
                        lp->stats.rx_bytes += pktlen;
                        netif_rx(skb);
@@ -891,7 +886,7 @@ static void at91ether_rx(struct net_device *dev)
 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = (struct net_device *) dev_id;
-       struct at91_private *lp = (struct at91_private *) dev->priv;
+       struct at91_private *lp = netdev_priv(dev);
        unsigned long intstatus, ctl;
 
        /* MAC Interrupt Status register indicates what interrupts are pending.
@@ -927,6 +922,17 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void at91ether_poll_controller(struct net_device *dev)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       at91ether_interrupt(dev->irq, dev);
+       local_irq_restore(flags);
+}
+#endif
+
 /*
  * Initialize the ethernet interface
  */
@@ -939,9 +945,6 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
        unsigned int val;
        int res;
 
-       if (at91_dev)                   /* already initialized */
-               return 0;
-
        dev = alloc_etherdev(sizeof(struct at91_private));
        if (!dev)
                return -ENOMEM;
@@ -957,7 +960,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
        }
 
        /* Allocate memory for DMA Receive descriptors */
-       lp = (struct at91_private *)dev->priv;
+       lp = netdev_priv(dev);
        lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
        if (lp->dlist == NULL) {
                free_irq(dev->irq, dev);
@@ -979,6 +982,9 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
        dev->set_mac_address = set_mac_address;
        dev->ethtool_ops = &at91ether_ethtool_ops;
        dev->do_ioctl = at91ether_ioctl;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       dev->poll_controller = at91ether_poll_controller;
+#endif
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1024,7 +1030,6 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
                dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
                return res;
        }
-       at91_dev = dev;
 
        /* Determine current link speed */
        spin_lock_irq(&lp->lock);
@@ -1036,9 +1041,9 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
 
        /* If board has no PHY IRQ, use a timer to poll the PHY */
        if (!lp->board_data.phy_irq_pin) {
-               init_timer(&check_timer);
-               check_timer.data = (unsigned long)dev;
-               check_timer.function = at91ether_check_link;
+               init_timer(&lp->check_timer);
+               lp->check_timer.data = (unsigned long)dev;
+               lp->check_timer.function = at91ether_check_link;
        }
 
        /* Display ethernet banner */
@@ -1115,15 +1120,16 @@ static int __init at91ether_probe(struct platform_device *pdev)
 
 static int __devexit at91ether_remove(struct platform_device *pdev)
 {
-       struct at91_private *lp = (struct at91_private *) at91_dev->priv;
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct at91_private *lp = netdev_priv(dev);
 
-       unregister_netdev(at91_dev);
-       free_irq(at91_dev->irq, at91_dev);
+       unregister_netdev(dev);
+       free_irq(dev->irq, dev);
        dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
        clk_put(lp->ether_clk);
 
-       free_netdev(at91_dev);
-       at91_dev = NULL;
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(dev);
        return 0;
 }
 
@@ -1131,8 +1137,8 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
 
 static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
 {
-       struct at91_private *lp = (struct at91_private *) at91_dev->priv;
        struct net_device *net_dev = platform_get_drvdata(pdev);
+       struct at91_private *lp = netdev_priv(net_dev);
        int phy_irq = lp->board_data.phy_irq_pin;
 
        if (netif_running(net_dev)) {
@@ -1149,8 +1155,8 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
 
 static int at91ether_resume(struct platform_device *pdev)
 {
-       struct at91_private *lp = (struct at91_private *) at91_dev->priv;
        struct net_device *net_dev = platform_get_drvdata(pdev);
+       struct at91_private *lp = netdev_priv(net_dev);
        int phy_irq = lp->board_data.phy_irq_pin;
 
        if (netif_running(net_dev)) {
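
The at91_ether hunks above all replace open-coded casts of dev->priv with netdev_priv() and pair it with alloc_etherdev(sizeof(private)). A minimal sketch of that pairing follows; "struct foo_priv" and foo_probe() are invented names, not anything from the driver:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

/* Sketch only: illustrative names, not at91_ether code. */
struct foo_priv {
	spinlock_t lock;
	struct timer_list check_timer;	/* poll link status */
};

static int foo_probe(void)
{
	struct net_device *dev;
	struct foo_priv *lp;
	int err;

	/* alloc_etherdev() reserves the private area right behind
	 * struct net_device ... */
	dev = alloc_etherdev(sizeof(struct foo_priv));
	if (!dev)
		return -ENOMEM;

	/* ... and netdev_priv() returns it, so no cast of dev->priv
	 * is needed. */
	lp = netdev_priv(dev);
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}
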
index d1e72e02be3adb05082ab17e4b079062abdbb844..b6b665de2ea0c28ac56e4ba30a23044fe0b29b36 100644 (file)
@@ -87,6 +87,7 @@ struct at91_private
        spinlock_t lock;                        /* lock for MDI interface */
        short phy_media;                        /* media interface type */
        unsigned short phy_address;             /* 5-bit MDI address of PHY (0..31) */
+       struct timer_list check_timer;          /* Poll link status */
 
        /* Transmit */
        struct sk_buff *skb;                    /* holds skb until xmit interrupt completes */
index f3478a30e7787ea7375db5af7c0c9d2aae8d6bf2..d6da3ce9ad7967776716827d5e8474fe7a2599f1 100644 (file)
@@ -254,7 +254,7 @@ ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsig
        } while (thislen);
 }
 
-static int __init
+static int __devinit
 ether1_ramtest(struct net_device *dev, unsigned char byte)
 {
        unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
@@ -308,7 +308,7 @@ ether1_reset (struct net_device *dev)
        return BUS_16;
 }
 
-static int __init
+static int __devinit
 ether1_init_2(struct net_device *dev)
 {
        int i;
@@ -986,7 +986,7 @@ ether1_setmulticastlist (struct net_device *dev)
 
 /* ------------------------------------------------------------------------- */
 
-static void __init ether1_banner(void)
+static void __devinit ether1_banner(void)
 {
        static unsigned int version_printed = 0;
 
index 84686c8a5bc271aa706a985770c5e70708995043..4fc234785d56069b47ce25261a0c8c8a148f29de 100644 (file)
@@ -198,7 +198,7 @@ static inline void ether3_ledon(struct net_device *dev)
  * Read the ethernet address string from the on board rom.
  * This is an ascii string!!!
  */
-static int __init
+static int __devinit
 ether3_addr(char *addr, struct expansion_card *ec)
 {
        struct in_chunk_dir cd;
@@ -223,7 +223,7 @@ ether3_addr(char *addr, struct expansion_card *ec)
 
 /* --------------------------------------------------------------------------- */
 
-static int __init
+static int __devinit
 ether3_ramtest(struct net_device *dev, unsigned char byte)
 {
        unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
@@ -272,7 +272,7 @@ ether3_ramtest(struct net_device *dev, unsigned char byte)
 
 /* ------------------------------------------------------------------------------- */
 
-static int __init ether3_init_2(struct net_device *dev)
+static int __devinit ether3_init_2(struct net_device *dev)
 {
        int i;
 
@@ -765,7 +765,7 @@ static void ether3_tx(struct net_device *dev)
        }
 }
 
-static void __init ether3_banner(void)
+static void __devinit ether3_banner(void)
 {
        static unsigned version_printed = 0;
 
index 8620a5b470f55fbd16bbb4156be98622388247db..56ae8babd919d3d302c332fa783c9072d9d93e7a 100644 (file)
@@ -908,7 +908,7 @@ int __init init_module(void)
        return 0;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        unregister_netdev(dev_at1700);
index d79489e462499bf1ecd9bf700171747caccd9a2e..7e37ac86a69ab9660f44ddc7ab1687d5d0d478a1 100644 (file)
@@ -1179,7 +1179,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
 #ifdef MODULE
 static struct net_device *atarilance_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
        atarilance_dev = atarilance_probe(-1);
        if (IS_ERR(atarilance_dev))
@@ -1187,7 +1187,7 @@ int init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(atarilance_dev);
        free_irq(atarilance_dev->irq, atarilance_dev);
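
The at1700 and atarilance hunks above tag the legacy module entry points with __init and __exit. A minimal sketch of the same annotations, shown here with module_init()/module_exit() (which provide the init_module()/cleanup_module() entry points the hunks annotate directly); foo_init()/foo_exit() are placeholder names:

#include <linux/init.h>
#include <linux/module.h>

/* Sketch only: placeholder entry points for an ISA-style net driver. */
static int __init foo_init(void)
{
	/* probe the hardware, register the net_device, ... */
	return 0;
}

static void __exit foo_exit(void)
{
	/* unregister the net_device, release the IRQ and I/O region, ... */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

The point of the annotations: __init text can be discarded once initialization is done, and __exit text is omitted entirely when the driver is built into the kernel.
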
index fc2f1d1c7ead437f4665539b9b7e61c461ac527f..5bacb7587df41b77cdc414b278e85750279f4477 100644 (file)
@@ -4411,9 +4411,9 @@ bnx2_open(struct net_device *dev)
 }
 
 static void
-bnx2_reset_task(void *data)
+bnx2_reset_task(struct work_struct *work)
 {
-       struct bnx2 *bp = data;
+       struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
 
        if (!netif_running(bp->dev))
                return;
@@ -5702,7 +5702,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->pdev = pdev;
 
        spin_lock_init(&bp->phy_lock);
-       INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
+       INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
        mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
index 488d8ed9e740b93a01aed6192dc2d31e76955af7..6482aed4bb7cddec1c6879e47a3a26a495332359 100644 (file)
@@ -3684,7 +3684,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
                        mii->val_out = 0;
                        read_lock_bh(&bond->lock);
                        read_lock(&bond->curr_slave_lock);
-                       if (bond->curr_active_slave) {
+                       if (netif_carrier_ok(bond->dev)) {
                                mii->val_out = BMSR_LSTATUS;
                        }
                        read_unlock(&bond->curr_slave_lock);
index fd2cc13f7d97b90d200d741207c0f87a61413159..c8126484c2be019350ea65d625676d7444d0af69 100644 (file)
@@ -4066,9 +4066,9 @@ static int cas_alloc_rxds(struct cas *cp)
        return 0;
 }
 
-static void cas_reset_task(void *data)
+static void cas_reset_task(struct work_struct *work)
 {
-       struct cas *cp = (struct cas *) data;
+       struct cas *cp = container_of(work, struct cas, reset_task);
 #if 0
        int pending = atomic_read(&cp->reset_task_pending);
 #else
@@ -5006,7 +5006,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
        atomic_set(&cp->reset_task_pending_spare, 0);
        atomic_set(&cp->reset_task_pending_mtu, 0);
 #endif
-       INIT_WORK(&cp->reset_task, cas_reset_task, cp);
+       INIT_WORK(&cp->reset_task, cas_reset_task);
 
        /* Default link parameters */
        if (link_mode >= 0 && link_mode <= 6)
index b265941e137220a90eb3d8d1588c61cb04ee717d..74758d2c7af8c368ff79b7ab4dc29aa04c7f0ffb 100644 (file)
@@ -279,7 +279,7 @@ struct adapter {
        struct petp   *tp;
 
        struct port_info port[MAX_NPORTS];
-       struct work_struct stats_update_task;
+       struct delayed_work stats_update_task;
        struct timer_list stats_update_timer;
 
        spinlock_t tpi_lock;
index 60901f25014e77f14c28194d6605a17f9932bb26..cf9143499882dc1d128f65ed0031cf919b09775b 100644 (file)
@@ -91,7 +91,7 @@ struct cphy {
        int state;      /* Link status state machine */
        adapter_t *adapter;                  /* associated adapter */
 
-       struct work_struct phy_update;
+       struct delayed_work phy_update;
 
        u16 bmsr;
        int count;
index 53bec6739812366fdf0c1537b54149409d9ae1ec..de48eadddbc4af9fa09452fea78b22ec7f17cf9c 100644 (file)
@@ -953,10 +953,11 @@ static void t1_netpoll(struct net_device *dev)
  * Periodic accumulation of MAC statistics.  This is used only if the MAC
  * does not have any other way to prevent stats counter overflow.
  */
-static void mac_stats_task(void *data)
+static void mac_stats_task(struct work_struct *work)
 {
        int i;
-       struct adapter *adapter = data;
+       struct adapter *adapter =
+               container_of(work, struct adapter, stats_update_task.work);
 
        for_each_port(adapter, i) {
                struct port_info *p = &adapter->port[i];
@@ -977,9 +978,10 @@ static void mac_stats_task(void *data)
 /*
  * Processes elmer0 external interrupts in process context.
  */
-static void ext_intr_task(void *data)
+static void ext_intr_task(struct work_struct *work)
 {
-       struct adapter *adapter = data;
+       struct adapter *adapter =
+               container_of(work, struct adapter, ext_intr_handler_task);
 
        t1_elmer0_ext_intr_handler(adapter);
 
@@ -1113,9 +1115,9 @@ static int __devinit init_one(struct pci_dev *pdev,
                        spin_lock_init(&adapter->mac_lock);
 
                        INIT_WORK(&adapter->ext_intr_handler_task,
-                                 ext_intr_task, adapter);
-                       INIT_WORK(&adapter->stats_update_task, mac_stats_task,
-                                 adapter);
+                                 ext_intr_task);
+                       INIT_DELAYED_WORK(&adapter->stats_update_task,
+                                         mac_stats_task);
 
                        pci_set_drvdata(pdev, netdev);
                }
index 0b90014d5b3e7a4d19f032c679b57c72a3b98f59..c7731b6f9de319c24487c4ea4a500d243925734c 100644 (file)
@@ -93,9 +93,11 @@ static int my3126_interrupt_handler(struct cphy *cphy)
        return cphy_cause_link_change;
 }
 
-static void my3216_poll(void *arg)
+static void my3216_poll(struct work_struct *work)
 {
-       my3126_interrupt_handler(arg);
+       struct cphy *cphy = container_of(work, struct cphy, phy_update.work);
+
+       my3126_interrupt_handler(cphy);
 }
 
 static int my3126_set_loopback(struct cphy *cphy, int on)
@@ -171,7 +173,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
        if (cphy)
                cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
 
-       INIT_WORK(&cphy->phy_update, my3216_poll, cphy);
+       INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
        cphy->bmsr = 0;
 
        return (cphy);
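
The bnx2, cassini and chelsio hunks above follow the workqueue API conversion of this merge window: work functions now receive the work_struct itself and recover their context with container_of(), and work that is rescheduled with a delay moves to struct delayed_work. A minimal sketch of both variants, with invented names ("struct foo", foo_reset_task, foo_stats_task):

#include <linux/workqueue.h>

/* Sketch only: illustrative structure, not taken from any driver above. */
struct foo {
	int resets;
	struct work_struct reset_task;		/* one-shot work */
	struct delayed_work stats_task;		/* periodic work */
};

static void foo_reset_task(struct work_struct *work)
{
	/* The callback gets the work_struct; container_of() recovers the
	 * owning structure that used to be passed as the "data" argument. */
	struct foo *p = container_of(work, struct foo, reset_task);

	p->resets++;
}

static void foo_stats_task(struct work_struct *work)
{
	/* Delayed work embeds its work_struct at .work, hence the extra
	 * member in the container_of() expression. */
	struct foo *p = container_of(work, struct foo, stats_task.work);

	schedule_delayed_work(&p->stats_task, HZ);	/* re-arm */
}

static void foo_setup(struct foo *p)
{
	INIT_WORK(&p->reset_task, foo_reset_task);
	INIT_DELAYED_WORK(&p->stats_task, foo_stats_task);
}

The same container_of() idiom is what the e100, e1000, ehea, baycom_epp and dmascc hunks further down apply.
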
index dec70c2b374a775bb94f27c234948d15b57e6128..4612f71a7106dd45bc43db582c7be5177f39cad0 100644 (file)
@@ -1974,7 +1974,7 @@ out:
        return ret;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        unregister_netdev(dev_cs89x0);
index 690bb40b353dacedf8a6979998ca4f7cc3b1d39d..8396e411f1ce7b59088e2b8f5667ca5d01a789c4 100644 (file)
@@ -43,7 +43,6 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $,  Bjorn Ekwall (bj
  * modify the following "#define": (see <asm/io.h> for more info)
 #define REALLY_SLOW_IO
  */
-#define SLOW_IO_BY_JUMPING /* Looks "better" than dummy write to port 0x80 :-) */
 
 /* use 0 for production, 1 for verification, >2 for debug */
 #ifdef DE600_DEBUG
index 00e2a8a134d7a2993530e29f97ac86d4458bf924..4ae0fed7122e7abbf5498137d26f799cb5976f16 100644 (file)
  *
  *      v0.009: Module support fixes, multiple interfaces support, various
  *              bits. macro
+ *
+ *      v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
+ *              PMAX requirement to only use halfword accesses to the
+ *              buffer. macro
  */
 
 #include <linux/crc32.h>
@@ -54,6 +58,7 @@
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/types.h>
 
 #include <asm/addrspace.h>
 #include <asm/system.h>
@@ -67,7 +72,7 @@
 #include <asm/dec/tc.h>
 
 static char version[] __devinitdata =
-"declance.c: v0.009 by Linux MIPS DECstation task force\n";
+"declance.c: v0.010 by Linux MIPS DECstation task force\n";
 
 MODULE_AUTHOR("Linux MIPS DECstation task force");
 MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
@@ -110,24 +115,25 @@ MODULE_LICENSE("GPL");
 #define        LE_C3_BCON      0x1     /* Byte control */
 
 /* Receive message descriptor 1 */
-#define LE_R1_OWN       0x80   /* Who owns the entry */
-#define LE_R1_ERR       0x40   /* Error: if FRA, OFL, CRC or BUF is set */
-#define LE_R1_FRA       0x20   /* FRA: Frame error */
-#define LE_R1_OFL       0x10   /* OFL: Frame overflow */
-#define LE_R1_CRC       0x08   /* CRC error */
-#define LE_R1_BUF       0x04   /* BUF: Buffer error */
-#define LE_R1_SOP       0x02   /* Start of packet */
-#define LE_R1_EOP       0x01   /* End of packet */
-#define LE_R1_POK       0x03   /* Packet is complete: SOP + EOP */
-
-#define LE_T1_OWN       0x80   /* Lance owns the packet */
-#define LE_T1_ERR       0x40   /* Error summary */
-#define LE_T1_EMORE     0x10   /* Error: more than one retry needed */
-#define LE_T1_EONE      0x08   /* Error: one retry needed */
-#define LE_T1_EDEF      0x04   /* Error: deferred */
-#define LE_T1_SOP       0x02   /* Start of packet */
-#define LE_T1_EOP       0x01   /* End of packet */
-#define LE_T1_POK      0x03    /* Packet is complete: SOP + EOP */
+#define LE_R1_OWN      0x8000  /* Who owns the entry */
+#define LE_R1_ERR      0x4000  /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA      0x2000  /* FRA: Frame error */
+#define LE_R1_OFL      0x1000  /* OFL: Frame overflow */
+#define LE_R1_CRC      0x0800  /* CRC error */
+#define LE_R1_BUF      0x0400  /* BUF: Buffer error */
+#define LE_R1_SOP      0x0200  /* Start of packet */
+#define LE_R1_EOP      0x0100  /* End of packet */
+#define LE_R1_POK      0x0300  /* Packet is complete: SOP + EOP */
+
+/* Transmit message descriptor 1 */
+#define LE_T1_OWN      0x8000  /* Lance owns the packet */
+#define LE_T1_ERR      0x4000  /* Error summary */
+#define LE_T1_EMORE    0x1000  /* Error: more than one retry needed */
+#define LE_T1_EONE     0x0800  /* Error: one retry needed */
+#define LE_T1_EDEF     0x0400  /* Error: deferred */
+#define LE_T1_SOP      0x0200  /* Start of packet */
+#define LE_T1_EOP      0x0100  /* End of packet */
+#define LE_T1_POK      0x0300  /* Packet is complete: SOP + EOP */
 
 #define LE_T3_BUF       0x8000 /* Buffer error */
 #define LE_T3_UFL       0x4000 /* Error underflow */
@@ -156,69 +162,57 @@ MODULE_LICENSE("GPL");
 #undef TEST_HITS
 #define ZERO 0
 
-/* The DS2000/3000 have a linear 64 KB buffer.
-
- * The PMAD-AA has 128 kb buffer on-board.
+/*
+ * The DS2100/3100 have a linear 64 kB buffer which supports halfword
+ * accesses only.  Each halfword of the buffer is word-aligned in the
+ * CPU address space.
  *
- * The IOASIC LANCE devices use a shared memory region. This region as seen
- * from the CPU is (max) 128 KB long and has to be on an 128 KB boundary.
- * The LANCE sees this as a 64 KB long continuous memory region.
+ * The PMAD-AA has a 128 kB buffer on-board.
  *
- * The LANCE's DMA address is used as an index in this buffer and DMA takes
- * place in bursts of eight 16-Bit words which are packed into four 32-Bit words
- * by the IOASIC. This leads to a strange padding: 16 bytes of valid data followed
- * by a 16 byte gap :-(.
+ * The IOASIC LANCE devices use a shared memory region.  This region
+ * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB
+ * boundary.  The LANCE sees this as a 64 kB long continuous memory
+ * region.
+ *
+ * The LANCE's DMA address is used as an index in this buffer and DMA
+ * takes place in bursts of eight 16-bit words which are packed into
+ * four 32-bit words by the IOASIC.  This leads to a strange padding:
+ * 16 bytes of valid data followed by a 16 byte gap :-(.
  */
 
 struct lance_rx_desc {
        unsigned short rmd0;            /* low address of packet */
-       short gap0;
-       unsigned char rmd1_hadr;        /* high address of packet */
-       unsigned char rmd1_bits;        /* descriptor bits */
-       short gap1;
+       unsigned short rmd1;            /* high address of packet
+                                          and descriptor bits */
        short length;                   /* 2s complement (negative!)
                                           of buffer length */
-       short gap2;
        unsigned short mblength;        /* actual number of bytes received */
-       short gap3;
 };
 
 struct lance_tx_desc {
        unsigned short tmd0;            /* low address of packet */
-       short gap0;
-       unsigned char tmd1_hadr;        /* high address of packet */
-       unsigned char tmd1_bits;        /* descriptor bits */
-       short gap1;
+       unsigned short tmd1;            /* high address of packet
+                                          and descriptor bits */
        short length;                   /* 2s complement (negative!)
                                           of buffer length */
-       short gap2;
        unsigned short misc;
-       short gap3;
 };
 
 
 /* First part of the LANCE initialization block, described in databook. */
 struct lance_init_block {
        unsigned short mode;            /* pre-set mode (reg. 15) */
-       short gap0;
 
-       unsigned char phys_addr[12];    /* physical ethernet address
-                                          only 0, 1, 4, 5, 8, 9 are valid
-                                          2, 3, 6, 7, 10, 11 are gaps */
-       unsigned short filter[8];       /* multicast filter
-                                          only 0, 2, 4, 6 are valid
-                                          1, 3, 5, 7 are gaps */
+       unsigned short phys_addr[3];    /* physical ethernet address */
+       unsigned short filter[4];       /* multicast filter */
 
        /* Receive and transmit ring base, along with extra bits. */
        unsigned short rx_ptr;          /* receive descriptor addr */
-       short gap1;
        unsigned short rx_len;          /* receive len and high addr */
-       short gap2;
        unsigned short tx_ptr;          /* transmit descriptor addr */
-       short gap3;
        unsigned short tx_len;          /* transmit len and high addr */
-       short gap4;
-       short gap5[8];
+
+       short gap[4];
 
        /* The buffer descriptors */
        struct lance_rx_desc brx_ring[RX_RING_SIZE];
@@ -226,15 +220,28 @@ struct lance_init_block {
 };
 
 #define BUF_OFFSET_CPU sizeof(struct lance_init_block)
-#define BUF_OFFSET_LNC (sizeof(struct lance_init_block)>>1)
+#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
 
-#define libdesc_offset(rt, elem) \
-((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+#define shift_off(off, type)                                           \
+       (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
 
-/*
- * This works *only* for the ring descriptors
- */
-#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1)
+#define lib_off(rt, type)                                              \
+       shift_off(offsetof(struct lance_init_block, rt), type)
+
+#define lib_ptr(ib, rt, type)                                          \
+       ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
+
+#define rds_off(rt, type)                                              \
+       shift_off(offsetof(struct lance_rx_desc, rt), type)
+
+#define rds_ptr(rd, rt, type)                                          \
+       ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
+
+#define tds_off(rt, type)                                              \
+       shift_off(offsetof(struct lance_tx_desc, rt), type)
+
+#define tds_ptr(td, rt, type)                                          \
+       ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
 
 struct lance_private {
        struct net_device *next;
@@ -242,7 +249,6 @@ struct lance_private {
        int slot;
        int dma_irq;
        volatile struct lance_regs *ll;
-       volatile struct lance_init_block *init_block;
 
        spinlock_t      lock;
 
@@ -260,8 +266,8 @@ struct lance_private {
        char *tx_buf_ptr_cpu[TX_RING_SIZE];
 
        /* Pointers to the ring buffers as seen from the LANCE */
-       char *rx_buf_ptr_lnc[RX_RING_SIZE];
-       char *tx_buf_ptr_lnc[TX_RING_SIZE];
+       uint rx_buf_ptr_lnc[RX_RING_SIZE];
+       uint tx_buf_ptr_lnc[TX_RING_SIZE];
 };
 
 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
@@ -294,7 +300,7 @@ static inline void writereg(volatile unsigned short *regptr, short value)
 static void load_csrs(struct lance_private *lp)
 {
        volatile struct lance_regs *ll = lp->ll;
-       int leptr;
+       uint leptr;
 
        /* The address space as seen from the LANCE
         * begins at address 0. HK
@@ -316,12 +322,14 @@ static void load_csrs(struct lance_private *lp)
  * Our specialized copy routines
  *
  */
-void cp_to_buf(const int type, void *to, const void *from, int len)
+static void cp_to_buf(const int type, void *to, const void *from, int len)
 {
        unsigned short *tp, *fp, clen;
        unsigned char *rtp, *rfp;
 
-       if (type == PMAX_LANCE) {
+       if (type == PMAD_LANCE) {
+               memcpy(to, from, len);
+       } else if (type == PMAX_LANCE) {
                clen = len >> 1;
                tp = (unsigned short *) to;
                fp = (unsigned short *) from;
@@ -370,12 +378,14 @@ void cp_to_buf(const int type, void *to, const void *from, int len)
        iob();
 }
 
-void cp_from_buf(const int type, void *to, const void *from, int len)
+static void cp_from_buf(const int type, void *to, const void *from, int len)
 {
        unsigned short *tp, *fp, clen;
        unsigned char *rtp, *rfp;
 
-       if (type == PMAX_LANCE) {
+       if (type == PMAD_LANCE) {
+               memcpy(to, from, len);
+       } else if (type == PMAX_LANCE) {
                clen = len >> 1;
                tp = (unsigned short *) to;
                fp = (unsigned short *) from;
@@ -431,12 +441,10 @@ void cp_from_buf(const int type, void *to, const void *from, int len)
 static void lance_init_ring(struct net_device *dev)
 {
        struct lance_private *lp = netdev_priv(dev);
-       volatile struct lance_init_block *ib;
-       int leptr;
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
+       uint leptr;
        int i;
 
-       ib = (struct lance_init_block *) (dev->mem_start);
-
        /* Lock out other processes while setting up hardware */
        netif_stop_queue(dev);
        lp->rx_new = lp->tx_new = 0;
@@ -445,55 +453,64 @@ static void lance_init_ring(struct net_device *dev)
        /* Copy the ethernet address to the lance init block.
         * XXX bit 0 of the physical address registers has to be zero
         */
-       ib->phys_addr[0] = dev->dev_addr[0];
-       ib->phys_addr[1] = dev->dev_addr[1];
-       ib->phys_addr[4] = dev->dev_addr[2];
-       ib->phys_addr[5] = dev->dev_addr[3];
-       ib->phys_addr[8] = dev->dev_addr[4];
-       ib->phys_addr[9] = dev->dev_addr[5];
+       *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
+                                    dev->dev_addr[0];
+       *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
+                                    dev->dev_addr[2];
+       *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
+                                    dev->dev_addr[4];
        /* Setup the initialization block */
 
        /* Setup rx descriptor pointer */
-       leptr = LANCE_ADDR(libdesc_offset(brx_ring, 0));
-       ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
-       ib->rx_ptr = leptr;
+       leptr = offsetof(struct lance_init_block, brx_ring);
+       *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
+                                        (leptr >> 16);
+       *lib_ptr(ib, rx_ptr, lp->type) = leptr;
        if (ZERO)
-               printk("RX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(brx_ring, 0));
+               printk("RX ptr: %8.8x(%8.8x)\n",
+                      leptr, lib_off(brx_ring, lp->type));
 
        /* Setup tx descriptor pointer */
-       leptr = LANCE_ADDR(libdesc_offset(btx_ring, 0));
-       ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
-       ib->tx_ptr = leptr;
+       leptr = offsetof(struct lance_init_block, btx_ring);
+       *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
+                                        (leptr >> 16);
+       *lib_ptr(ib, tx_ptr, lp->type) = leptr;
        if (ZERO)
-               printk("TX ptr: %8.8x(%8.8x)\n", leptr, libdesc_offset(btx_ring, 0));
+               printk("TX ptr: %8.8x(%8.8x)\n",
+                      leptr, lib_off(btx_ring, lp->type));
 
        if (ZERO)
                printk("TX rings:\n");
 
        /* Setup the Tx ring entries */
        for (i = 0; i < TX_RING_SIZE; i++) {
-               leptr = (int) lp->tx_buf_ptr_lnc[i];
-               ib->btx_ring[i].tmd0 = leptr;
-               ib->btx_ring[i].tmd1_hadr = leptr >> 16;
-               ib->btx_ring[i].tmd1_bits = 0;
-               ib->btx_ring[i].length = 0xf000;        /* The ones required by tmd2 */
-               ib->btx_ring[i].misc = 0;
+               leptr = lp->tx_buf_ptr_lnc[i];
+               *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
+               *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
+                                                          0xff;
+               *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
+                                               /* The ones required by tmd2 */
+               *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
                if (i < 3 && ZERO)
-                       printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->tx_buf_ptr_cpu[i]);
+                       printk("%d: 0x%8.8x(0x%8.8x)\n",
+                              i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
        }
 
        /* Setup the Rx ring entries */
        if (ZERO)
                printk("RX rings:\n");
        for (i = 0; i < RX_RING_SIZE; i++) {
-               leptr = (int) lp->rx_buf_ptr_lnc[i];
-               ib->brx_ring[i].rmd0 = leptr;
-               ib->brx_ring[i].rmd1_hadr = leptr >> 16;
-               ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
-               ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
-               ib->brx_ring[i].mblength = 0;
+               leptr = lp->rx_buf_ptr_lnc[i];
+               *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
+               *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
+                                                           0xff) |
+                                                          LE_R1_OWN;
+               *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
+                                                            0xf000;
+               *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
                if (i < 3 && ZERO)
-                       printk("%d: 0x%8.8x(0x%8.8x)\n", i, leptr, (int) lp->rx_buf_ptr_cpu[i]);
+                       printk("%d: 0x%8.8x(0x%8.8x)\n",
+                              i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
        }
        iob();
 }
@@ -511,11 +528,13 @@ static int init_restart_lance(struct lance_private *lp)
                udelay(10);
        }
        if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
-               printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+               printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+                      i, ll->rdp);
                return -1;
        }
        if ((ll->rdp & LE_C0_ERR)) {
-               printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, ll->rdp);
+               printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+                      i, ll->rdp);
                return -1;
        }
        writereg(&ll->rdp, LE_C0_IDON);
@@ -528,12 +547,11 @@ static int init_restart_lance(struct lance_private *lp)
 static int lance_rx(struct net_device *dev)
 {
        struct lance_private *lp = netdev_priv(dev);
-       volatile struct lance_init_block *ib;
-       volatile struct lance_rx_desc *rd = 0;
-       unsigned char bits;
-       int len = 0;
-       struct sk_buff *skb = 0;
-       ib = (struct lance_init_block *) (dev->mem_start);
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
+       volatile u16 *rd;
+       unsigned short bits;
+       int entry, len;
+       struct sk_buff *skb;
 
 #ifdef TEST_HITS
        {
@@ -542,19 +560,22 @@ static int lance_rx(struct net_device *dev)
                printk("[");
                for (i = 0; i < RX_RING_SIZE; i++) {
                        if (i == lp->rx_new)
-                               printk("%s", ib->brx_ring[i].rmd1_bits &
+                               printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+                                                     lp->type) &
                                             LE_R1_OWN ? "_" : "X");
                        else
-                               printk("%s", ib->brx_ring[i].rmd1_bits &
+                               printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+                                                     lp->type) &
                                             LE_R1_OWN ? "." : "1");
                }
                printk("]");
        }
 #endif
 
-       for (rd = &ib->brx_ring[lp->rx_new];
-            !((bits = rd->rmd1_bits) & LE_R1_OWN);
-            rd = &ib->brx_ring[lp->rx_new]) {
+       for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
+            !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
+            rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
+               entry = lp->rx_new;
 
                /* We got an incomplete frame? */
                if ((bits & LE_R1_POK) != LE_R1_POK) {
@@ -575,16 +596,18 @@ static int lance_rx(struct net_device *dev)
                        if (bits & LE_R1_EOP)
                                lp->stats.rx_errors++;
                } else {
-                       len = (rd->mblength & 0xfff) - 4;
+                       len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
                        skb = dev_alloc_skb(len + 2);
 
                        if (skb == 0) {
                                printk("%s: Memory squeeze, deferring packet.\n",
                                       dev->name);
                                lp->stats.rx_dropped++;
-                               rd->mblength = 0;
-                               rd->rmd1_bits = LE_R1_OWN;
-                               lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+                               *rds_ptr(rd, mblength, lp->type) = 0;
+                               *rds_ptr(rd, rmd1, lp->type) =
+                                       ((lp->rx_buf_ptr_lnc[entry] >> 16) &
+                                        0xff) | LE_R1_OWN;
+                               lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
                                return 0;
                        }
                        lp->stats.rx_bytes += len;
@@ -594,8 +617,7 @@ static int lance_rx(struct net_device *dev)
                        skb_put(skb, len);      /* make room */
 
                        cp_from_buf(lp->type, skb->data,
-                                   (char *)lp->rx_buf_ptr_cpu[lp->rx_new],
-                                   len);
+                                   (char *)lp->rx_buf_ptr_cpu[entry], len);
 
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
@@ -604,10 +626,11 @@ static int lance_rx(struct net_device *dev)
                }
 
                /* Return the packet to the pool */
-               rd->mblength = 0;
-               rd->length = -RX_BUFF_SIZE | 0xf000;
-               rd->rmd1_bits = LE_R1_OWN;
-               lp->rx_new = (lp->rx_new + 1) & RX_RING_MOD_MASK;
+               *rds_ptr(rd, mblength, lp->type) = 0;
+               *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
+               *rds_ptr(rd, rmd1, lp->type) =
+                       ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
+               lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
        }
        return 0;
 }
@@ -615,24 +638,24 @@ static int lance_rx(struct net_device *dev)
 static void lance_tx(struct net_device *dev)
 {
        struct lance_private *lp = netdev_priv(dev);
-       volatile struct lance_init_block *ib;
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
        volatile struct lance_regs *ll = lp->ll;
-       volatile struct lance_tx_desc *td;
+       volatile u16 *td;
        int i, j;
        int status;
-       ib = (struct lance_init_block *) (dev->mem_start);
+
        j = lp->tx_old;
 
        spin_lock(&lp->lock);
 
        for (i = j; i != lp->tx_new; i = j) {
-               td = &ib->btx_ring[i];
+               td = lib_ptr(ib, btx_ring[i], lp->type);
                /* If we hit a packet not owned by us, stop */
-               if (td->tmd1_bits & LE_T1_OWN)
+               if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
                        break;
 
-               if (td->tmd1_bits & LE_T1_ERR) {
-                       status = td->misc;
+               if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
+                       status = *tds_ptr(td, misc, lp->type);
 
                        lp->stats.tx_errors++;
                        if (status & LE_T3_RTY)
@@ -667,18 +690,19 @@ static void lance_tx(struct net_device *dev)
                                init_restart_lance(lp);
                                goto out;
                        }
-               } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+               } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
+                          LE_T1_POK) {
                        /*
                         * So we don't count the packet more than once.
                         */
-                       td->tmd1_bits &= ~(LE_T1_POK);
+                       *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
 
                        /* One collision before packet was sent. */
-                       if (td->tmd1_bits & LE_T1_EONE)
+                       if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
                                lp->stats.collisions++;
 
                        /* More than one collision, be optimistic. */
-                       if (td->tmd1_bits & LE_T1_EMORE)
+                       if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
                                lp->stats.collisions += 2;
 
                        lp->stats.tx_packets++;
@@ -752,7 +776,7 @@ struct net_device *last_dev = 0;
 
 static int lance_open(struct net_device *dev)
 {
-       volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
        struct lance_private *lp = netdev_priv(dev);
        volatile struct lance_regs *ll = lp->ll;
        int status = 0;
@@ -769,11 +793,11 @@ static int lance_open(struct net_device *dev)
         *
         * BTW it is common bug in all lance drivers! --ANK
         */
-       ib->mode = 0;
-       ib->filter [0] = 0;
-       ib->filter [2] = 0;
-       ib->filter [4] = 0;
-       ib->filter [6] = 0;
+       *lib_ptr(ib, mode, lp->type) = 0;
+       *lib_ptr(ib, filter[0], lp->type) = 0;
+       *lib_ptr(ib, filter[1], lp->type) = 0;
+       *lib_ptr(ib, filter[2], lp->type) = 0;
+       *lib_ptr(ib, filter[3], lp->type) = 0;
 
        lance_init_ring(dev);
        load_csrs(lp);
@@ -874,12 +898,10 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct lance_private *lp = netdev_priv(dev);
        volatile struct lance_regs *ll = lp->ll;
-       volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
-       int entry, skblen, len;
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
+       int entry, len;
 
-       skblen = skb->len;
-
-       len = skblen;
+       len = skb->len;
 
        if (len < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN))
@@ -889,23 +911,17 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        lp->stats.tx_bytes += len;
 
-       entry = lp->tx_new & TX_RING_MOD_MASK;
-       ib->btx_ring[entry].length = (-len);
-       ib->btx_ring[entry].misc = 0;
-
-       cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data,
-                 skblen);
+       entry = lp->tx_new;
+       *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
+       *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
 
-       /* Clear the slack of the packet, do I need this? */
-       /* For a firewall it's a good idea - AC */
-/*
-   if (len != skblen)
-   memset ((char *) &ib->tx_buf [entry][skblen], 0, (len - skblen) << 1);
- */
+       cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
 
        /* Now, give the packet to the lance */
-       ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
-       lp->tx_new = (lp->tx_new + 1) & TX_RING_MOD_MASK;
+       *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+               ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
+               (LE_T1_POK | LE_T1_OWN);
+       lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
 
        if (TX_BUFFS_AVAIL <= 0)
                netif_stop_queue(dev);
@@ -930,8 +946,8 @@ static struct net_device_stats *lance_get_stats(struct net_device *dev)
 
 static void lance_load_multicast(struct net_device *dev)
 {
-       volatile struct lance_init_block *ib = (struct lance_init_block *) (dev->mem_start);
-       volatile u16 *mcast_table = (u16 *) & ib->filter;
+       struct lance_private *lp = netdev_priv(dev);
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
        struct dev_mc_list *dmi = dev->mc_list;
        char *addrs;
        int i;
@@ -939,17 +955,17 @@ static void lance_load_multicast(struct net_device *dev)
 
        /* set all multicast bits */
        if (dev->flags & IFF_ALLMULTI) {
-               ib->filter[0] = 0xffff;
-               ib->filter[2] = 0xffff;
-               ib->filter[4] = 0xffff;
-               ib->filter[6] = 0xffff;
+               *lib_ptr(ib, filter[0], lp->type) = 0xffff;
+               *lib_ptr(ib, filter[1], lp->type) = 0xffff;
+               *lib_ptr(ib, filter[2], lp->type) = 0xffff;
+               *lib_ptr(ib, filter[3], lp->type) = 0xffff;
                return;
        }
        /* clear the multicast filter */
-       ib->filter[0] = 0;
-       ib->filter[2] = 0;
-       ib->filter[4] = 0;
-       ib->filter[6] = 0;
+       *lib_ptr(ib, filter[0], lp->type) = 0;
+       *lib_ptr(ib, filter[1], lp->type) = 0;
+       *lib_ptr(ib, filter[2], lp->type) = 0;
+       *lib_ptr(ib, filter[3], lp->type) = 0;
 
        /* Add addresses */
        for (i = 0; i < dev->mc_count; i++) {
@@ -962,7 +978,7 @@ static void lance_load_multicast(struct net_device *dev)
 
                crc = ether_crc_le(ETH_ALEN, addrs);
                crc = crc >> 26;
-               mcast_table[2 * (crc >> 4)] |= 1 << (crc & 0xf);
+               *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
        }
        return;
 }
@@ -970,11 +986,9 @@ static void lance_load_multicast(struct net_device *dev)
 static void lance_set_multicast(struct net_device *dev)
 {
        struct lance_private *lp = netdev_priv(dev);
-       volatile struct lance_init_block *ib;
+       volatile u16 *ib = (volatile u16 *)dev->mem_start;
        volatile struct lance_regs *ll = lp->ll;
 
-       ib = (struct lance_init_block *) (dev->mem_start);
-
        if (!netif_running(dev))
                return;
 
@@ -992,9 +1006,9 @@ static void lance_set_multicast(struct net_device *dev)
        lance_init_ring(dev);
 
        if (dev->flags & IFF_PROMISC) {
-               ib->mode |= LE_MO_PROM;
+               *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
        } else {
-               ib->mode &= ~LE_MO_PROM;
+               *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
                lance_load_multicast(dev);
        }
        load_csrs(lp);
@@ -1051,7 +1065,6 @@ static int __init dec_lance_init(const int type, const int slot)
        lp->type = type;
        lp->slot = slot;
        switch (type) {
-#ifdef CONFIG_TC
        case ASIC_LANCE:
                dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
 
@@ -1073,20 +1086,20 @@ static int __init dec_lance_init(const int type, const int slot)
                 */
                for (i = 0; i < RX_RING_SIZE; i++) {
                        lp->rx_buf_ptr_cpu[i] =
-                               (char *)(dev->mem_start + BUF_OFFSET_CPU +
+                               (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
                                         2 * i * RX_BUFF_SIZE);
                        lp->rx_buf_ptr_lnc[i] =
-                               (char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+                               (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
                }
                for (i = 0; i < TX_RING_SIZE; i++) {
                        lp->tx_buf_ptr_cpu[i] =
-                               (char *)(dev->mem_start + BUF_OFFSET_CPU +
+                               (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
                                         2 * RX_RING_SIZE * RX_BUFF_SIZE +
                                         2 * i * TX_BUFF_SIZE);
                        lp->tx_buf_ptr_lnc[i] =
-                               (char *)(BUF_OFFSET_LNC +
-                                        RX_RING_SIZE * RX_BUFF_SIZE +
-                                        i * TX_BUFF_SIZE);
+                               (BUF_OFFSET_LNC +
+                                RX_RING_SIZE * RX_BUFF_SIZE +
+                                i * TX_BUFF_SIZE);
                }
 
                /* Setup I/O ASIC LANCE DMA.  */
@@ -1095,11 +1108,12 @@ static int __init dec_lance_init(const int type, const int slot)
                             CPHYSADDR(dev->mem_start) << 3);
 
                break;
-
+#ifdef CONFIG_TC
        case PMAD_LANCE:
                claim_tc_card(slot);
 
                dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
+               dev->mem_end = dev->mem_start + 0x100000;
                dev->base_addr = dev->mem_start + 0x100000;
                dev->irq = get_tc_irq_nr(slot);
                esar_base = dev->mem_start + 0x1c0002;
@@ -1110,7 +1124,7 @@ static int __init dec_lance_init(const int type, const int slot)
                                (char *)(dev->mem_start + BUF_OFFSET_CPU +
                                         i * RX_BUFF_SIZE);
                        lp->rx_buf_ptr_lnc[i] =
-                               (char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+                               (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
                }
                for (i = 0; i < TX_RING_SIZE; i++) {
                        lp->tx_buf_ptr_cpu[i] =
@@ -1118,18 +1132,18 @@ static int __init dec_lance_init(const int type, const int slot)
                                         RX_RING_SIZE * RX_BUFF_SIZE +
                                         i * TX_BUFF_SIZE);
                        lp->tx_buf_ptr_lnc[i] =
-                               (char *)(BUF_OFFSET_LNC +
-                                        RX_RING_SIZE * RX_BUFF_SIZE +
-                                        i * TX_BUFF_SIZE);
+                               (BUF_OFFSET_LNC +
+                                RX_RING_SIZE * RX_BUFF_SIZE +
+                                i * TX_BUFF_SIZE);
                }
 
                break;
 #endif
-
        case PMAX_LANCE:
                dev->irq = dec_interrupt[DEC_IRQ_LANCE];
                dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
                dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+               dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
                esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
                lp->dma_irq = -1;
 
@@ -1138,20 +1152,20 @@ static int __init dec_lance_init(const int type, const int slot)
                 */
                for (i = 0; i < RX_RING_SIZE; i++) {
                        lp->rx_buf_ptr_cpu[i] =
-                               (char *)(dev->mem_start + BUF_OFFSET_CPU +
+                               (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
                                         2 * i * RX_BUFF_SIZE);
                        lp->rx_buf_ptr_lnc[i] =
-                               (char *)(BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+                               (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
                }
                for (i = 0; i < TX_RING_SIZE; i++) {
                        lp->tx_buf_ptr_cpu[i] =
-                               (char *)(dev->mem_start + BUF_OFFSET_CPU +
+                               (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
                                         2 * RX_RING_SIZE * RX_BUFF_SIZE +
                                         2 * i * TX_BUFF_SIZE);
                        lp->tx_buf_ptr_lnc[i] =
-                               (char *)(BUF_OFFSET_LNC +
-                                        RX_RING_SIZE * RX_BUFF_SIZE +
-                                        i * TX_BUFF_SIZE);
+                               (BUF_OFFSET_LNC +
+                                RX_RING_SIZE * RX_BUFF_SIZE +
+                                i * TX_BUFF_SIZE);
                }
 
                break;
@@ -1279,10 +1293,8 @@ static int __init dec_lance_probe(void)
        /* Then handle onboard devices. */
        if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
                if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
-#ifdef CONFIG_TC
                        if (dec_lance_init(ASIC_LANCE, -1) >= 0)
                                count++;
-#endif
                } else if (!TURBOCHANNEL) {
                        if (dec_lance_init(PMAX_LANCE, -1) >= 0)
                                count++;
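
The declance changes above replace the gap-padded descriptor structures with u16 accessors; as the rewritten comment explains, on the DS2100/3100 and IOASIC variants each 16-bit buffer halfword occupies 32 bits of CPU address space, so shift_off() doubles the native byte offset, while the PMAD-AA buffer stays linear. A small stand-alone illustration of that arithmetic (user-space C, not driver code); demo_init_block mirrors only the first fields of the new lance_init_block:

/* Sketch only: demonstrates the offset doubling behind shift_off()/lib_ptr(). */
#include <stddef.h>
#include <stdio.h>

struct demo_init_block {		/* first fields of the new all-u16 layout */
	unsigned short mode;
	unsigned short phys_addr[3];
	unsigned short filter[4];
	unsigned short rx_ptr;
	/* ... */
};

#define SPREAD(off)	((off) << 1)	/* ASIC_LANCE, PMAX_LANCE */
#define LINEAR(off)	(off)		/* PMAD_LANCE */

int main(void)
{
	size_t off = offsetof(struct demo_init_block, rx_ptr);

	/* mode (2) + phys_addr (6) + filter (8) = 16 bytes, so the CPU
	 * offset of rx_ptr is 16 on the PMAD-AA and 32 on the spread-out
	 * buffers. */
	printf("native %zu, PMAD %zu, PMAX/ASIC %zu\n",
	       off, LINEAR(off), SPREAD(off));
	return 0;
}
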
index 3a8df479cbdad660351a676b54912074fb597062..03bf164f9e8db32f3eb1fdafe1e3fffb0b3d26fb 100644 (file)
@@ -2102,9 +2102,10 @@ static void e100_tx_timeout(struct net_device *netdev)
        schedule_work(&nic->tx_timeout_task);
 }
 
-static void e100_tx_timeout_task(struct net_device *netdev)
+static void e100_tx_timeout_task(struct work_struct *work)
 {
-       struct nic *nic = netdev_priv(netdev);
+       struct nic *nic = container_of(work, struct nic, tx_timeout_task);
+       struct net_device *netdev = nic->netdev;
 
        DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
                readb(&nic->csr->scb.status));
@@ -2637,8 +2638,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
        nic->blink_timer.function = e100_blink_led;
        nic->blink_timer.data = (unsigned long)nic;
 
-       INIT_WORK(&nic->tx_timeout_task,
-               (void (*)(void *))e100_tx_timeout_task, netdev);
+       INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
        if((err = e100_alloc(nic))) {
                DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
index 32dde0adb6839c107fad7ce9cbd32ec7686674e1..73f3a85fd2384f82b3f57ea83722e7adb9969de2 100644 (file)
@@ -190,7 +190,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev);
 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 static void e1000_tx_timeout(struct net_device *dev);
-static void e1000_reset_task(struct net_device *dev);
+static void e1000_reset_task(struct work_struct *work);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                        struct sk_buff *skb);
@@ -914,8 +914,7 @@ e1000_probe(struct pci_dev *pdev,
        adapter->phy_info_timer.function = &e1000_update_phy_info;
        adapter->phy_info_timer.data = (unsigned long) adapter;
 
-       INIT_WORK(&adapter->reset_task,
-               (void (*)(void *))e1000_reset_task, netdev);
+       INIT_WORK(&adapter->reset_task, e1000_reset_task);
 
        e1000_check_options(adapter);
 
@@ -3306,9 +3305,10 @@ e1000_tx_timeout(struct net_device *netdev)
 }
 
 static void
-e1000_reset_task(struct net_device *netdev)
+e1000_reset_task(struct work_struct *work)
 {
-       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_adapter *adapter =
+               container_of(work, struct e1000_adapter, reset_task);
 
        e1000_reinit_locked(adapter);
 }
index d39e8480ca5652537fd68da4c526eb20ca92e236..c62d9c6363c60c94093ee2a1a0cac86ec8ed02b1 100644 (file)
@@ -463,7 +463,7 @@ static void cleanup_card(struct net_device *dev)
        release_region(dev->base_addr, E21_IO_EXTENT);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index a4eb0dc99ecffe42c92b87a1bacac5c72234caa1..b4463094c93abe8e04a8dd4313170279d1877057 100644 (file)
@@ -1827,7 +1827,7 @@ int __init init_module(void)
        return n_eepro ? 0 : -ENODEV;
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int i;
index e14be020e5624b2f5f999b1d90a0771cb0004f69..4a50fcb5ad6b6538b62fb58530182ecc2051e17c 100644 (file)
@@ -1719,7 +1719,7 @@ int __init init_module(void)
        return -ENXIO;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index 6ad696101418f2ac22da0131fc8bd559ad48c32b..83fa32f7239857c9d6a72f05eca538343b00c76b 100644 (file)
@@ -2224,11 +2224,12 @@ static int ehea_stop(struct net_device *dev)
        return ret;
 }
 
-static void ehea_reset_port(void *data)
+static void ehea_reset_port(struct work_struct *work)
 {
        int ret;
-       struct net_device *dev = data;
-       struct ehea_port *port = netdev_priv(dev);
+       struct ehea_port *port =
+               container_of(work, struct ehea_port, reset_task);
+       struct net_device *dev = port->netdev;
 
        port->resets++;
        down(&port->port_lock);
@@ -2379,7 +2380,7 @@ static int ehea_setup_single_port(struct ehea_port *port,
        dev->tx_timeout = &ehea_tx_watchdog;
        dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
-       INIT_WORK(&port->reset_task, ehea_reset_port, dev);
+       INIT_WORK(&port->reset_task, ehea_reset_port);
 
        ehea_set_ethtool_ops(dev);
 
index fd7b32a24ea4f33d9cf1461daff5433b2fcc5a36..2d2ea94a00bb72cd7568d4a4f2d185be068aec8b 100644 (file)
@@ -455,7 +455,7 @@ static void cleanup_card(struct net_device *dev)
        iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index b7b8bc2a6307ef7215df4d4390ff4c53f8fc9934..93283e386f3a71d44bf6d987cade944146992753 100644 (file)
@@ -1475,7 +1475,7 @@ int __init init_module(void)
        return -ENXIO;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index 1ed9cccd3c11ea5bdca936de6abbe1c6bc6e4e3d..3c33d6f6a6a662cb6563d144c87cab1e51c39b2c 100644 (file)
@@ -168,8 +168,9 @@ struct baycom_state {
        int magic;
 
         struct pardevice *pdev;
+       struct net_device *dev;
        unsigned int work_running;
-       struct work_struct run_work;
+       struct delayed_work run_work;
        unsigned int modem;
        unsigned int bitrate;
        unsigned char stat;
@@ -659,16 +660,18 @@ static int receive(struct net_device *dev, int cnt)
 #define GETTICK(x)
 #endif /* __i386__ */
 
-static void epp_bh(struct net_device *dev)
+static void epp_bh(struct work_struct *work)
 {
+       struct net_device *dev;
        struct baycom_state *bc;
        struct parport *pp;
        unsigned char stat;
        unsigned char tmp[2];
        unsigned int time1 = 0, time2 = 0, time3 = 0;
        int cnt, cnt2;
-       
-       bc = netdev_priv(dev);
+
+       bc = container_of(work, struct baycom_state, run_work.work);
+       dev = bc->dev;
        if (!bc->work_running)
                return;
        baycom_int_freq(bc);
@@ -889,7 +892,7 @@ static int epp_open(struct net_device *dev)
                 return -EBUSY;
         }
         dev->irq = /*pp->irq*/ 0;
-       INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev);
+       INIT_DELAYED_WORK(&bc->run_work, epp_bh);
        bc->work_running = 1;
        bc->modem = EPP_CONVENTIONAL;
        if (eppconfig(bc))
@@ -1213,6 +1216,7 @@ static void __init baycom_epp_dev_setup(struct net_device *dev)
        /*
         * initialize part of the baycom_state struct
         */
+       bc->dev = dev;
        bc->magic = BAYCOM_MAGIC;
        bc->cfg.fclk = 19666600;
        bc->cfg.bps = 9600;
index 0f8b9afd55b43c2b739713252a4aab37be086f3b..e6e721aff6f688baa925754b1df54ef8d12cce41 100644 (file)
@@ -252,7 +252,7 @@ static inline void z8530_isr(struct scc_info *info);
 static irqreturn_t scc_isr(int irq, void *dev_id);
 static void rx_isr(struct scc_priv *priv);
 static void special_condition(struct scc_priv *priv, int rc);
-static void rx_bh(void *arg);
+static void rx_bh(struct work_struct *);
 static void tx_isr(struct scc_priv *priv);
 static void es_isr(struct scc_priv *priv);
 static void tm_isr(struct scc_priv *priv);
@@ -579,7 +579,7 @@ static int __init setup_adapter(int card_base, int type, int n)
                priv->param.clocks = TCTRxCP | RCRTxCP;
                priv->param.persist = 256;
                priv->param.dma = -1;
-               INIT_WORK(&priv->rx_work, rx_bh, priv);
+               INIT_WORK(&priv->rx_work, rx_bh);
                dev->priv = priv;
                sprintf(dev->name, "dmascc%i", 2 * n + i);
                dev->base_addr = card_base;
@@ -1272,9 +1272,9 @@ static void special_condition(struct scc_priv *priv, int rc)
 }
 
 
-static void rx_bh(void *arg)
+static void rx_bh(struct work_struct *ugli_api)
 {
-       struct scc_priv *priv = arg;
+       struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
        int i = priv->rx_tail;
        int cb;
        unsigned long flags;
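The ehea and dmascc hunks above are instances of the 2.6.20-era workqueue API change that runs through most of this merge: INIT_WORK() drops its data argument, the handler now receives the struct work_struct itself, and the private data is recovered with container_of() on the embedding structure. A sketch of the converted shape, with made-up example_port/example_reset_task names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct example_port {
        struct net_device *netdev;      /* saved so the handler can find it */
        struct work_struct reset_task;
};

/* new-style handler: gets the work item, not a void * cookie */
static void example_reset_task(struct work_struct *work)
{
        struct example_port *port =
                container_of(work, struct example_port, reset_task);
        struct net_device *dev = port->netdev;

        netif_stop_queue(dev);
        /* ... re-initialise the hardware here ... */
        netif_wake_queue(dev);
}

static void example_port_setup(struct example_port *port)
{
        /* new-style initialisation: only the handler, no data pointer */
        INIT_WORK(&port->reset_task, example_reset_task);
}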
index 6abcfd2a4b2805825e92736eab16c97e26b5b29c..99a36cc3f8df4dddcba0d4f5e70c7847628e6bcb 100644 (file)
@@ -482,7 +482,7 @@ static void cleanup_card(struct net_device *dev)
        release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index 29470970aa2707e3a7286358be76750f0bed6cc5..635b13c2e2aac856f786253f07a6cd95a48a9edf 100644 (file)
@@ -444,7 +444,7 @@ static void cleanup_card(struct net_device *dev)
        release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index f73f10a0a56217c13f378dd5466209f33e2d73d4..407d2acbf7c79c057e18810c89254ee86b1699a3 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/netdevice.h>
 
 #include <asm/io.h>
+#include <asm/dcr.h>
 
 /*
  * These MAL "versions" probably aren't the real versions IBM uses for these 
@@ -191,6 +192,7 @@ struct mal_commac {
 
 struct ibm_ocp_mal {
        int                     dcrbase;
+       dcr_host_t              dcrhost;
 
        struct list_head        poll_list;
        struct net_device       poll_dev;
@@ -207,12 +209,12 @@ struct ibm_ocp_mal {
 
 static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
 {
-       return mfdcr(mal->dcrbase + reg);
+       return dcr_read(mal->dcrhost, mal->dcrbase + reg);
 }
 
 static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
 {
-       mtdcr(mal->dcrbase + reg, val);
+       dcr_write(mal->dcrhost, mal->dcrbase + reg, val);
 }
 
 /* Register MAL devices */
index 44c9f993dcc4fbe657b216e3c3a8e7a3276c6d97..99343b5836b87bfce57e48046570c61ecc4ffc0b 100644 (file)
@@ -50,7 +50,6 @@
 #include <asm/semaphore.h>
 #include <asm/hvcall.h>
 #include <asm/atomic.h>
-#include <asm/iommu.h>
 #include <asm/vio.h>
 #include <asm/uaccess.h>
 #include <linux/seq_file.h>
@@ -1000,8 +999,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
        adapter->mac_addr = 0;
        memcpy(&adapter->mac_addr, mac_addr_p, 6);
 
-       adapter->liobn = dev->iommu_table->it_index;
-
        netdev->irq = dev->irq;
        netdev->open               = ibmveth_open;
        netdev->poll               = ibmveth_poll;
@@ -1115,7 +1112,6 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
 
        seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
-       seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
        seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
                   current_mac[0], current_mac[1], current_mac[2],
                   current_mac[3], current_mac[4], current_mac[5]);
index f5b25bff15403336de88df991b1b51cf5b8ff95e..bb69ccae8aceaac5b478efb24fa94f1516b366ae 100644 (file)
@@ -118,7 +118,6 @@ struct ibmveth_adapter {
     struct net_device_stats stats;
     unsigned int mcastFilterSize;
     unsigned long mac_addr;
-    unsigned long liobn;
     void * buffer_list_addr;
     void * filter_list_addr;
     dma_addr_t buffer_list_dma;
index b32c52ed19d76c62b7503a3925b294148a4165fc..f0c61f3b2a82e992fee61cdf664dd2d42e62cfdc 100644 (file)
@@ -560,9 +560,9 @@ static inline int mcs_find_endpoints(struct mcs_cb *mcs,
        return ret;
 }
 
-static void mcs_speed_work(void *arg)
+static void mcs_speed_work(struct work_struct *work)
 {
-       struct mcs_cb *mcs = arg;
+       struct mcs_cb *mcs = container_of(work, struct mcs_cb, work);
        struct net_device *netdev = mcs->netdev;
 
        mcs_speed_change(mcs);
@@ -927,7 +927,7 @@ static int mcs_probe(struct usb_interface *intf,
        irda_qos_bits_to_value(&mcs->qos);
 
        /* Speed change work initialisation*/
-       INIT_WORK(&mcs->work, mcs_speed_work, mcs);
+       INIT_WORK(&mcs->work, mcs_speed_work);
 
        /* Override the network functions we need to use */
        ndev->hard_start_xmit = mcs_hard_xmit;
index f9a1c88a42831c542d59931b55ce6c74397d60e5..9137e239fac2e55120fed2520db2dccbe5bfcbe9 100644 (file)
@@ -704,9 +704,9 @@ static int pxa_irda_stop(struct net_device *dev)
        return 0;
 }
 
-static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
+static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
 {
-       struct net_device *dev = dev_get_drvdata(_dev);
+       struct net_device *dev = platform_get_drvdata(_dev);
        struct pxa_irda *si;
 
        if (dev && netif_running(dev)) {
@@ -718,9 +718,9 @@ static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
        return 0;
 }
 
-static int pxa_irda_resume(struct device *_dev)
+static int pxa_irda_resume(struct platform_device *_dev)
 {
-       struct net_device *dev = dev_get_drvdata(_dev);
+       struct net_device *dev = platform_get_drvdata(_dev);
        struct pxa_irda *si;
 
        if (dev && netif_running(dev)) {
@@ -746,9 +746,8 @@ static int pxa_irda_init_iobuf(iobuff_t *io, int size)
        return io->head ? 0 : -ENOMEM;
 }
 
-static int pxa_irda_probe(struct device *_dev)
+static int pxa_irda_probe(struct platform_device *pdev)
 {
-       struct platform_device *pdev = to_platform_device(_dev);
        struct net_device *dev;
        struct pxa_irda *si;
        unsigned int baudrate_mask;
@@ -822,9 +821,9 @@ err_mem_1:
        return err;
 }
 
-static int pxa_irda_remove(struct device *_dev)
+static int pxa_irda_remove(struct platform_device *_dev)
 {
-       struct net_device *dev = dev_get_drvdata(_dev);
+       struct net_device *dev = platform_get_drvdata(_dev);
 
        if (dev) {
                struct pxa_irda *si = netdev_priv(dev);
@@ -840,9 +839,10 @@ static int pxa_irda_remove(struct device *_dev)
        return 0;
 }
 
-static struct device_driver pxa_ir_driver = {
-       .name           = "pxa2xx-ir",
-       .bus            = &platform_bus_type,
+static struct platform_driver pxa_ir_driver = {
+       .driver         = {
+               .name   = "pxa2xx-ir",
+       },
        .probe          = pxa_irda_probe,
        .remove         = pxa_irda_remove,
        .suspend        = pxa_irda_suspend,
@@ -851,12 +851,12 @@ static struct device_driver pxa_ir_driver = {
 
 static int __init pxa_irda_init(void)
 {
-       return driver_register(&pxa_ir_driver);
+       return platform_driver_register(&pxa_ir_driver);
 }
 
 static void __exit pxa_irda_exit(void)
 {
-       driver_unregister(&pxa_ir_driver);
+       platform_driver_unregister(&pxa_ir_driver);
 }
 
 module_init(pxa_irda_init);
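The pxaficp_ir change above is the usual move from a bare struct device_driver on platform_bus_type to struct platform_driver: the callbacks take a struct platform_device * directly, driver data travels via platform_set_drvdata()/platform_get_drvdata(), and registration goes through platform_driver_register(). A minimal sketch of that shape; the "example-ir" name and the alloc_etherdev() stand-in are assumptions, not the real driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int example_probe(struct platform_device *pdev)
{
        struct net_device *dev;

        dev = alloc_etherdev(0);        /* stands in for the real device setup */
        if (!dev)
                return -ENOMEM;
        platform_set_drvdata(pdev, dev);
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        free_netdev(dev);
        return 0;
}

static struct platform_driver example_driver = {
        .driver         = {
                .name   = "example-ir",
        },
        .probe          = example_probe,
        .remove         = example_remove,
};

static int __init example_init(void)
{
        return platform_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
        platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);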
index 9fa294a546d625bd78070ac9f5947b30607f6674..2a57bc67ce357cc45dc82c589941d9fe12fbd198 100644 (file)
@@ -22,7 +22,7 @@
 
 struct sir_fsm {
        struct semaphore        sem;
-       struct work_struct      work;
+       struct delayed_work     work;
        unsigned                state, substate;
        int                     param;
        int                     result;
index 3b5854d10c17f348ffb6482c738c4a865a61cc0d..17b0c3ab6201a27d10af5da3254c876af52025ee 100644 (file)
@@ -100,9 +100,9 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev)
  * Both must be unlocked/restarted on completion - but only on final exit.
  */
 
-static void sirdev_config_fsm(void *data)
+static void sirdev_config_fsm(struct work_struct *work)
 {
-       struct sir_dev *dev = data;
+       struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
        struct sir_fsm *fsm = &dev->fsm;
        int next_state;
        int ret = -1;
@@ -309,8 +309,8 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
        fsm->param = param;
        fsm->result = 0;
 
-       INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
-       queue_work(irda_sir_wq, &fsm->work);
+       INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
+       queue_delayed_work(irda_sir_wq, &fsm->work, 0);
        return 0;
 }
 
index 3b4c478759356fa9f7b8af473385520fa1c023c2..c14a74634fd50a67f866cc9d2621a5772ede8f35 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/usb.h>
 #include <linux/crc32.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <net/irda/irda.h>
 #include <net/irda/irlap.h>
 #include <net/irda/irda_device.h>
index 2284e2ce1692630226e065c0c7c3c0276f194c04..d6f4f185bf3780de7c4375308602675e5628f76c 100644 (file)
@@ -166,7 +166,7 @@ struct veth_msg {
 
 struct veth_lpar_connection {
        HvLpIndex remote_lp;
-       struct work_struct statemachine_wq;
+       struct delayed_work statemachine_wq;
        struct veth_msg *msgs;
        int num_events;
        struct veth_cap_data local_caps;
@@ -456,7 +456,7 @@ static struct kobj_type veth_port_ktype = {
 
 static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx)
 {
-       schedule_work(&cnx->statemachine_wq);
+       schedule_delayed_work(&cnx->statemachine_wq, 0);
 }
 
 static void veth_take_cap(struct veth_lpar_connection *cnx,
@@ -638,9 +638,11 @@ static int veth_process_caps(struct veth_lpar_connection *cnx)
 }
 
 /* FIXME: The gotos here are a bit dubious */
-static void veth_statemachine(void *p)
+static void veth_statemachine(struct work_struct *work)
 {
-       struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p;
+       struct veth_lpar_connection *cnx =
+               container_of(work, struct veth_lpar_connection,
+                            statemachine_wq.work);
        int rlp = cnx->remote_lp;
        int rc;
 
@@ -827,7 +829,7 @@ static int veth_init_connection(u8 rlp)
 
        cnx->remote_lp = rlp;
        spin_lock_init(&cnx->lock);
-       INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx);
+       INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine);
 
        init_timer(&cnx->ack_timer);
        cnx->ack_timer.function = veth_timed_ack;
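Where the handler also needs to be queued with a delay or re-armed (baycom_epp, the sir_dev fsm, and iseries_veth above), the same workqueue conversion switches struct work_struct to struct delayed_work, INIT_WORK to INIT_DELAYED_WORK, and container_of() has to go through the embedded .work member. A hedged sketch with hypothetical example_state names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct example_state {
        struct net_device *dev;         /* explicit back-pointer, as baycom_epp now keeps */
        struct delayed_work run_work;
};

static void example_bh(struct work_struct *work)
{
        /* the work item lives inside run_work.work, so container_of goes through it */
        struct example_state *st =
                container_of(work, struct example_state, run_work.work);

        /* ... periodic work on st->dev ... */
        schedule_delayed_work(&st->run_work, HZ / 100); /* re-arm */
}

static void example_start(struct example_state *st)
{
        INIT_DELAYED_WORK(&st->run_work, example_bh);
        schedule_delayed_work(&st->run_work, 0);        /* delay 0 behaves like queue_work */
}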
index 7b127212e62b579107881901e5d4e00e07d13464..e628126c9c49178211032801e1da1a17a64af338 100644 (file)
@@ -106,7 +106,7 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
 void ixgb_set_ethtool_ops(struct net_device *netdev);
 static void ixgb_tx_timeout(struct net_device *dev);
-static void ixgb_tx_timeout_task(struct net_device *dev);
+static void ixgb_tx_timeout_task(struct work_struct *work);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
                                  struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -489,8 +489,7 @@ ixgb_probe(struct pci_dev *pdev,
        adapter->watchdog_timer.function = &ixgb_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
 
-       INIT_WORK(&adapter->tx_timeout_task,
-                 (void (*)(void *))ixgb_tx_timeout_task, netdev);
+       INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
 
        strcpy(netdev->name, "eth%d");
        if((err = register_netdev(netdev)))
@@ -1493,9 +1492,10 @@ ixgb_tx_timeout(struct net_device *netdev)
 }
 
 static void
-ixgb_tx_timeout_task(struct net_device *netdev)
+ixgb_tx_timeout_task(struct work_struct *work)
 {
-       struct ixgb_adapter *adapter = netdev_priv(netdev);
+       struct ixgb_adapter *adapter =
+               container_of(work, struct ixgb_adapter, tx_timeout_task);
 
        adapter->tx_timeout_count++;
        ixgb_down(adapter, TRUE);
index 4256c13c73c290d8a70e7e12114b2882a4ed467f..a3843320dbe1df8efdebf7aef1d12e77e197a280 100644 (file)
@@ -368,7 +368,7 @@ static void cleanup_card(struct net_device *dev)
        kfree(lp);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index f4d815bca643e878a93eb1a9cfb1fe4d091d0923..ea392f2a5aa26aeb9c9f7670fbb6303e2c9c1672 100644 (file)
 #define DEB(x,y)       if (i596_debug & (x)) { y; }
 
 
-#define  CHECK_WBACK(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
+#define  CHECK_WBACK(priv, addr,len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
-#define  CHECK_INV(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_FROM_DEVICE); } while(0)
+#define  CHECK_INV(priv, addr,len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
 
-#define  CHECK_WBACK_INV(addr,len) \
-       do { dma_cache_sync((void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
+#define  CHECK_WBACK_INV(priv, addr,len) \
+       do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
 
 
 #define PA_I82596_RESET                0       /* Offsets relative to LASI-LAN-Addr.*/
@@ -449,10 +449,10 @@ static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
 
 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-       CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+       CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
        while (--delcnt && lp->iscp.stat) {
                udelay(10);
-               CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
+               CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
        }
        if (!delcnt) {
                printk("%s: %s, iscp.stat %04x, didn't clear\n",
@@ -466,10 +466,10 @@ static inline int wait_istat(struct net_device *dev, struct i596_private *lp, in
 
 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
 {
-       CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
        while (--delcnt && lp->scb.command) {
                udelay(10);
-               CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
        }
        if (!delcnt) {
                printk("%s: %s, status %4.4x, cmd %4.4x.\n",
@@ -522,7 +522,7 @@ static void i596_display_data(struct net_device *dev)
                        rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
                rbd = rbd->v_next;
        } while (rbd != lp->rbd_head);
-       CHECK_INV(lp, sizeof(struct i596_private));
+       CHECK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -592,7 +592,7 @@ static inline void init_rx_bufs(struct net_device *dev)
        rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
        rfd->cmd = CMD_EOL|CMD_FLEX;
 
-       CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 static inline void remove_rx_bufs(struct net_device *dev)
@@ -629,7 +629,7 @@ static void rebuild_rx_bufs(struct net_device *dev)
        lp->rbd_head = lp->rbds;
        lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
 
-       CHECK_WBACK_INV(lp, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
 }
 
 
@@ -663,8 +663,8 @@ static int init_i596_mem(struct net_device *dev)
 
        DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
 
-       CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
-       CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
+       CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
+       CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
 
        MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
 
@@ -678,25 +678,25 @@ static int init_i596_mem(struct net_device *dev)
        rebuild_rx_bufs(dev);
 
        lp->scb.command = 0;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
        enable_irq(dev->irq);   /* enable IRQs from LAN */
 
        DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
        memcpy(lp->cf_cmd.i596_config, init_setup, 14);
        lp->cf_cmd.cmd.command = CmdConfigure;
-       CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
+       CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
        i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
        DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
        memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
        lp->sa_cmd.cmd.command = CmdSASetup;
-       CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
+       CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
        DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
        lp->tdr_cmd.cmd.command = CmdTDR;
-       CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
+       CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
        i596_add_cmd(dev, &lp->tdr_cmd.cmd);
 
        spin_lock_irqsave (&lp->lock, flags);
@@ -708,7 +708,7 @@ static int init_i596_mem(struct net_device *dev)
        DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
        lp->scb.command = RX_START;
        lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 
        CA(dev);
 
@@ -740,13 +740,13 @@ static inline int i596_rx(struct net_device *dev)
 
        rfd = lp->rfd_head;             /* Ref next frame to check */
 
-       CHECK_INV(rfd, sizeof(struct i596_rfd));
+       CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
        while ((rfd->stat) & STAT_C) {  /* Loop while complete frames */
                if (rfd->rbd == I596_NULL)
                        rbd = NULL;
                else if (rfd->rbd == lp->rbd_head->b_addr) {
                        rbd = lp->rbd_head;
-                       CHECK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
                else {
                        printk("%s: rbd chain broken!\n", dev->name);
@@ -790,7 +790,7 @@ static inline int i596_rx(struct net_device *dev)
                                dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
                                rbd->v_data = newskb->data;
                                rbd->b_data = WSWAPchar(dma_addr);
-                               CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+                               CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
                        }
                        else
                                skb = dev_alloc_skb(pkt_len + 2);
@@ -842,7 +842,7 @@ memory_squeeze:
                if (rbd != NULL && (rbd->count & 0x4000)) {
                        rbd->count = 0;
                        lp->rbd_head = rbd->v_next;
-                       CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
+                       CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
                }
 
                /* Tidy the frame descriptor, marking it as end of list */
@@ -860,10 +860,10 @@ memory_squeeze:
 
                lp->scb.rfd = rfd->b_next;
                lp->rfd_head = rfd->v_next;
-               CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
-               CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
+               CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
+               CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
                rfd = lp->rfd_head;
-               CHECK_INV(rfd, sizeof(struct i596_rfd));
+               CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
        }
 
        DEB(DEB_RXFRAME, printk("frames %d\n", frames));
@@ -902,12 +902,12 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
                }
-               CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
+               CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
        }
 
        wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
        lp->scb.cmd = I596_NULL;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
 }
 
 
@@ -925,7 +925,7 @@ static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
 
        /* FIXME: this command might cause an lpmc */
        lp->scb.command = CUC_ABORT | RX_ABORT;
-       CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
        CA(dev);
 
        /* wait for shutdown */
@@ -951,20 +951,20 @@ static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
        cmd->command |= (CMD_EOL | CMD_INTR);
        cmd->v_next = NULL;
        cmd->b_next = I596_NULL;
-       CHECK_WBACK(cmd, sizeof(struct i596_cmd));
+       CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
 
        spin_lock_irqsave (&lp->lock, flags);
 
        if (lp->cmd_head != NULL) {
                lp->cmd_tail->v_next = cmd;
                lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
-               CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
+               CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
        } else {
                lp->cmd_head = cmd;
                wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
                lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
                lp->scb.command = CUC_START;
-               CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
                CA(dev);
        }
        lp->cmd_tail = cmd;
@@ -998,12 +998,12 @@ static int i596_test(struct net_device *dev)
        data = virt_to_dma(lp,tint);
 
        tint[1] = -1;
-       CHECK_WBACK(tint,PAGE_SIZE);
+       CHECK_WBACK(lp, tint, PAGE_SIZE);
 
        MPU_PORT(dev, 1, data);
 
        for(data = 1000000; data; data--) {
-               CHECK_INV(tint,PAGE_SIZE);
+               CHECK_INV(lp, tint, PAGE_SIZE);
                if(tint[1] != -1)
                        break;
 
@@ -1061,7 +1061,7 @@ static void i596_tx_timeout (struct net_device *dev)
                /* Issue a channel attention signal */
                DEB(DEB_ERRORS, printk("Kicking board.\n"));
                lp->scb.command = CUC_START | RX_START;
-               CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
+               CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
                CA (dev);
                lp->last_restart = lp->stats.tx_packets;
        }
@@ -1118,8 +1118,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
                tbd->data = WSWAPchar(tx_cmd->dma_addr);
 
                DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
-               CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
-               CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
+               CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
+               CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
                i596_add_cmd(dev, &tx_cmd->cmd);
 
                lp->stats.tx_packets++;
@@ -1228,7 +1228,7 @@ static int __devinit i82596_probe(struct net_device *dev,
        lp->dma_addr = dma_addr;
        lp->dev = gen_dev;
 
-       CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
+       CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
 
        i = register_netdev(dev);
        if (i) {
@@ -1295,7 +1295,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
                        DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
 
                while (lp->cmd_head != NULL) {
-                       CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
+                       CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
                        if (!(lp->cmd_head->status & STAT_C))
                                break;
 
@@ -1358,7 +1358,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
                        }
                        ptr->v_next = NULL;
                        ptr->b_next = I596_NULL;
-                       CHECK_WBACK(ptr, sizeof(struct i596_cmd));
+                       CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
                        lp->last_cmd = jiffies;
                }
 
@@ -1372,13 +1372,13 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 
                        ptr->command &= 0x1fff;
                        ptr = ptr->v_next;
-                       CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
+                       CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
                }
 
                if ((lp->cmd_head != NULL))
                        ack_cmd |= CUC_START;
                lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
-               CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
+               CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
        }
        if ((status & 0x1000) || (status & 0x4000)) {
                if ((status & 0x4000))
@@ -1397,7 +1397,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
        }
        wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
        lp->scb.command = ack_cmd;
-       CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
        /* DANGER: I suspect that some kind of interrupt
         acknowledgement aside from acking the 82596 might be needed
@@ -1426,7 +1426,7 @@ static int i596_close(struct net_device *dev)
 
        wait_cmd(dev, lp, 100, "close1 timed out");
        lp->scb.command = CUC_ABORT | RX_ABORT;
-       CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
+       CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
 
        CA(dev);
 
@@ -1486,7 +1486,7 @@ static void set_multicast_list(struct net_device *dev)
                               dev->name);
                else {
                        lp->cf_cmd.cmd.command = CmdConfigure;
-                       CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
+                       CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
                        i596_add_cmd(dev, &lp->cf_cmd.cmd);
                }
        }
@@ -1514,7 +1514,7 @@ static void set_multicast_list(struct net_device *dev)
                                DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
                                                dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
                }
-               CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
+               CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
        }
 }
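The bulk of the lasi_82596 churn above follows from dma_cache_sync() gaining the owning struct device as its first argument, so every CHECK_WBACK/CHECK_INV/CHECK_WBACK_INV site now passes the private structure that carries lp->dev. A small sketch of the idea with assumed example_priv/EXAMPLE_* names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct example_priv {
        struct device *dev;     /* device that owns the DMA mappings (lp->dev above) */
        /* ... descriptors shared with the chip would live here ... */
};

/* writeback: make CPU stores visible to the chip */
#define EXAMPLE_WBACK(priv, addr, len) \
        dma_cache_sync((priv)->dev, (void *)(addr), (len), DMA_TO_DEVICE)

/* invalidate: discard stale CPU copies before reading what the chip wrote */
#define EXAMPLE_INV(priv, addr, len) \
        dma_cache_sync((priv)->dev, (void *)(addr), (len), DMA_FROM_DEVICE)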
index 5795ee1162054a5701076f1d2092528de95ff119..0a08d0c4e7b4b1063d76c7d46ee1a0d85f6f6724 100644 (file)
@@ -440,7 +440,7 @@ static void cleanup_card(struct net_device *dev)
        iounmap(ei_status.mem);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index 9997081c6daea8458fd1b1fad296733f55885dcd..c41ae4286eeae76daba0ee77bf24d08a3cbf1963 100644 (file)
@@ -277,9 +277,11 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev)
  *
  * Actual routine to reset the adapter when a timeout on Tx has occurred
  */
-static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
+static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
 {
-       struct mv643xx_private *mp = netdev_priv(dev);
+       struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
+                                                 tx_timeout_task);
+       struct net_device *dev = mp->mii.dev; /* yuck */
 
        if (!netif_running(dev))
                return;
@@ -1098,7 +1100,7 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
                                         ETH_TX_ENABLE_INTERRUPT;
                        mp->tx_skb[tx_index] = skb;
                } else
-                       mp->tx_skb[tx_index] = 0;
+                       mp->tx_skb[tx_index] = NULL;
 
                desc = &mp->p_tx_desc_area[tx_index];
                desc->l4i_chk = 0;
@@ -1134,7 +1136,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
                eth_tx_fill_frag_descs(mp, skb);
 
                length = skb_headlen(skb);
-               mp->tx_skb[tx_index] = 0;
+               mp->tx_skb[tx_index] = NULL;
        } else {
                cmd_sts |= ETH_ZERO_PADDING |
                           ETH_TX_LAST_DESC |
@@ -1360,8 +1362,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 #endif
 
        /* Configure the timeout task */
-       INIT_WORK(&mp->tx_timeout_task,
-                       (void (*)(void *))mv643xx_eth_tx_timeout_task, dev);
+       INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
 
        spin_lock_init(&mp->lock);
 
index 56a82d8ee8f52c4a88c4f8bfa257b2f7122d5cb2..e246d00bba6d0467a851d642984894cbd021ee22 100644 (file)
@@ -184,7 +184,7 @@ static int m147lance_close(struct net_device *dev)
 MODULE_LICENSE("GPL");
 
 static struct net_device *dev_mvme147_lance;
-int init_module(void)
+int __init init_module(void)
 {
        dev_mvme147_lance = mvme147lance_probe(-1);
        if (IS_ERR(dev_mvme147_lance))
@@ -192,7 +192,7 @@ int init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        struct m147lance_private *lp = dev_mvme147_lance->priv;
        unregister_netdev(dev_mvme147_lance);
index 36350e6db1c1bec6a6aac55eb3c578a1846b659e..81f127a78afae71206afde2a32ec4295c068aa20 100644 (file)
@@ -89,7 +89,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define MYRI10GE_EEPROM_STRINGS_SIZE 256
 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
 
-#define MYRI10GE_NO_CONFIRM_DATA 0xffffffff
+#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
 #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
 
 struct myri10ge_rx_buffer_state {
@@ -156,8 +156,8 @@ struct myri10ge_priv {
        int sram_size;
        unsigned long board_span;
        unsigned long iomem_base;
-       u32 __iomem *irq_claim;
-       u32 __iomem *irq_deassert;
+       __be32 __iomem *irq_claim;
+       __be32 __iomem *irq_deassert;
        char *mac_addr_string;
        struct mcp_cmd_response *cmd;
        dma_addr_t cmd_bus;
@@ -165,10 +165,10 @@ struct myri10ge_priv {
        dma_addr_t fw_stats_bus;
        struct pci_dev *pdev;
        int msi_enabled;
-       unsigned int link_state;
+       __be32 link_state;
        unsigned int rdma_tags_available;
        int intr_coal_delay;
-       u32 __iomem *intr_coal_delay_ptr;
+       __be32 __iomem *intr_coal_delay_ptr;
        int mtrr;
        int wake_queue;
        int stop_queue;
@@ -273,6 +273,11 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
 
 #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
 
+static inline void put_be32(__be32 val, __be32 __iomem *p)
+{
+       __raw_writel((__force __u32)val, (__force void __iomem *)p);
+}
+
 static int
 myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
                  struct myri10ge_cmd *data, int atomic)
@@ -296,7 +301,7 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
 
        buf->response_addr.low = htonl(dma_low);
        buf->response_addr.high = htonl(dma_high);
-       response->result = MYRI10GE_NO_RESPONSE_RESULT;
+       response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
        mb();
        myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
 
@@ -311,14 +316,14 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
                 * (1ms will be enough for those commands) */
                for (sleep_total = 0;
                     sleep_total < 1000
-                    && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+                    && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
                     sleep_total += 10)
                        udelay(10);
        } else {
                /* use msleep for most command */
                for (sleep_total = 0;
                     sleep_total < 15
-                    && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+                    && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
                     sleep_total++)
                        msleep(1);
        }
@@ -393,7 +398,7 @@ abort:
 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
 {
        char __iomem *submit;
-       u32 buf[16];
+       __be32 buf[16];
        u32 dma_low, dma_high;
        int i;
 
@@ -410,7 +415,7 @@ static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
 
        buf[0] = htonl(dma_high);       /* confirm addr MSW */
        buf[1] = htonl(dma_low);        /* confirm addr LSW */
-       buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA);       /* confirm data */
+       buf[2] = MYRI10GE_NO_CONFIRM_DATA;      /* confirm data */
        buf[3] = htonl(dma_high);       /* dummy addr MSW */
        buf[4] = htonl(dma_low);        /* dummy addr LSW */
        buf[5] = htonl(enable); /* enable? */
@@ -479,7 +484,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
        }
 
        /* check id */
-       hdr_offset = ntohl(*(u32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
+       hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
        if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
                dev_err(dev, "Bad firmware file\n");
                status = -EINVAL;
@@ -550,7 +555,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
 static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
 {
        char __iomem *submit;
-       u32 buf[16];
+       __be32 buf[16];
        u32 dma_low, dma_high, size;
        int status, i;
 
@@ -600,7 +605,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
 
        buf[0] = htonl(dma_high);       /* confirm addr MSW */
        buf[1] = htonl(dma_low);        /* confirm addr LSW */
-       buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA);       /* confirm data */
+       buf[2] = MYRI10GE_NO_CONFIRM_DATA;      /* confirm data */
 
        /* FIX: All newest firmware should un-protect the bottom of
         * the sram before handoff. However, the very first interfaces
@@ -705,21 +710,21 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 
        status |=
            myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
-       mgp->irq_claim = (__iomem u32 *) (mgp->sram + cmd.data0);
+       mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0);
        if (!mgp->msi_enabled) {
                status |= myri10ge_send_cmd
                    (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0);
-               mgp->irq_deassert = (__iomem u32 *) (mgp->sram + cmd.data0);
+               mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
 
        }
        status |= myri10ge_send_cmd
            (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
-       mgp->intr_coal_delay_ptr = (__iomem u32 *) (mgp->sram + cmd.data0);
+       mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
        if (status != 0) {
                dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
                return status;
        }
-       __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+       put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
 
        /* Run a small DMA test.
         * The magic multipliers to the length tell the firmware
@@ -786,14 +791,16 @@ static inline void
 myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
                    struct mcp_kreq_ether_recv *src)
 {
-       u32 low;
+       __be32 low;
 
        low = src->addr_low;
-       src->addr_low = DMA_32BIT_MASK;
-       myri10ge_pio_copy(dst, src, 8 * sizeof(*src));
+       src->addr_low = htonl(DMA_32BIT_MASK);
+       myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
+       mb();
+       myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
        mb();
        src->addr_low = low;
-       __raw_writel(low, &dst->addr_low);
+       put_be32(low, &dst->addr_low);
        mb();
 }
 
@@ -939,11 +946,11 @@ done:
        return retval;
 }
 
-static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
 {
        struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
 
-       if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
+       if ((skb->protocol == htons(ETH_P_8021Q)) &&
            (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
             vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
                skb->csum = hw_csum;
@@ -953,7 +960,7 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
 
 static inline unsigned long
 myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
-                int bytes, int len, int csum)
+                int bytes, int len, __wsum csum)
 {
        dma_addr_t bus;
        struct sk_buff *skb;
@@ -986,12 +993,12 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 
        skb->protocol = eth_type_trans(skb, mgp->dev);
        if (mgp->csum_flag) {
-               if ((skb->protocol == ntohs(ETH_P_IP)) ||
-                   (skb->protocol == ntohs(ETH_P_IPV6))) {
-                       skb->csum = ntohs((u16) csum);
+               if ((skb->protocol == htons(ETH_P_IP)) ||
+                   (skb->protocol == htons(ETH_P_IPV6))) {
+                       skb->csum = csum;
                        skb->ip_summed = CHECKSUM_COMPLETE;
                } else
-                       myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
+                       myri10ge_vlan_ip_csum(skb, csum);
        }
 
        netif_receive_skb(skb);
@@ -1060,12 +1067,12 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
        int idx = rx_done->idx;
        int cnt = rx_done->cnt;
        u16 length;
-       u16 checksum;
+       __wsum checksum;
 
        while (rx_done->entry[idx].length != 0 && *limit != 0) {
                length = ntohs(rx_done->entry[idx].length);
                rx_done->entry[idx].length = 0;
-               checksum = ntohs(rx_done->entry[idx].checksum);
+               checksum = csum_unfold(rx_done->entry[idx].checksum);
                if (length <= mgp->small_bytes)
                        rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
                                                 mgp->small_bytes,
@@ -1142,7 +1149,7 @@ static int myri10ge_poll(struct net_device *netdev, int *budget)
 
        if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) {
                netif_rx_complete(netdev);
-               __raw_writel(htonl(3), mgp->irq_claim);
+               put_be32(htonl(3), mgp->irq_claim);
                return 0;
        }
        return 1;
@@ -1166,7 +1173,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
                netif_rx_schedule(mgp->dev);
 
        if (!mgp->msi_enabled) {
-               __raw_writel(0, mgp->irq_deassert);
+               put_be32(0, mgp->irq_deassert);
                if (!myri10ge_deassert_wait)
                        stats->valid = 0;
                mb();
@@ -1195,7 +1202,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
 
        myri10ge_check_statblock(mgp);
 
-       __raw_writel(htonl(3), mgp->irq_claim + 1);
+       put_be32(htonl(3), mgp->irq_claim + 1);
        return (IRQ_HANDLED);
 }
 
@@ -1233,7 +1240,7 @@ myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
        struct myri10ge_priv *mgp = netdev_priv(netdev);
 
        mgp->intr_coal_delay = coal->rx_coalesce_usecs;
-       __raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+       put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
        return 0;
 }
 
@@ -1748,7 +1755,7 @@ static int myri10ge_open(struct net_device *dev)
                goto abort_with_rings;
        }
 
-       mgp->link_state = -1;
+       mgp->link_state = htonl(~0U);
        mgp->rdma_tags_available = 15;
 
        netif_poll_enable(mgp->dev);    /* must happen prior to any irq */
@@ -1876,7 +1883,7 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
 
        /* re-write the last 32-bits with the valid flags */
        src->flags = last_flags;
-       __raw_writel(*((u32 *) src + 3), (u32 __iomem *) dst + 3);
+       put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
        tx->req += cnt;
        mb();
 }
@@ -1919,7 +1926,8 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
        struct myri10ge_tx_buf *tx = &mgp->tx;
        struct skb_frag_struct *frag;
        dma_addr_t bus;
-       u32 low, high_swapped;
+       u32 low;
+       __be32 high_swapped;
        unsigned int len;
        int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
        u16 pseudo_hdr_offset, cksum_offset;
@@ -1964,7 +1972,6 @@ again:
                        cksum_offset = 0;
                        pseudo_hdr_offset = 0;
                } else {
-                       pseudo_hdr_offset = htons(pseudo_hdr_offset);
                        odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
                        flags |= MXGEFW_FLAGS_CKSUM;
                }
@@ -1986,7 +1993,7 @@ again:
                /* for TSO, pseudo_hdr_offset holds mss.
                 * The firmware figures out where to put
                 * the checksum by parsing the header. */
-               pseudo_hdr_offset = htons(mss);
+               pseudo_hdr_offset = mss;
        } else
 #endif                         /*NETIF_F_TSO */
                /* Mark small packets, and pad out tiny packets */
@@ -2086,7 +2093,7 @@ again:
 #endif                         /* NETIF_F_TSO */
                        req->addr_high = high_swapped;
                        req->addr_low = htonl(low);
-                       req->pseudo_hdr_offset = pseudo_hdr_offset;
+                       req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
                        req->pad = 0;   /* complete solid 16-byte block; does this matter? */
                        req->rdma_count = 1;
                        req->length = htons(seglen);
@@ -2199,6 +2206,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
        struct myri10ge_cmd cmd;
        struct myri10ge_priv *mgp;
        struct dev_mc_list *mc_list;
+       __be32 data[2] = {0, 0};
        int err;
 
        mgp = netdev_priv(dev);
@@ -2237,10 +2245,9 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 
        /* Walk the multicast list, and add each address */
        for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
-               memcpy(&cmd.data0, &mc_list->dmi_addr, 4);
-               memcpy(&cmd.data1, ((char *)&mc_list->dmi_addr) + 4, 2);
-               cmd.data0 = htonl(cmd.data0);
-               cmd.data1 = htonl(cmd.data1);
+               memcpy(data, &mc_list->dmi_addr, 6);
+               cmd.data0 = ntohl(data[0]);
+               cmd.data1 = ntohl(data[1]);
                err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
                                        &cmd, 1);
 
@@ -2615,9 +2622,10 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
  * This watchdog is used to check whether the board has suffered
  * from a parity error and needs to be recovered.
  */
-static void myri10ge_watchdog(void *arg)
+static void myri10ge_watchdog(struct work_struct *work)
 {
-       struct myri10ge_priv *mgp = arg;
+       struct myri10ge_priv *mgp =
+               container_of(work, struct myri10ge_priv, watchdog_work);
        u32 reboot;
        int status;
        u16 cmd, vendor;
@@ -2887,7 +2895,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                    (unsigned long)mgp);
 
        SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
-       INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+       INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
        status = register_netdev(netdev);
        if (status != 0) {
                dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
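Most of the myri10ge diff above is sparse endianness annotation: fields holding NIC/wire byte order move from u32/u16 to __be32/__be16, checksums become __wsum, constants are compared with htonl()/htons(), and raw MMIO writes of already-big-endian values go through the small put_be32() helper so no byte swap is applied twice. The helper below is taken from the hunk above; the example_ack_irq() caller is an illustrative assumption:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/io.h>

/* write a value that is already big-endian to MMIO without swapping it;
 * __raw_writel performs no byte swap, and the __force casts keep sparse quiet */
static inline void put_be32(__be32 val, __be32 __iomem *p)
{
        __raw_writel((__force __u32)val, (__force void __iomem *)p);
}

static void example_ack_irq(__be32 __iomem *irq_claim)
{
        put_be32(htonl(3), irq_claim);  /* device expects the constant big-endian */
}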
index 9519ae7cd5ec74f0dc9e64fca428b7b515c03ebe..29463b301a84f5a1eb9614bb01fab4bd021d2d57 100644 (file)
@@ -6,23 +6,23 @@
 
 /* 8 Bytes */
 struct mcp_dma_addr {
-       u32 high;
-       u32 low;
+       __be32 high;
+       __be32 low;
 };
 
 /* 4 Bytes */
 struct mcp_slot {
-       u16 checksum;
-       u16 length;
+       __sum16 checksum;
+       __be16 length;
 };
 
 /* 64 Bytes */
 struct mcp_cmd {
-       u32 cmd;
-       u32 data0;              /* will be low portion if data > 32 bits */
+       __be32 cmd;
+       __be32 data0;           /* will be low portion if data > 32 bits */
        /* 8 */
-       u32 data1;              /* will be high portion if data > 32 bits */
-       u32 data2;              /* currently unused.. */
+       __be32 data1;           /* will be high portion if data > 32 bits */
+       __be32 data2;           /* currently unused.. */
        /* 16 */
        struct mcp_dma_addr response_addr;
        /* 24 */
@@ -31,8 +31,8 @@ struct mcp_cmd {
 
 /* 8 Bytes */
 struct mcp_cmd_response {
-       u32 data;
-       u32 result;
+       __be32 data;
+       __be32 result;
 };
 
 /*
@@ -73,10 +73,10 @@ union mcp_pso_or_cumlen {
 
 /* 16 Bytes */
 struct mcp_kreq_ether_send {
-       u32 addr_high;
-       u32 addr_low;
-       u16 pseudo_hdr_offset;
-       u16 length;
+       __be32 addr_high;
+       __be32 addr_low;
+       __be16 pseudo_hdr_offset;
+       __be16 length;
        u8 pad;
        u8 rdma_count;
        u8 cksum_offset;        /* where to start computing cksum */
@@ -85,8 +85,8 @@ struct mcp_kreq_ether_send {
 
 /* 8 Bytes */
 struct mcp_kreq_ether_recv {
-       u32 addr_high;
-       u32 addr_low;
+       __be32 addr_high;
+       __be32 addr_low;
 };
 
 /* Commands */
@@ -219,19 +219,19 @@ enum myri10ge_mcp_cmd_status {
 
 struct mcp_irq_data {
        /* add new counters at the beginning */
-       u32 future_use[5];
-       u32 dropped_multicast_filtered;
+       __be32 future_use[5];
+       __be32 dropped_multicast_filtered;
        /* 40 Bytes */
-       u32 send_done_count;
-
-       u32 link_up;
-       u32 dropped_link_overflow;
-       u32 dropped_link_error_or_filtered;
-       u32 dropped_runt;
-       u32 dropped_overrun;
-       u32 dropped_no_small_buffer;
-       u32 dropped_no_big_buffer;
-       u32 rdma_tags_available;
+       __be32 send_done_count;
+
+       __be32 link_up;
+       __be32 dropped_link_overflow;
+       __be32 dropped_link_error_or_filtered;
+       __be32 dropped_runt;
+       __be32 dropped_overrun;
+       __be32 dropped_no_small_buffer;
+       __be32 dropped_no_big_buffer;
+       __be32 rdma_tags_available;
 
        u8 tx_stopped;
        u8 link_down;
index 487f7792fd469bfb871a258690fa92e74d0167d2..16a810dd6d515d4ed7a6f61ceb7b443131c34b9e 100644 (file)
@@ -36,7 +36,7 @@
 struct mcp_gen_header {
        /* the first 4 fields are filled at compile time */
        unsigned header_length;
-       unsigned mcp_type;
+       __be32 mcp_type;
        char version[128];
        unsigned mcp_globals;   /* pointer to mcp-type specific structure */
 
index 787aa4221528cb4f47ad038d7ee6b29d3e86354f..a5c4199e2754cff0abc74303afd443e684386bce 100644 (file)
@@ -867,7 +867,7 @@ static void cleanup_card(struct net_device *dev)
        release_region(dev->base_addr, NE_IO_EXTENT);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index 5fccfea66d8751cd66f300f383b3d95ba1e444c3..089b5bb702fcbccbc5dd6072fe5f5c315e11123b 100644 (file)
@@ -813,7 +813,7 @@ static void cleanup_card(struct net_device *dev)
        release_region(dev->base_addr, NE_IO_EXTENT);
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index d925053fe597c7a0ae6c257de91622a04d01eb35..b5410bee5f21dc962bca2508ad4a9a88dd47da35 100644 (file)
@@ -1,25 +1,25 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
- *                            
+ *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *                                   
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
 
 #include "netxen_nic_hw.h"
 
-#define NETXEN_NIC_BUILD_NO     "5"
-#define _NETXEN_NIC_LINUX_MAJOR 2
+#define NETXEN_NIC_BUILD_NO     "1"
+#define _NETXEN_NIC_LINUX_MAJOR 3
 #define _NETXEN_NIC_LINUX_MINOR 3
-#define _NETXEN_NIC_LINUX_SUBVERSION 59
-#define NETXEN_NIC_LINUX_VERSIONID  "2.3.59" "-" NETXEN_NIC_BUILD_NO
-#define NETXEN_NIC_FW_VERSIONID "2.3.59"
+#define _NETXEN_NIC_LINUX_SUBVERSION 2
+#define NETXEN_NIC_LINUX_VERSIONID  "3.3.2" "-" NETXEN_NIC_BUILD_NO
+#define NETXEN_NIC_FW_VERSIONID "3.3.2"
 
 #define RCV_DESC_RINGSIZE      \
        (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
 #define STATUS_DESC_RINGSIZE   \
        (sizeof(struct status_desc)* adapter->max_rx_desc_count)
+#define LRO_DESC_RINGSIZE      \
+       (sizeof(rcvDesc_t) * adapter->max_lro_rx_desc_count)
 #define TX_RINGSIZE    \
        (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
 #define RCV_BUFFSIZE   \
        (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
 #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
 
-#define NETXEN_NETDEV_STATUS 0x1
+#define NETXEN_NETDEV_STATUS           0x1
+#define NETXEN_RCV_PRODUCER_OFFSET     0
+#define NETXEN_RCV_PEG_DB_ID           2
+#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
 
 #define ADDR_IN_WINDOW1(off)   \
        ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+/*
+ * In netxen_nic_down(), we must wait for any pending callback requests into
+ * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
+ * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
+ * does this synchronization.
+ *
+ * Normally, schedule_work()/flush_scheduled_work() could have worked, but
+ * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
+ * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
+ * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
+ * linkwatch_event() to be executed which also attempts to acquire the rtnl
+ * lock thus causing a deadlock.
+ */
+
+#define SCHEDULE_WORK(tp)      queue_work(netxen_workq, tp)
+#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
+extern struct workqueue_struct *netxen_workq;
 
 /* 
  * normalize a 64MB crb address to 32MB PCI window 
  * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
  */
-#define NETXEN_CRB_NORMAL(reg)        \
-       (reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST
+#define NETXEN_CRB_NORMAL(reg) \
+       ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
 
 #define NETXEN_CRB_NORMALIZE(adapter, reg) \
        pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
 
+#define DB_NORMALIZE(adapter, off) \
+       (adapter->ahw.db_base + (off))
+
+#define NX_P2_C0               0x24
+#define NX_P2_C1               0x25
+
 #define FIRST_PAGE_GROUP_START 0
-#define FIRST_PAGE_GROUP_END   0x400000
+#define FIRST_PAGE_GROUP_END   0x100000
 
 #define SECOND_PAGE_GROUP_START        0x4000000
 #define SECOND_PAGE_GROUP_END  0x66BC000
 #define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
 #define THIRD_PAGE_GROUP_SIZE  THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
 
-#define MAX_RX_BUFFER_LENGTH           2000
+#define MAX_RX_BUFFER_LENGTH           1760
 #define MAX_RX_JUMBO_BUFFER_LENGTH     9046
-#define RX_DMA_MAP_LEN                 (MAX_RX_BUFFER_LENGTH - NET_IP_ALIGN)
+#define MAX_RX_LRO_BUFFER_LENGTH       ((48*1024)-512)
+#define RX_DMA_MAP_LEN                 (MAX_RX_BUFFER_LENGTH - 2)
 #define RX_JUMBO_DMA_MAP_LEN   \
-       (MAX_RX_JUMBO_BUFFER_LENGTH - NET_IP_ALIGN)
+       (MAX_RX_JUMBO_BUFFER_LENGTH - 2)
+#define RX_LRO_DMA_MAP_LEN             (MAX_RX_LRO_BUFFER_LENGTH - 2)
 #define NETXEN_ROM_ROUNDUP             0x80000000ULL
 
 /*
@@ -151,30 +181,38 @@ enum {
 /* Host writes the following to notify that it has done the init-handshake */
 #define PHAN_INITIALIZE_ACK    0xf00f
 
-#define NUM_RCV_DESC_RINGS     2       /* No of Rcv Descriptor contexts */
+#define NUM_RCV_DESC_RINGS     3       /* No of Rcv Descriptor contexts */
 
 /* descriptor types */
 #define RCV_DESC_NORMAL                0x01
 #define RCV_DESC_JUMBO         0x02
+#define RCV_DESC_LRO           0x04
 #define RCV_DESC_NORMAL_CTXID  0
 #define RCV_DESC_JUMBO_CTXID   1
+#define RCV_DESC_LRO_CTXID     2
 
 #define RCV_DESC_TYPE(ID) \
-       ((ID == RCV_DESC_JUMBO_CTXID) ? RCV_DESC_JUMBO : RCV_DESC_NORMAL)
+       ((ID == RCV_DESC_JUMBO_CTXID)   \
+               ? RCV_DESC_JUMBO        \
+               : ((ID == RCV_DESC_LRO_CTXID)   \
+                       ? RCV_DESC_LRO :        \
+                       (RCV_DESC_NORMAL)))
 
 #define MAX_CMD_DESCRIPTORS            1024
 #define MAX_RCV_DESCRIPTORS            32768
-#define MAX_JUMBO_RCV_DESCRIPTORS      1024
+#define MAX_JUMBO_RCV_DESCRIPTORS      4096
+#define MAX_LRO_RCV_DESCRIPTORS                2048
 #define MAX_RCVSTATUS_DESCRIPTORS      MAX_RCV_DESCRIPTORS
 #define MAX_JUMBO_RCV_DESC     MAX_JUMBO_RCV_DESCRIPTORS
 #define MAX_RCV_DESC           MAX_RCV_DESCRIPTORS
 #define MAX_RCVSTATUS_DESC     MAX_RCV_DESCRIPTORS
-#define NUM_RCV_DESC           (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS)
 #define MAX_EPG_DESCRIPTORS    (MAX_CMD_DESCRIPTORS * 8)
-
+#define NUM_RCV_DESC           (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS + \
+                                MAX_LRO_RCV_DESCRIPTORS)
 #define MIN_TX_COUNT   4096
 #define MIN_RX_COUNT   4096
-
+#define NETXEN_CTX_SIGNATURE   0xdee0
+#define NETXEN_RCV_PRODUCER(ringid)    (ringid)
 #define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */
 
 #define PHAN_PEG_RCV_INITIALIZED       0xff01
@@ -186,6 +224,67 @@ enum {
 #define get_index_range(index,length,count)    \
        (((index) + (count)) & ((length) - 1))
 
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+
+extern unsigned long long netxen_dma_mask;
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ *     Bit 0-1         : peg_id => 0x2 for tx and 01 for rx
+ *     Bit 2           : priv_id => must be 1
+ *     Bit 3-17        : count => for doorbell
+ *     Bit 18-27       : ctx_id => Context id
+ *     Bit 28-31       : opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define _netxen_set_bits(config_word, start, bits, val)        {\
+       unsigned long long mask = (((1ULL << (bits)) - 1) << (start));  \
+       unsigned long long value = (val);       \
+       (config_word) &= ~mask; \
+       (config_word) |= (((value) << (start)) & mask); \
+}
+
+#define netxen_set_msg_peg_id(config_word, val)        \
+       _netxen_set_bits(config_word, 0, 2, val)
+#define netxen_set_msg_privid(config_word)     \
+       set_bit(2, (unsigned long*)&config_word)
+#define netxen_set_msg_count(config_word, val) \
+       _netxen_set_bits(config_word, 3, 15, val)
+#define netxen_set_msg_ctxid(config_word, val) \
+       _netxen_set_bits(config_word, 18, 10, val)
+#define netxen_set_msg_opcode(config_word, val)        \
+       _netxen_set_bits(config_word, 28, 4, val)
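/*
 * A minimal sketch (not part of this patch) of composing a doorbell word
 * with the setters above.  The helper name and the rx peg id value (0x1,
 * per the layout comment) are illustrative assumptions.
 */
static inline u32 netxen_build_rx_doorbell_msg(u32 ctx_id, u32 ringid, u32 count)
{
        netxen_ctx_msg msg = 0;

        netxen_set_msg_peg_id(msg, 0x1);        /* rx peg */
        netxen_set_msg_privid(msg);             /* priv bit (bit 2) must be set */
        netxen_set_msg_count(msg, count);       /* buffers posted since last ring */
        netxen_set_msg_ctxid(msg, ctx_id);      /* receive context id */
        netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));

        /* The caller would then writel() this word to DB_NORMALIZE(adapter,
         * <doorbell offset>); the offset itself is not defined in this hunk. */
        return msg;
}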
+
+struct netxen_rcv_context {
+       u32 rcv_ring_addr_lo;
+       u32 rcv_ring_addr_hi;
+       u32 rcv_ring_size;
+       u32 rsrvd;
+};
+
+struct netxen_ring_ctx {
+
+       /* one command ring */
+       u64 cmd_consumer_offset;
+       u32 cmd_ring_addr_lo;
+       u32 cmd_ring_addr_hi;
+       u32 cmd_ring_size;
+       u32 rsrvd;
+
+       /* three receive rings */
+       struct netxen_rcv_context rcv_ctx[3];
+
+       /* one status ring */
+       u32 sts_ring_addr_lo;
+       u32 sts_ring_addr_hi;
+       u32 sts_ring_size;
+
+       u32 ctx_id;
+} __attribute__ ((aligned(64)));
+
 /*
  * Following data structures describe the descriptors that will be used.
  * Added fields of tcpHdrSize and ipHdrSize. The driver needs to do it only when
@@ -203,22 +302,32 @@ enum {
 #define FLAGS_IPSEC_SA_DELETE  0x08
 #define FLAGS_VLAN_TAGGED      0x10
 
-#define CMD_DESC_TOTAL_LENGTH(cmd_desc)        \
-               ((cmd_desc)->length_tcp_hdr & 0x00FFFFFF)
-#define CMD_DESC_TCP_HDR_OFFSET(cmd_desc)      \
-               (((cmd_desc)->length_tcp_hdr >> 24) & 0x0FF)
-#define CMD_DESC_PORT(cmd_desc)                ((cmd_desc)->port_ctxid & 0x0F)
-#define CMD_DESC_CTX_ID(cmd_desc)      (((cmd_desc)->port_ctxid >> 4) & 0x0F)
+#define netxen_set_cmd_desc_port(cmd_desc, var)        \
+       ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
 
-#define CMD_DESC_TOTAL_LENGTH_WRT(cmd_desc, var)       \
-               ((cmd_desc)->length_tcp_hdr |= ((var) & 0x00FFFFFF))
-#define CMD_DESC_TCP_HDR_OFFSET_WRT(cmd_desc, var)     \
-               ((cmd_desc)->length_tcp_hdr |= (((var) << 24) & 0xFF000000))
-#define CMD_DESC_PORT_WRT(cmd_desc, var)       \
-               ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define netxen_set_cmd_desc_flags(cmd_desc, val)       \
+       _netxen_set_bits((cmd_desc)->flags_opcode, 0, 7, val)
+#define netxen_set_cmd_desc_opcode(cmd_desc, val)      \
+       _netxen_set_bits((cmd_desc)->flags_opcode, 7, 6, val)
+
+#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val) \
+       _netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 0, 8, val);
+#define netxen_set_cmd_desc_totallength(cmd_desc, val) \
+       _netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 8, 24, val);
+
+#define netxen_get_cmd_desc_opcode(cmd_desc)   \
+       (((cmd_desc)->flags_opcode >> 7) & 0x003F)
+#define netxen_get_cmd_desc_totallength(cmd_desc)      \
+       (((cmd_desc)->num_of_buffers_total_length >> 8) & 0x0FFFFFF)
 
 struct cmd_desc_type0 {
-       u64 netxen_next;        /* for fragments handled by Phantom */
+       u8 tcp_hdr_offset;      /* For LSO only */
+       u8 ip_hdr_offset;       /* For LSO only */
+       /* Bit pattern: 0-6 flags, 7-12 opcode, 13-15 unused */
+       u16 flags_opcode;
+       /* Bit pattern: 0-7 total number of segments,
+          8-31 Total size of the packet */
+       u32 num_of_buffers_total_length;
        union {
                struct {
                        u32 addr_low_part2;
@@ -227,13 +336,6 @@ struct cmd_desc_type0 {
                u64 addr_buffer2;
        };
 
-       /* Bit pattern: 0-23 total length, 24-32 tcp header offset */
-       u32 length_tcp_hdr;
-       u8 ip_hdr_offset;       /* For LSO only */
-       u8 num_of_buffers;      /* total number of segments */
-       u8 flags;               /* as defined above */
-       u8 opcode;
-
        u16 reference_handle;   /* changed to u16 to add mss */
        u16 mss;                /* passed by NDIS_PACKET for LSO */
        /* Bit pattern 0-3 port, 0-3 ctx id */
@@ -248,7 +350,6 @@ struct cmd_desc_type0 {
                };
                u64 addr_buffer3;
        };
-
        union {
                struct {
                        u32 addr_low_part1;
@@ -270,6 +371,8 @@ struct cmd_desc_type0 {
                u64 addr_buffer4;
        };
 
+       u64 unused;
+
 } __attribute__ ((aligned(64)));
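/*
 * A minimal sketch (not from this patch) of filling the repacked
 * opcode/length fields with the accessors defined above; the helper name
 * is a placeholder and TX_TCP_PKT is one of the opcodes used later in
 * this diff.
 */
static inline void netxen_fill_tx_desc(struct cmd_desc_type0 *desc,
                                       u32 frag_count, u32 pkt_len)
{
        netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);           /* bits 7-12 */
        netxen_set_cmd_desc_num_of_buff(desc, frag_count);      /* bits 0-7  */
        netxen_set_cmd_desc_totallength(desc, pkt_len);         /* bits 8-31 */
}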
 
 /* Note: sizeof(rcv_desc) should always be a multiple of 2 */
@@ -296,22 +399,49 @@ struct rcv_desc {
 #define NETXEN_PROT_UNKNOWN    (0)
 
 /* Note: sizeof(status_desc) should always be a multiple of 2 */
-#define STATUS_DESC_PORT(status_desc)  \
-               ((status_desc)->port_status_type_op & 0x0F)
-#define STATUS_DESC_STATUS(status_desc)        \
-               (((status_desc)->port_status_type_op >> 4) & 0x0F)
-#define STATUS_DESC_TYPE(status_desc)  \
-               (((status_desc)->port_status_type_op >> 8) & 0x0F)
-#define STATUS_DESC_OPCODE(status_desc)        \
-               (((status_desc)->port_status_type_op >> 12) & 0x0F)
+
+#define netxen_get_sts_desc_lro_cnt(status_desc)       \
+       ((status_desc)->lro & 0x7F)
+#define netxen_get_sts_desc_lro_last_frag(status_desc) \
+       (((status_desc)->lro & 0x80) >> 7)
+
+#define netxen_get_sts_port(status_desc)       \
+       ((status_desc)->status_desc_data & 0x0F)
+#define netxen_get_sts_status(status_desc)     \
+       (((status_desc)->status_desc_data >> 4) & 0x0F)
+#define netxen_get_sts_type(status_desc)       \
+       (((status_desc)->status_desc_data >> 8) & 0x0F)
+#define netxen_get_sts_totallength(status_desc)        \
+       (((status_desc)->status_desc_data >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(status_desc)  \
+       (((status_desc)->status_desc_data >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(status_desc)       \
+       (((status_desc)->status_desc_data >> 44) & 0x0F)
+#define netxen_get_sts_owner(status_desc)      \
+       (((status_desc)->status_desc_data >> 56) & 0x03)
+#define netxen_get_sts_opcode(status_desc)     \
+       (((status_desc)->status_desc_data >> 58) & 0x03F)
+
+#define netxen_clear_sts_owner(status_desc)    \
+       ((status_desc)->status_desc_data &=     \
+       ~(((unsigned long long)3) << 56 ))
+#define netxen_set_sts_owner(status_desc, val) \
+       ((status_desc)->status_desc_data |=     \
+       (((unsigned long long)((val) & 0x3)) << 56 ))
 
 struct status_desc {
-       /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-15 opcode */
-       u16 port_status_type_op;
-       u16 total_length;       /* NIC mode */
-       u16 reference_handle;   /* handle for the associated packet */
-       /* Bit pattern: 0-1 owner, 2-5 protocol */
-       u16 owner;              /* Owner of the descriptor */
+       /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+          28-43 reference_handle, 44-47 protocol, 48-52 unused
+          53-55 desc_cnt, 56-57 owner, 58-63 opcode
+        */
+       u64 status_desc_data;
+       u32 hash_value;
+       u8 hash_type;
+       u8 msg_type;
+       u8 unused;
+       /* Bit pattern: 0-6 lro_count indicates frag sequence,
+          7 last_frag indicates last frag */
+       u8 lro;
 } __attribute__ ((aligned(8)));
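/*
 * A minimal sketch (not from this patch) of unpacking the 64-bit status
 * word with the getters above; the function name is a placeholder.
 */
static inline void netxen_dump_status_desc(struct status_desc *desc)
{
        u32 length = netxen_get_sts_totallength(desc);  /* bits 12-27 */
        u32 handle = netxen_get_sts_refhandle(desc);    /* bits 28-43 */
        u32 owner  = netxen_get_sts_owner(desc);        /* bits 56-57 */
        u32 opcode = netxen_get_sts_opcode(desc);       /* bits 58-63 */

        printk(KERN_DEBUG "sts: len=%u handle=%u owner=%u opcode=%u\n",
               length, handle, owner, opcode);
}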
 
 enum {
@@ -559,11 +689,12 @@ typedef enum {
 #define PRIMARY_START          (BOOTLD_START)
 #define FLASH_CRBINIT_SIZE     (0x4000)
 #define FLASH_BRDCFG_SIZE      (sizeof(struct netxen_board_info))
-#define FLASH_USER_SIZE                (sizeof(netxen_user_info)/sizeof(u32))
+#define FLASH_USER_SIZE                (sizeof(struct netxen_user_info)/sizeof(u32))
 #define FLASH_SECONDARY_SIZE   (USER_START-SECONDARY_START)
 #define NUM_PRIMARY_SECTORS    (0x20)
 #define NUM_CONFIG_SECTORS     (1)
-#define PFX "netxen: "
+#define PFX "NetXen: "
+extern char netxen_nic_driver_name[];
 
 /* Note: Make sure to not call this before adapter->port is valid */
 #if !defined(NETXEN_DEBUG)
@@ -572,7 +703,7 @@ typedef enum {
 #else
 #define DPRINTK(klevel, fmt, args...)  do { \
        printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\
-               (adapter != NULL && adapter->port != NULL && \
+               (adapter != NULL && \
                adapter->port[0] != NULL && \
                adapter->port[0]->netdev != NULL) ? \
                adapter->port[0]->netdev->name : NULL, \
@@ -609,7 +740,6 @@ struct netxen_cmd_buffer {
        u8 frag_count;
        unsigned long time_stamp;
        u32 state;
-       u32 no_of_descriptors;
 };
 
 /* In rx_buffer, we do not need multiple fragments as is a single buffer */
@@ -618,6 +748,9 @@ struct netxen_rx_buffer {
        u64 dma;
        u16 ref_handle;
        u16 state;
+       u32 lro_expected_frags;
+       u32 lro_current_frags;
+       u32 lro_length;
 };
 
 /* Board types */
@@ -633,6 +766,8 @@ struct netxen_hardware_context {
        void __iomem *pci_base0;
        void __iomem *pci_base1;
        void __iomem *pci_base2;
+       void __iomem *db_base;
+       unsigned long db_len;
 
        u8 revision_id;
        u16 board_type;
@@ -642,14 +777,13 @@ struct netxen_hardware_context {
        u32 qg_linksup;
        /* Address of cmd ring in Phantom */
        struct cmd_desc_type0 *cmd_desc_head;
-       char *pauseaddr;
        struct pci_dev *cmd_desc_pdev;
        dma_addr_t cmd_desc_phys_addr;
-       dma_addr_t pause_physaddr;
-       struct pci_dev *pause_pdev;
        struct netxen_adapter *adapter;
 };
 
+#define RCV_RING_LRO   RCV_DESC_LRO
+
 #define MINIMUM_ETHERNET_FRAME_SIZE    64      /* With FCS */
 #define ETHERNET_FCS_SIZE              4
 
@@ -702,8 +836,13 @@ struct netxen_recv_context {
 };
 
 #define NETXEN_NIC_MSI_ENABLED 0x02
+#define NETXEN_DMA_MASK        0xfffffffe
+#define NETXEN_DB_MAPSIZE_BYTES    0x1000
 
-struct netxen_drvops;
+struct netxen_dummy_dma {
+       void *addr;
+       dma_addr_t phys_addr;
+};
 
 struct netxen_adapter {
        struct netxen_hardware_context ahw;
@@ -714,17 +853,19 @@ struct netxen_adapter {
        spinlock_t lock;
        struct work_struct watchdog_task;
        struct work_struct tx_timeout_task;
+       struct net_device *netdev;
        struct timer_list watchdog_timer;
 
        u32 curr_window;
 
        u32 cmd_producer;
-       u32 cmd_consumer;
+       u32 *cmd_consumer;
 
        u32 last_cmd_consumer;
        u32 max_tx_desc_count;
        u32 max_rx_desc_count;
        u32 max_jumbo_rx_desc_count;
+       u32 max_lro_rx_desc_count;
        /* Num of instances active on cmd buffer ring */
        u32 proc_cmd_buf_counter;
 
@@ -746,8 +887,27 @@ struct netxen_adapter {
        struct netxen_recv_context recv_ctx[MAX_RCV_CTX];
 
        int is_up;
-       int work_done;
-       struct netxen_drvops *ops;
+       int number;
+       struct netxen_dummy_dma dummy_dma;
+
+       /* Context interface shared between card and host */
+       struct netxen_ring_ctx *ctx_desc;
+       struct pci_dev *ctx_desc_pdev;
+       dma_addr_t ctx_desc_phys_addr;
+       int (*enable_phy_interrupts) (struct netxen_adapter *, int);
+       int (*disable_phy_interrupts) (struct netxen_adapter *, int);
+       void (*handle_phy_intr) (struct netxen_adapter *);
+       int (*macaddr_set) (struct netxen_port *, netxen_ethernet_macaddr_t);
+       int (*set_mtu) (struct netxen_port *, int);
+       int (*set_promisc) (struct netxen_adapter *, int,
+                           netxen_niu_prom_mode_t);
+       int (*unset_promisc) (struct netxen_adapter *, int,
+                             netxen_niu_prom_mode_t);
+       int (*phy_read) (struct netxen_adapter *, long phy, long reg, u32 *);
+       int (*phy_write) (struct netxen_adapter *, long phy, long reg, u32 val);
+       int (*init_port) (struct netxen_adapter *, int);
+       void (*init_niu) (struct netxen_adapter *);
+       int (*stop_port) (struct netxen_adapter *, int);
 };                             /* netxen_adapter structure */
 
 /* Max number of xmit producer threads that can run simultaneously */
@@ -829,25 +989,6 @@ static inline void __iomem *pci_base(struct netxen_adapter *adapter,
        return NULL;
 }
 
-struct netxen_drvops {
-       int (*enable_phy_interrupts) (struct netxen_adapter *, int);
-       int (*disable_phy_interrupts) (struct netxen_adapter *, int);
-       void (*handle_phy_intr) (struct netxen_adapter *);
-       int (*macaddr_set) (struct netxen_port *, netxen_ethernet_macaddr_t);
-       int (*set_mtu) (struct netxen_port *, int);
-       int (*set_promisc) (struct netxen_adapter *, int,
-                           netxen_niu_prom_mode_t);
-       int (*unset_promisc) (struct netxen_adapter *, int,
-                             netxen_niu_prom_mode_t);
-       int (*phy_read) (struct netxen_adapter *, long phy, long reg, u32 *);
-       int (*phy_write) (struct netxen_adapter *, long phy, long reg, u32 val);
-       int (*init_port) (struct netxen_adapter *, int);
-       void (*init_niu) (struct netxen_adapter *);
-       int (*stop_port) (struct netxen_adapter *, int);
-};
-
-extern char netxen_nic_driver_name[];
-
 int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter,
                                          int port);
 int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
@@ -886,10 +1027,20 @@ int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
                          int len);
 int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
                           int len);
+int netxen_nic_hw_read_ioctl(struct netxen_adapter *adapter, u64 off,
+                            void *data, int len);
+int netxen_nic_hw_write_ioctl(struct netxen_adapter *adapter, u64 off,
+                             void *data, int len);
+int netxen_nic_pci_mem_write_ioctl(struct netxen_adapter *adapter,
+                                  u64 off, void *data, int size);
+int netxen_nic_pci_mem_read_ioctl(struct netxen_adapter *adapter,
+                                 u64 off, void *data, int size);
 void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
                                 unsigned long off, int data);
 
 /* Functions from netxen_nic_init.c */
+void netxen_free_adapter_offload(struct netxen_adapter *adapter);
+int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
 void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
 void netxen_load_firmware(struct netxen_adapter *adapter);
 int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
@@ -921,10 +1072,12 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
                    struct netxen_port *port);
 int netxen_nic_rx_has_work(struct netxen_adapter *adapter);
 int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
-void netxen_watchdog_task(unsigned long v);
+void netxen_watchdog_task(struct work_struct *work);
 void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
                            u32 ringid);
-void netxen_process_cmd_ring(unsigned long data);
+void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, u32 ctx,
+                                u32 ringid);
+int netxen_process_cmd_ring(unsigned long data);
 u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
 void netxen_nic_set_multi(struct net_device *netdev);
 int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
@@ -1018,7 +1171,6 @@ static inline void get_brd_name_by_type(u32 type, char *name)
 
 int netxen_is_flash_supported(struct netxen_adapter *adapter);
 int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]);
-
 extern void netxen_change_ringparam(struct netxen_adapter *adapter);
 extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
                                int *valp);
index 9a914aeba5bcd360c57ee2807dd37debbd329cc9..2ab4885cc950aa85eeb29dab1798da1bcbe26eef 100644 (file)
@@ -1,25 +1,25 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
- *                            
+ *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *                                   
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -118,7 +118,7 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        u32 fw_minor = 0;
        u32 fw_build = 0;
 
-       strncpy(drvinfo->driver, "netxen_nic", 32);
+       strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
        strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
        fw_major = readl(NETXEN_CRB_NORMALIZE(adapter,
                                              NETXEN_FW_VERSION_MAJOR));
@@ -210,7 +210,6 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
                       (netxen_brdtype_t) boardinfo->board_type);
                return -EIO;
-
        }
 
        return 0;
@@ -226,18 +225,18 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        /* read which mode */
        if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
                /* autonegotiation */
-               if (adapter->ops->phy_write
-                   && adapter->ops->phy_write(adapter, port->portnum,
-                                              NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                              (__le32) ecmd->autoneg) != 0)
+               if (adapter->phy_write
+                   && adapter->phy_write(adapter, port->portnum,
+                                         NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+                                         (__le32) ecmd->autoneg) != 0)
                        return -EIO;
                else
                        port->link_autoneg = ecmd->autoneg;
 
-               if (adapter->ops->phy_read
-                   && adapter->ops->phy_read(adapter, port->portnum,
-                                             NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-                                             &status) != 0)
+               if (adapter->phy_read
+                   && adapter->phy_read(adapter, port->portnum,
+                                        NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+                                        &status) != 0)
                        return -EIO;
 
                /* speed */
@@ -257,10 +256,10 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        netxen_clear_phy_duplex(status);
                if (ecmd->duplex == DUPLEX_FULL)
                        netxen_set_phy_duplex(status);
-               if (adapter->ops->phy_write
-                   && adapter->ops->phy_write(adapter, port->portnum,
-                                              NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-                                              *((int *)&status)) != 0)
+               if (adapter->phy_write
+                   && adapter->phy_write(adapter, port->portnum,
+                                         NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+                                         *((int *)&status)) != 0)
                        return -EIO;
                else {
                        port->link_speed = ecmd->speed;
@@ -422,10 +421,10 @@ static u32 netxen_nic_get_link(struct net_device *dev)
 
        /* read which mode */
        if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
-               if (adapter->ops->phy_read
-                   && adapter->ops->phy_read(adapter, port->portnum,
-                                             NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-                                             &status) != 0)
+               if (adapter->phy_read
+                   && adapter->phy_read(adapter, port->portnum,
+                                        NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+                                        &status) != 0)
                        return -EIO;
                else
                        return (netxen_get_phy_link(status));
@@ -460,20 +459,22 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
 {
        struct netxen_port *port = netdev_priv(dev);
        struct netxen_adapter *adapter = port->adapter;
-       int i, j;
+       int i;
 
        ring->rx_pending = 0;
+       ring->rx_jumbo_pending = 0;
        for (i = 0; i < MAX_RCV_CTX; ++i) {
-               for (j = 0; j < NUM_RCV_DESC_RINGS; j++)
-                       ring->rx_pending +=
-                           adapter->recv_ctx[i].rcv_desc[j].rcv_pending;
+               ring->rx_pending += adapter->recv_ctx[i].
+                   rcv_desc[RCV_DESC_NORMAL_CTXID].rcv_pending;
+               ring->rx_jumbo_pending += adapter->recv_ctx[i].
+                   rcv_desc[RCV_DESC_JUMBO_CTXID].rcv_pending;
        }
 
        ring->rx_max_pending = adapter->max_rx_desc_count;
        ring->tx_max_pending = adapter->max_tx_desc_count;
+       ring->rx_jumbo_max_pending = adapter->max_jumbo_rx_desc_count;
        ring->rx_mini_max_pending = 0;
        ring->rx_mini_pending = 0;
-       ring->rx_jumbo_max_pending = 0;
        ring->rx_jumbo_pending = 0;
 }
 
@@ -526,10 +527,10 @@ netxen_nic_set_pauseparam(struct net_device *dev,
                                    *(u32 *) (&val));
                /* set autoneg */
                autoneg = pause->autoneg;
-               if (adapter->ops->phy_write
-                   && adapter->ops->phy_write(adapter, port->portnum,
-                                              NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                              (__le32) autoneg) != 0)
+               if (adapter->phy_write
+                   && adapter->phy_write(adapter, port->portnum,
+                                         NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+                                         (__le32) autoneg) != 0)
                        return -EIO;
                else {
                        port->link_autoneg = pause->autoneg;
index 72c6ec4ee2a06d876841f0e4a79c18fd1e69a253..fe8b675f9e72c69deefc464af835b91f1ae6cfaf 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
index 105c24f0ad4c75249f683b2c893600bbfcabc7d2..9147b6048dfb772be99f911a09d474cc484ebf06 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -42,7 +42,7 @@
 
 #define NETXEN_FLASH_BASE      (BOOTLD_START)
 #define NETXEN_PHANTOM_MEM_BASE        (NETXEN_FLASH_BASE)
-#define NETXEN_MAX_MTU         8000
+#define NETXEN_MAX_MTU         8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
 #define NETXEN_MIN_MTU         64
 #define NETXEN_ETH_FCS_SIZE     4
 #define NETXEN_ENET_HEADER_SIZE 14
@@ -81,8 +81,8 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p)
        DPRINTK(INFO, "valid ether addr\n");
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 
-       if (adapter->ops->macaddr_set)
-               adapter->ops->macaddr_set(port, addr->sa_data);
+       if (adapter->macaddr_set)
+               adapter->macaddr_set(port, addr->sa_data);
 
        return 0;
 }
@@ -99,17 +99,17 @@ void netxen_nic_set_multi(struct net_device *netdev)
 
        mc_ptr = netdev->mc_list;
        if (netdev->flags & IFF_PROMISC) {
-               if (adapter->ops->set_promisc)
-                       adapter->ops->set_promisc(adapter,
-                                                 port->portnum,
-                                                 NETXEN_NIU_PROMISC_MODE);
+               if (adapter->set_promisc)
+                       adapter->set_promisc(adapter,
+                                            port->portnum,
+                                            NETXEN_NIU_PROMISC_MODE);
        } else {
-               if (adapter->ops->unset_promisc &&
+               if (adapter->unset_promisc &&
                    adapter->ahw.boardcfg.board_type
                    != NETXEN_BRDTYPE_P2_SB31_10G_IMEZ)
-                       adapter->ops->unset_promisc(adapter,
-                                                   port->portnum,
-                                                   NETXEN_NIU_NON_PROMISC_MODE);
+                       adapter->unset_promisc(adapter,
+                                              port->portnum,
+                                              NETXEN_NIU_NON_PROMISC_MODE);
        }
        if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
                netxen_nic_mcr_set_mode_select(netxen_mac_addr_cntl_data, 0x03);
@@ -160,8 +160,8 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
                return -EINVAL;
        }
 
-       if (adapter->ops->set_mtu)
-               adapter->ops->set_mtu(port, mtu);
+       if (adapter->set_mtu)
+               adapter->set_mtu(port, mtu);
        netdev->mtu = mtu;
 
        return 0;
@@ -176,22 +176,18 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
        struct netxen_hardware_context *hw = &adapter->ahw;
        u32 state = 0;
        void *addr;
-       void *pause_addr;
        int loops = 0, err = 0;
        int ctx, ring;
        u32 card_cmdring = 0;
-       struct netxen_rcv_desc_crb *rcv_desc_crb = NULL;
        struct netxen_recv_context *recv_ctx;
        struct netxen_rcv_desc_ctx *rcv_desc;
 
-       DPRINTK(INFO, "crb_base: %lx %lx", NETXEN_PCI_CRBSPACE,
+       DPRINTK(INFO, "crb_base: %lx %x", NETXEN_PCI_CRBSPACE,
                PCI_OFFSET_SECOND_RANGE(adapter, NETXEN_PCI_CRBSPACE));
-       DPRINTK(INFO, "cam base: %lx %lx", NETXEN_CRB_CAM,
+       DPRINTK(INFO, "cam base: %lx %x", NETXEN_CRB_CAM,
                pci_base_offset(adapter, NETXEN_CRB_CAM));
-       DPRINTK(INFO, "cam RAM: %lx %lx", NETXEN_CAM_RAM_BASE,
+       DPRINTK(INFO, "cam RAM: %lx %x", NETXEN_CAM_RAM_BASE,
                pci_base_offset(adapter, NETXEN_CAM_RAM_BASE));
-       DPRINTK(INFO, "NIC base:%lx %lx\n", NIC_CRB_BASE_PORT1,
-               pci_base_offset(adapter, NIC_CRB_BASE_PORT1));
 
        /* Window 1 call */
        card_cmdring = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_CMDRING));
@@ -226,33 +222,42 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
        DPRINTK(INFO, "Recieve Peg ready too. starting stuff\n");
 
        addr = netxen_alloc(adapter->ahw.pdev,
-                           sizeof(struct cmd_desc_type0) *
-                           adapter->max_tx_desc_count,
-                           &hw->cmd_desc_phys_addr, &hw->cmd_desc_pdev);
+                           sizeof(struct netxen_ring_ctx) +
+                           sizeof(uint32_t),
+                           (dma_addr_t *) & adapter->ctx_desc_phys_addr,
+                           &adapter->ctx_desc_pdev);
 
+       printk("ctx_desc_phys_addr: 0x%llx\n",
+              (u64) adapter->ctx_desc_phys_addr);
        if (addr == NULL) {
                DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
-               return -ENOMEM;
+               err = -ENOMEM;
+               return err;
        }
+       memset(addr, 0, sizeof(struct netxen_ring_ctx));
+       adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
+       adapter->ctx_desc->cmd_consumer_offset = adapter->ctx_desc_phys_addr
+           + sizeof(struct netxen_ring_ctx);
+       adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
+                                             sizeof(struct netxen_ring_ctx));
+
+       addr = pci_alloc_consistent(adapter->ahw.pdev,
+                                   sizeof(struct cmd_desc_type0) *
+                                   adapter->max_tx_desc_count,
+                                   (dma_addr_t *) & hw->cmd_desc_phys_addr);
+       printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);
 
-       pause_addr = netxen_alloc(adapter->ahw.pdev, 512,
-                                 (dma_addr_t *) & hw->pause_physaddr,
-                                 &hw->pause_pdev);
-       if (pause_addr == NULL) {
-               DPRINTK(1, ERR, "bad return from pci_alloc_consistent\n");
+       if (addr == NULL) {
+               DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
+               netxen_free_hw_resources(adapter);
                return -ENOMEM;
        }
 
-       hw->pauseaddr = (char *)pause_addr;
-       {
-               u64 *ptr = (u64 *) pause_addr;
-               *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
-               *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
-               *ptr++ = NETXEN_NIC_UNIT_PAUSE_ADDR;
-               *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
-               *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR1;
-               *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR2;
-       }
+       adapter->ctx_desc->cmd_ring_addr_lo =
+           hw->cmd_desc_phys_addr & 0xffffffffUL;
+       adapter->ctx_desc->cmd_ring_addr_hi =
+           ((u64) hw->cmd_desc_phys_addr >> 32);
+       adapter->ctx_desc->cmd_ring_size = adapter->max_tx_desc_count;
 
        hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
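/*
 * Layout sketch of the allocation above (not part of the patch): a single
 * coherent DMA buffer holds the ring context immediately followed by the
 * 32-bit command-consumer index that the card updates.
 *
 *   ctx_desc_phys_addr -> +---------------------------+
 *                         | struct netxen_ring_ctx    |  <- adapter->ctx_desc
 *                         +---------------------------+
 *                         | u32 cmd consumer index    |  <- adapter->cmd_consumer
 *                         +---------------------------+
 */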
 
@@ -273,6 +278,12 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
                                return err;
                        }
                        rcv_desc->desc_head = (struct rcv_desc *)addr;
+                       adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_lo =
+                           rcv_desc->phys_addr & 0xffffffffUL;
+                       adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_hi =
+                           ((u64) rcv_desc->phys_addr >> 32);
+                       adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
+                           rcv_desc->max_rx_desc_count;
                }
 
                addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE,
@@ -286,47 +297,21 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
                        return err;
                }
                recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
-               for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-                       rcv_desc = &recv_ctx->rcv_desc[ring];
-                       rcv_desc_crb =
-                           &recv_crb_registers[ctx].rcv_desc_crb[ring];
-                       DPRINTK(INFO, "ring #%d crb global ring reg 0x%x\n",
-                               ring, rcv_desc_crb->crb_globalrcv_ring);
-                       /* Window = 1 */
-                       writel(lower32(rcv_desc->phys_addr),
-                              NETXEN_CRB_NORMALIZE(adapter,
-                                                   rcv_desc_crb->
-                                                   crb_globalrcv_ring));
-                       DPRINTK(INFO, "GLOBAL_RCV_RING ctx %d, addr 0x%x"
-                               " val 0x%llx,"
-                               " virt %p\n", ctx,
-                               rcv_desc_crb->crb_globalrcv_ring,
-                               (unsigned long long)rcv_desc->phys_addr,
-                               +rcv_desc->desc_head);
-               }
+               adapter->ctx_desc->sts_ring_addr_lo =
+                   recv_ctx->rcv_status_desc_phys_addr & 0xffffffffUL;
+               adapter->ctx_desc->sts_ring_addr_hi =
+                   ((u64) recv_ctx->rcv_status_desc_phys_addr >> 32);
+               adapter->ctx_desc->sts_ring_size = adapter->max_rx_desc_count;
 
-               /* Window = 1 */
-               writel(lower32(recv_ctx->rcv_status_desc_phys_addr),
-                      NETXEN_CRB_NORMALIZE(adapter,
-                                           recv_crb_registers[ctx].
-                                           crb_rcvstatus_ring));
-               DPRINTK(INFO, "RCVSTATUS_RING, ctx %d, addr 0x%x,"
-                       " val 0x%x,virt%p\n",
-                       ctx,
-                       recv_crb_registers[ctx].crb_rcvstatus_ring,
-                       (unsigned long long)recv_ctx->rcv_status_desc_phys_addr,
-                       recv_ctx->rcv_status_desc_head);
        }
        /* Window = 1 */
-       writel(lower32(hw->pause_physaddr),
-              NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_LO));
-       writel(upper32(hw->pause_physaddr),
-              NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_HI));
-
-       writel(lower32(hw->cmd_desc_phys_addr),
-              NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
-       writel(upper32(hw->cmd_desc_phys_addr),
-              NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_HI));
+
+       writel(lower32(adapter->ctx_desc_phys_addr),
+              NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_LO));
+       writel(upper32(adapter->ctx_desc_phys_addr),
+              NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_HI));
+       writel(NETXEN_CTX_SIGNATURE,
+              NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_SIGNATURE_REG));
        return err;
 }
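/*
 * lower32()/upper32() split the 64-bit context-descriptor DMA address
 * across two 32-bit CRB registers above.  A sketch of definitions
 * consistent with that use (assumed, not shown in this diff):
 */
static inline u32 lower32(u64 addr)
{
        return (u32)(addr & 0xffffffffULL);
}

static inline u32 upper32(u64 addr)
{
        return (u32)((addr >> 32) & 0xffffffffULL);
}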
 
@@ -336,6 +321,15 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
        struct netxen_rcv_desc_ctx *rcv_desc;
        int ctx, ring;
 
+       if (adapter->ctx_desc != NULL) {
+               pci_free_consistent(adapter->ctx_desc_pdev,
+                                   sizeof(struct netxen_ring_ctx) +
+                                   sizeof(uint32_t),
+                                   adapter->ctx_desc,
+                                   adapter->ctx_desc_phys_addr);
+               adapter->ctx_desc = NULL;
+       }
+
        if (adapter->ahw.cmd_desc_head != NULL) {
                pci_free_consistent(adapter->ahw.cmd_desc_pdev,
                                    sizeof(struct cmd_desc_type0) *
@@ -344,11 +338,9 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
                                    adapter->ahw.cmd_desc_phys_addr);
                adapter->ahw.cmd_desc_head = NULL;
        }
-       if (adapter->ahw.pauseaddr != NULL) {
-               pci_free_consistent(adapter->ahw.pause_pdev, 512,
-                                   adapter->ahw.pauseaddr,
-                                   adapter->ahw.pause_physaddr);
-               adapter->ahw.pauseaddr = NULL;
+       /* Special handling: there are 2 ports on this board */
+       if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) {
+               adapter->ahw.max_ports = 2;
        }
 
        for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
@@ -383,19 +375,22 @@ void netxen_tso_check(struct netxen_adapter *adapter,
                desc->total_hdr_length = sizeof(struct ethhdr) +
                    ((skb->nh.iph)->ihl * sizeof(u32)) +
                    ((skb->h.th)->doff * sizeof(u32));
-               desc->opcode = TX_TCP_LSO;
+               netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
        } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
                if (skb->nh.iph->protocol == IPPROTO_TCP) {
-                       desc->opcode = TX_TCP_PKT;
+                       netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
                } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
-                       desc->opcode = TX_UDP_PKT;
+                       netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
                } else {
                        return;
                }
        }
        adapter->stats.xmitcsummed++;
-       CMD_DESC_TCP_HDR_OFFSET_WRT(desc, skb->h.raw - skb->data);
-       desc->length_tcp_hdr = cpu_to_le32(desc->length_tcp_hdr);
+       desc->tcp_hdr_offset = skb->h.raw - skb->data;
+       netxen_set_cmd_desc_totallength(desc,
+                                       cpu_to_le32
+                                       (netxen_get_cmd_desc_totallength
+                                        (desc)));
        desc->ip_hdr_offset = skb->nh.raw - skb->data;
 }
 
@@ -648,7 +643,7 @@ void netxen_nic_reg_write(struct netxen_adapter *adapter, u64 off, u32 val)
 
        addr = NETXEN_CRB_NORMALIZE(adapter, off);
        DPRINTK(INFO, "writing to base %lx offset %llx addr %p data %x\n",
-               pci_base(adapter, off), off, addr);
+               pci_base(adapter, off), off, addr, val);
        writel(val, addr);
 
 }
@@ -660,7 +655,7 @@ int netxen_nic_reg_read(struct netxen_adapter *adapter, u64 off)
 
        addr = NETXEN_CRB_NORMALIZE(adapter, off);
        DPRINTK(INFO, "reading from base %lx offset %llx addr %p\n",
-               adapter->ahw.pci_base, off, addr);
+               pci_base(adapter, off), off, addr);
        val = readl(addr);
        writel(val, addr);
 
@@ -848,8 +843,8 @@ void netxen_nic_stop_all_ports(struct netxen_adapter *adapter)
 
        for (port_nr = 0; port_nr < adapter->ahw.max_ports; port_nr++) {
                port = adapter->port[port_nr];
-               if (adapter->ops->stop_port)
-                       adapter->ops->stop_port(adapter, port->portnum);
+               if (adapter->stop_port)
+                       adapter->stop_port(adapter, port->portnum);
        }
 }
 
@@ -873,13 +868,13 @@ void netxen_nic_set_link_parameters(struct netxen_port *port)
 {
        struct netxen_adapter *adapter = port->adapter;
        __le32 status;
-       u16 autoneg;
+       __le32 autoneg;
        __le32 mode;
 
        netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
        if (netxen_get_niu_enable_ge(mode)) {   /* Gb 10/100/1000 Mbps mode */
-               if (adapter->ops->phy_read
-                   && adapter->ops->
+               if (adapter->phy_read
+                   && adapter->
                    phy_read(adapter, port->portnum,
                             NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
                             &status) == 0) {
@@ -909,11 +904,11 @@ void netxen_nic_set_link_parameters(struct netxen_port *port)
                                        port->link_duplex = -1;
                                        break;
                                }
-                               if (adapter->ops->phy_read
-                                   && adapter->ops->
+                               if (adapter->phy_read
+                                   && adapter->
                                    phy_read(adapter, port->portnum,
                                             NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                            (__le32 *) & autoneg) != 0)
+                                            &autoneg) != 0)
                                        port->link_autoneg = autoneg;
                        } else
                                goto link_down;
@@ -1008,3 +1003,291 @@ int netxen_crb_read_val(struct netxen_adapter *adapter, unsigned long off)
        netxen_nic_hw_read_wx(adapter, off, &data, 4);
        return data;
 }
+
+int netxen_nic_hw_write_ioctl(struct netxen_adapter *adapter, u64 off,
+                             void *data, int len)
+{
+       void *addr;
+       u64 offset = off;
+       u8 *mem_ptr = NULL;
+       unsigned long mem_base;
+       unsigned long mem_page;
+
+       if (ADDR_IN_WINDOW1(off)) {
+               addr = NETXEN_CRB_NORMALIZE(adapter, off);
+               if (!addr) {
+                       mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+                       offset = NETXEN_CRB_NORMAL(off);
+                       mem_page = offset & PAGE_MASK;
+                       if (mem_page != ((offset + len - 1) & PAGE_MASK))
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+                       else
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE);
+                       if (mem_ptr == 0UL) {
+                               return 1;
+                       }
+                       addr = mem_ptr;
+                       addr += offset & (PAGE_SIZE - 1);
+               }
+       } else {
+               addr = pci_base_offset(adapter, off);
+               if (!addr) {
+                       mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+                       mem_page = off & PAGE_MASK;
+                       if (mem_page != ((off + len - 1) & PAGE_MASK))
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+                       else
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE);
+                       if (mem_ptr == 0UL) {
+                               return 1;
+                       }
+                       addr = mem_ptr;
+                       addr += off & (PAGE_SIZE - 1);
+               }
+               netxen_nic_pci_change_crbwindow(adapter, 0);
+       }
+       switch (len) {
+       case 1:
+               writeb(*(u8 *) data, addr);
+               break;
+       case 2:
+               writew(*(u16 *) data, addr);
+               break;
+       case 4:
+               writel(*(u32 *) data, addr);
+               break;
+       case 8:
+               writeq(*(u64 *) data, addr);
+               break;
+       default:
+               DPRINTK(INFO,
+                       "writing data %lx to offset %llx, num words=%d\n",
+                       *(unsigned long *)data, off, (len >> 3));
+
+               netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
+                                           (len >> 3));
+               break;
+       }
+
+       if (!ADDR_IN_WINDOW1(off))
+               netxen_nic_pci_change_crbwindow(adapter, 1);
+       if (mem_ptr)
+               iounmap(mem_ptr);
+       return 0;
+}
+
+int netxen_nic_hw_read_ioctl(struct netxen_adapter *adapter, u64 off,
+                            void *data, int len)
+{
+       void *addr;
+       u64 offset;
+       u8 *mem_ptr = NULL;
+       unsigned long mem_base;
+       unsigned long mem_page;
+
+       if (ADDR_IN_WINDOW1(off)) {
+               addr = NETXEN_CRB_NORMALIZE(adapter, off);
+               if (!addr) {
+                       mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+                       offset = NETXEN_CRB_NORMAL(off);
+                       mem_page = offset & PAGE_MASK;
+                       if (mem_page != ((offset + len - 1) & PAGE_MASK))
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+                       else
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE);
+                       if (mem_ptr == 0UL) {
+                               *(u8 *) data = 0;
+                               return 1;
+                       }
+                       addr = mem_ptr;
+                       addr += offset & (PAGE_SIZE - 1);
+               }
+       } else {
+               addr = pci_base_offset(adapter, off);
+               if (!addr) {
+                       mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+                       mem_page = off & PAGE_MASK;
+                       if (mem_page != ((off + len - 1) & PAGE_MASK))
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+                       else
+                               mem_ptr =
+                                   ioremap(mem_base + mem_page, PAGE_SIZE);
+                       if (mem_ptr == 0UL)
+                               return 1;
+                       addr = mem_ptr;
+                       addr += off & (PAGE_SIZE - 1);
+               }
+               netxen_nic_pci_change_crbwindow(adapter, 0);
+       }
+       switch (len) {
+       case 1:
+               *(u8 *) data = readb(addr);
+               break;
+       case 2:
+               *(u16 *) data = readw(addr);
+               break;
+       case 4:
+               *(u32 *) data = readl(addr);
+               break;
+       case 8:
+               *(u64 *) data = readq(addr);
+               break;
+       default:
+               netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
+                                          (len >> 3));
+               break;
+       }
+       if (!ADDR_IN_WINDOW1(off))
+               netxen_nic_pci_change_crbwindow(adapter, 1);
+       if (mem_ptr)
+               iounmap(mem_ptr);
+       return 0;
+}
+
+int netxen_nic_pci_mem_write_ioctl(struct netxen_adapter *adapter, u64 off,
+                                  void *data, int size)
+{
+       void *addr;
+       int ret = 0;
+       u8 *mem_ptr = NULL;
+       unsigned long mem_base;
+       unsigned long mem_page;
+
+       if (data == NULL || off > (128 * 1024 * 1024)) {
+               printk(KERN_ERR "%s: data: %p off:%llx\n",
+                      netxen_nic_driver_name, data, off);
+               return 1;
+       }
+       off = netxen_nic_pci_set_window(adapter, off);
+       /* Corner case: a malicious user could access the last few bytes of a
+          mapped range and then try to access addresses beyond its end.
+        */
+       if (!pci_base(adapter, off + size - 1) && pci_base(adapter, off)) {
+               printk(KERN_ERR "%s: Invalid access to memory address range"
+                      " 0x%llx - 0x%llx\n", netxen_nic_driver_name, off,
+                      off + size);
+               return 1;
+       }
+       addr = pci_base_offset(adapter, off);
+       DPRINTK(INFO, "writing data %llx to offset %llx\n",
+               *(unsigned long long *)data, off);
+       if (!addr) {
+               mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+               mem_page = off & PAGE_MASK;
+               /* Map two pages whenever user tries to access addresses in two
+                  consecutive pages.
+                */
+               if (mem_page != ((off + size - 1) & PAGE_MASK))
+                       mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+               else
+                       mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+               if (mem_ptr == 0UL) {
+                       return 1;
+               }
+               addr = mem_ptr;
+               addr += off & (PAGE_SIZE - 1);
+       }
+       switch (size) {
+       case 1:
+               writeb(*(u8 *) data, addr);
+               break;
+       case 2:
+               writew(*(u16 *) data, addr);
+               break;
+       case 4:
+               writel(*(u32 *) data, addr);
+               break;
+       case 8:
+               writeq(*(u64 *) data, addr);
+               break;
+       default:
+               DPRINTK(INFO,
+                       "writing data %lx to offset %llx, num words=%d\n",
+                       *(unsigned long *)data, off, (size >> 3));
+
+               netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
+                                           (size >> 3));
+               break;
+       }
+
+       if (mem_ptr)
+               iounmap(mem_ptr);
+       DPRINTK(INFO, "wrote %llx\n", *(unsigned long long *)data);
+
+       return ret;
+}
+
+int netxen_nic_pci_mem_read_ioctl(struct netxen_adapter *adapter,
+                                 u64 off, void *data, int size)
+{
+       void *addr;
+       int ret = 0;
+       u8 *mem_ptr = NULL;
+       unsigned long mem_base;
+       unsigned long mem_page;
+
+       if (data == NULL || off > (128 * 1024 * 1024)) {
+               printk(KERN_ERR "%s: data: %p off:%llx\n",
+                      netxen_nic_driver_name, data, off);
+               return 1;
+       }
+       off = netxen_nic_pci_set_window(adapter, off);
+       /* Corner case: a malicious user could access the last few bytes of a
+          mapped range and then try to access addresses beyond its end.
+        */
+       if (!pci_base(adapter, off + size - 1) && pci_base(adapter, off)) {
+               printk(KERN_ERR "%s: Invalid access to memory address range"
+                      " 0x%llx - 0x%llx\n", netxen_nic_driver_name, off,
+                      off + size);
+               return 1;
+       }
+       addr = pci_base_offset(adapter, off);
+       if (!addr) {
+               mem_base = pci_resource_start(adapter->ahw.pdev, 0);
+               mem_page = off & PAGE_MASK;
+               /* Map two pages whenever user tries to access addresses in two
+                  consecutive pages.
+                */
+               if (mem_page != ((off + size - 1) & PAGE_MASK))
+                       mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
+               else
+                       mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
+               if (mem_ptr == 0UL) {
+                       *(u8 *) data = 0;
+                       return 1;
+               }
+               addr = mem_ptr;
+               addr += off & (PAGE_SIZE - 1);
+       }
+       switch (size) {
+       case 1:
+               *(u8 *) data = readb(addr);
+               break;
+       case 2:
+               *(u16 *) data = readw(addr);
+               break;
+       case 4:
+               *(u32 *) data = readl(addr);
+               break;
+       case 8:
+               *(u64 *) data = readq(addr);
+               break;
+       default:
+               netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
+                                          (size >> 3));
+               break;
+       }
+
+       if (mem_ptr)
+               iounmap(mem_ptr);
+       DPRINTK(INFO, "read %llx\n", *(unsigned long long *)data);
+
+       return ret;
+}
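/*
 * A minimal sketch (not from this patch) of exercising the ioctl read
 * helper above; the function name and the offset are placeholders, and
 * the caller is assumed to hold whatever locking the ioctl path requires.
 */
static int netxen_read_one_reg(struct netxen_adapter *adapter, u64 off)
{
        u32 val = 0;

        if (netxen_nic_hw_read_ioctl(adapter, off, &val, 4))
                return -EIO;

        printk(KERN_DEBUG "%s: CRB 0x%llx = 0x%08x\n",
               netxen_nic_driver_name, (unsigned long long)off, val);
        return 0;
}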
index 201a636b7ab8b199e3f668f31bd30e2317b3a90d..0685633a9c1ea9e4626339246fcd1779fe2ebe0e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -83,8 +83,8 @@ struct netxen_adapter;
 #define NETXEN_PCI_MAPSIZE_BYTES  (NETXEN_PCI_MAPSIZE << 20)
 
 #define NETXEN_NIC_LOCKED_READ_REG(X, Y)       \
-       addr = pci_base_offset(adapter, (X));   \
-       *(u32 *)Y = readl(addr);
+       addr = pci_base_offset(adapter, X);     \
+       *(u32 *)Y = readl((void __iomem*) addr);
 
 struct netxen_port;
 void netxen_nic_set_link_parameters(struct netxen_port *port);
index 0dca029bc3e5ae6a4fdda8bd6999e5c480d7f9ed..869725f0bb1861cda3e726664c7e3c23f9e7b507 100644 (file)
@@ -1,25 +1,25 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
- *                            
+ *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *                                   
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -137,6 +137,8 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
                return err;
        }
        /* Window 1 call */
+       writel(MPORT_SINGLE_FUNCTION_MODE,
+              NETXEN_CRB_NORMALIZE(adapter, CRB_MPORT_MODE));
        writel(PHAN_INITIALIZE_ACK,
               NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
 
@@ -184,15 +186,12 @@ void netxen_initialize_adapter_sw(struct netxen_adapter *adapter)
                        for (i = 0; i < num_rx_bufs; i++) {
                                rx_buf->ref_handle = i;
                                rx_buf->state = NETXEN_BUFFER_FREE;
-
                                DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:"
                                        "%p\n", ctxid, i, rx_buf);
                                rx_buf++;
                        }
                }
        }
-       DPRINTK(INFO, "initialized buffers for %s and %s\n",
-               "adapter->free_cmd_buf_list", "adapter->free_rxbuf");
 }
 
 void netxen_initialize_adapter_hw(struct netxen_adapter *adapter)
@@ -212,37 +211,36 @@ void netxen_initialize_adapter_hw(struct netxen_adapter *adapter)
 
 void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
 {
-       struct netxen_drvops *ops = adapter->ops;
        switch (adapter->ahw.board_type) {
        case NETXEN_NIC_GBE:
-               ops->enable_phy_interrupts =
+               adapter->enable_phy_interrupts =
                    netxen_niu_gbe_enable_phy_interrupts;
-               ops->disable_phy_interrupts =
+               adapter->disable_phy_interrupts =
                    netxen_niu_gbe_disable_phy_interrupts;
-               ops->handle_phy_intr = netxen_nic_gbe_handle_phy_intr;
-               ops->macaddr_set = netxen_niu_macaddr_set;
-               ops->set_mtu = netxen_nic_set_mtu_gb;
-               ops->set_promisc = netxen_niu_set_promiscuous_mode;
-               ops->unset_promisc = netxen_niu_set_promiscuous_mode;
-               ops->phy_read = netxen_niu_gbe_phy_read;
-               ops->phy_write = netxen_niu_gbe_phy_write;
-               ops->init_port = netxen_niu_gbe_init_port;
-               ops->init_niu = netxen_nic_init_niu_gb;
-               ops->stop_port = netxen_niu_disable_gbe_port;
+               adapter->handle_phy_intr = netxen_nic_gbe_handle_phy_intr;
+               adapter->macaddr_set = netxen_niu_macaddr_set;
+               adapter->set_mtu = netxen_nic_set_mtu_gb;
+               adapter->set_promisc = netxen_niu_set_promiscuous_mode;
+               adapter->unset_promisc = netxen_niu_set_promiscuous_mode;
+               adapter->phy_read = netxen_niu_gbe_phy_read;
+               adapter->phy_write = netxen_niu_gbe_phy_write;
+               adapter->init_port = netxen_niu_gbe_init_port;
+               adapter->init_niu = netxen_nic_init_niu_gb;
+               adapter->stop_port = netxen_niu_disable_gbe_port;
                break;
 
        case NETXEN_NIC_XGBE:
-               ops->enable_phy_interrupts =
+               adapter->enable_phy_interrupts =
                    netxen_niu_xgbe_enable_phy_interrupts;
-               ops->disable_phy_interrupts =
+               adapter->disable_phy_interrupts =
                    netxen_niu_xgbe_disable_phy_interrupts;
-               ops->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr;
-               ops->macaddr_set = netxen_niu_xg_macaddr_set;
-               ops->set_mtu = netxen_nic_set_mtu_xgb;
-               ops->init_port = netxen_niu_xg_init_port;
-               ops->set_promisc = netxen_niu_xg_set_promiscuous_mode;
-               ops->unset_promisc = netxen_niu_xg_set_promiscuous_mode;
-               ops->stop_port = netxen_niu_disable_xg_port;
+               adapter->handle_phy_intr = netxen_nic_xgbe_handle_phy_intr;
+               adapter->macaddr_set = netxen_niu_xg_macaddr_set;
+               adapter->set_mtu = netxen_nic_set_mtu_xgb;
+               adapter->init_port = netxen_niu_xg_init_port;
+               adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
+               adapter->unset_promisc = netxen_niu_xg_set_promiscuous_mode;
+               adapter->stop_port = netxen_niu_disable_xg_port;
                break;
 
        default:
@@ -383,8 +381,8 @@ int netxen_rom_wip_poll(struct netxen_adapter *adapter)
        return 0;
 }
 
-static inline int do_rom_fast_write(struct netxen_adapter *adapter,
-                                   int addr, int data)
+static inline int do_rom_fast_write(struct netxen_adapter *adapter, int addr,
+                                   int data)
 {
        if (netxen_rom_wren(adapter)) {
                return -1;
@@ -622,6 +620,43 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
        return 0;
 }
 
+int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
+{
+       uint64_t addr;
+       uint32_t hi;
+       uint32_t lo;
+
+       adapter->dummy_dma.addr =
+           pci_alloc_consistent(adapter->ahw.pdev,
+                                NETXEN_HOST_DUMMY_DMA_SIZE,
+                                &adapter->dummy_dma.phys_addr);
+       if (adapter->dummy_dma.addr == NULL) {
+               printk("%s: ERROR: Could not allocate dummy DMA memory\n",
+                      __FUNCTION__);
+               return -ENOMEM;
+       }
+
+       addr = (uint64_t) adapter->dummy_dma.phys_addr;
+       hi = (addr >> 32) & 0xffffffff;
+       lo = addr & 0xffffffff;
+
+       writel(hi, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI));
+       writel(lo, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO));
+
+       return 0;
+}
+
+void netxen_free_adapter_offload(struct netxen_adapter *adapter)
+{
+       if (adapter->dummy_dma.addr) {
+               pci_free_consistent(adapter->ahw.pdev,
+                                   NETXEN_HOST_DUMMY_DMA_SIZE,
+                                   adapter->dummy_dma.addr,
+                                   adapter->dummy_dma.phys_addr);
+               adapter->dummy_dma.addr = NULL;
+       }
+}
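
The two routines added above follow the usual pattern for a coherent ("consistent") DMA buffer: allocate it once at probe time, hand the 64-bit bus address to the firmware as two 32-bit register writes, and free it with the same size/address pair on teardown. A minimal sketch of that pattern follows; the register arguments and the buffer size are placeholders, not the NetXen values.

#include <linux/pci.h>
#include <linux/types.h>
#include <asm/io.h>

#define EXAMPLE_DUMMY_DMA_SIZE	(64 * 1024)	/* placeholder size */

struct example_dummy_dma {
	void		*addr;		/* CPU virtual address */
	dma_addr_t	phys_addr;	/* bus address seen by the NIC */
};

/* Allocate a coherent buffer and tell the device where it lives. */
static int example_setup_dummy_dma(struct pci_dev *pdev,
				   struct example_dummy_dma *d,
				   void __iomem *reg_hi, void __iomem *reg_lo)
{
	u64 bus;

	d->addr = pci_alloc_consistent(pdev, EXAMPLE_DUMMY_DMA_SIZE,
				       &d->phys_addr);
	if (d->addr == NULL)
		return -ENOMEM;

	bus = (u64)d->phys_addr;
	writel((u32)(bus >> 32), reg_hi);	/* high 32 bits of bus address */
	writel((u32)(bus & 0xffffffff), reg_lo);	/* low 32 bits */
	return 0;
}

static void example_free_dummy_dma(struct pci_dev *pdev,
				   struct example_dummy_dma *d)
{
	if (d->addr) {
		pci_free_consistent(pdev, EXAMPLE_DUMMY_DMA_SIZE,
				    d->addr, d->phys_addr);
		d->addr = NULL;
	}
}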
+
 void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
 {
        u32 val = 0;
@@ -656,7 +691,8 @@ int netxen_nic_rx_has_work(struct netxen_adapter *adapter)
                desc_head = recv_ctx->rcv_status_desc_head;
                desc = &desc_head[consumer];
 
-               if (((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST))
+               if (((le16_to_cpu(netxen_get_sts_owner(desc)))
+                    & STATUS_OWNER_HOST))
                        return 1;
        }
 
@@ -710,12 +746,13 @@ static inline int netxen_nic_check_temp(struct netxen_adapter *adapter)
        return rv;
 }
 
-void netxen_watchdog_task(unsigned long v)
+void netxen_watchdog_task(struct work_struct *work)
 {
        int port_num;
        struct netxen_port *port;
        struct net_device *netdev;
-       struct netxen_adapter *adapter = (struct netxen_adapter *)v;
+       struct netxen_adapter *adapter =
+               container_of(work, struct netxen_adapter, watchdog_task);
 
        if (netxen_nic_check_temp(adapter))
                return;
@@ -734,8 +771,8 @@ void netxen_watchdog_task(unsigned long v)
                        netif_wake_queue(netdev);
        }
 
-       if (adapter->ops->handle_phy_intr)
-               adapter->ops->handle_phy_intr(adapter);
+       if (adapter->handle_phy_intr)
+               adapter->handle_phy_intr(adapter);
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 }
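
The watchdog above is the common timer/workqueue ping-pong: the kernel timer fires in softirq context, where sleeping is not allowed, so it only queues a work item; the work handler does the real (possibly sleeping) checks and then re-arms the timer two seconds out. A hedged sketch of that shape, with invented names:

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_adapter {
	struct timer_list	watchdog_timer;
	struct work_struct	watchdog_task;
};

/* Timer callback: softirq context, so just hand off to process context. */
static void example_watchdog(unsigned long data)
{
	struct example_adapter *adapter = (struct example_adapter *)data;

	schedule_work(&adapter->watchdog_task);
}

/* Work handler: may sleep; do the checks, then re-arm the timer. */
static void example_watchdog_task(struct work_struct *work)
{
	struct example_adapter *adapter =
		container_of(work, struct example_adapter, watchdog_task);

	/* ... link and temperature checks would go here ... */

	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

static void example_watchdog_init(struct example_adapter *adapter)
{
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = example_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;
	INIT_WORK(&adapter->watchdog_task, example_watchdog_task);
}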
 
@@ -748,19 +785,19 @@ void
 netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
                   struct status_desc *desc)
 {
-       struct netxen_port *port = adapter->port[STATUS_DESC_PORT(desc)];
+       struct netxen_port *port = adapter->port[netxen_get_sts_port(desc)];
        struct pci_dev *pdev = port->pdev;
        struct net_device *netdev = port->netdev;
-       int index = le16_to_cpu(desc->reference_handle);
+       int index = le16_to_cpu(netxen_get_sts_refhandle(desc));
        struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
        struct netxen_rx_buffer *buffer;
        struct sk_buff *skb;
-       u32 length = le16_to_cpu(desc->total_length);
+       u32 length = le16_to_cpu(netxen_get_sts_totallength(desc));
        u32 desc_ctx;
        struct netxen_rcv_desc_ctx *rcv_desc;
        int ret;
 
-       desc_ctx = STATUS_DESC_TYPE(desc);
+       desc_ctx = netxen_get_sts_type(desc);
        if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
                printk("%s: %s Bad Rcv descriptor ring\n",
                       netxen_nic_driver_name, netdev->name);
@@ -768,20 +805,49 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
        }
 
        rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
+       if (unlikely(index > rcv_desc->max_rx_desc_count)) {
+               DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
+                       index, rcv_desc->max_rx_desc_count);
+               return;
+       }
        buffer = &rcv_desc->rx_buf_arr[index];
+       if (desc_ctx == RCV_DESC_LRO_CTXID) {
+               buffer->lro_current_frags++;
+               if (netxen_get_sts_desc_lro_last_frag(desc)) {
+                       buffer->lro_expected_frags =
+                           netxen_get_sts_desc_lro_cnt(desc);
+                       buffer->lro_length = length;
+               }
+               if (buffer->lro_current_frags != buffer->lro_expected_frags) {
+                       if (buffer->lro_expected_frags != 0) {
+                               printk("LRO: (refhandle:%x) recv frag. "
+                                      "wait for last. flags: %x expected:%d "
+                                      "have:%d\n", index,
+                                      netxen_get_sts_desc_lro_last_frag(desc),
+                                      buffer->lro_expected_frags,
+                                      buffer->lro_current_frags);
+                       }
+                       return;
+               }
+       }
 
        pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
                         PCI_DMA_FROMDEVICE);
 
        skb = (struct sk_buff *)buffer->skb;
 
-       if (likely(STATUS_DESC_STATUS(desc) == STATUS_CKSUM_OK)) {
+       if (likely(netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) {
                port->stats.csummed++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-       } else
-               skb->ip_summed = CHECKSUM_NONE;
+       }
        skb->dev = netdev;
-       skb_put(skb, length);
+       if (desc_ctx == RCV_DESC_LRO_CTXID) {
+               /* True length was only available on the last pkt */
+               skb_put(skb, buffer->lro_length);
+       } else {
+               skb_put(skb, length);
+       }
+
        skb->protocol = eth_type_trans(skb, netdev);
 
        ret = netif_receive_skb(skb);
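
The LRO branch added in this hunk defers the skb until every hardware fragment of an aggregated frame has arrived: each status descriptor bumps a per-buffer fragment count, the descriptor flagged as the last fragment records the expected count and the true length, and only when the two counts meet is the buffer unmapped and passed up the stack. A stripped-down sketch of that accounting (field and helper names are illustrative, not the driver's own):

#include <linux/types.h>

/* Per-receive-buffer LRO bookkeeping, illustrative only. */
struct example_rx_buffer {
	u32 lro_current_frags;	/* fragments seen so far */
	u32 lro_expected_frags;	/* set when the last-fragment descriptor arrives */
	u32 lro_length;		/* true length, known only on the last fragment */
};

/*
 * Returns 1 when the aggregated frame is complete and may be handed to
 * the stack, 0 while more fragments are still expected.
 */
static int example_lro_frame_complete(struct example_rx_buffer *buf,
				      int is_last_frag, u32 frag_count,
				      u32 length)
{
	buf->lro_current_frags++;
	if (is_last_frag) {
		buf->lro_expected_frags = frag_count;
		buf->lro_length = length;
	}
	return buf->lro_expected_frags != 0 &&
	       buf->lro_current_frags == buf->lro_expected_frags;
}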
@@ -827,6 +893,8 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
        adapter->stats.post_called++;
        buffer->skb = NULL;
        buffer->state = NETXEN_BUFFER_FREE;
+       buffer->lro_current_frags = 0;
+       buffer->lro_expected_frags = 0;
 
        port->stats.no_rcv++;
        port->stats.rxbytes += length;
@@ -839,6 +907,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
        struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
        struct status_desc *desc;       /* used to read status desc here */
        u32 consumer = recv_ctx->status_rx_consumer;
+       u32 producer = 0;
        int count = 0, ring;
 
        DPRINTK(INFO, "processing receive\n");
@@ -850,18 +919,22 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
         */
        while (count < max) {
                desc = &desc_head[consumer];
-               if (!((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) {
-                       DPRINTK(ERR, "desc %p ownedby %x\n", desc, desc->owner);
+               if (!
+                   (le16_to_cpu(netxen_get_sts_owner(desc)) &
+                    STATUS_OWNER_HOST)) {
+                       DPRINTK(ERR, "desc %p ownedby %x\n", desc,
+                               netxen_get_sts_owner(desc));
                        break;
                }
                netxen_process_rcv(adapter, ctxid, desc);
-               desc->owner = STATUS_OWNER_PHANTOM;
+               netxen_clear_sts_owner(desc);
+               netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
                consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
                count++;
        }
        if (count) {
                for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-                       netxen_post_rx_buffers(adapter, ctxid, ring);
+                       netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
                }
        }
 
@@ -869,6 +942,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
        if (count) {
                adapter->stats.process_rcv++;
                recv_ctx->status_rx_consumer = consumer;
+               recv_ctx->status_rx_producer = producer;
 
                /* Window = 1 */
                writel(consumer,
@@ -881,12 +955,13 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 }
 
 /* Process Command status ring */
-void netxen_process_cmd_ring(unsigned long data)
+int netxen_process_cmd_ring(unsigned long data)
 {
        u32 last_consumer;
        u32 consumer;
        struct netxen_adapter *adapter = (struct netxen_adapter *)data;
-       int count = 0;
+       int count1 = 0;
+       int count2 = 0;
        struct netxen_cmd_buffer *buffer;
        struct netxen_port *port;       /* port #1 */
        struct netxen_port *nport;
@@ -895,6 +970,7 @@ void netxen_process_cmd_ring(unsigned long data)
        u32 i;
        struct sk_buff *skb = NULL;
        int p;
+       int done;
 
        spin_lock(&adapter->tx_lock);
        last_consumer = adapter->last_cmd_consumer;
@@ -904,14 +980,13 @@ void netxen_process_cmd_ring(unsigned long data)
         * number as part of the descriptor. This way we will be able to get
         * the netdev which is associated with that device.
         */
-       consumer =
-           readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
 
+       consumer = *(adapter->cmd_consumer);
        if (last_consumer == consumer) {        /* Ring is empty    */
                DPRINTK(INFO, "last_consumer %d == consumer %d\n",
                        last_consumer, consumer);
                spin_unlock(&adapter->tx_lock);
-               return;
+               return 1;
        }
 
        adapter->proc_cmd_buf_counter++;
@@ -922,7 +997,7 @@ void netxen_process_cmd_ring(unsigned long data)
         */
        spin_unlock(&adapter->tx_lock);
 
-       while ((last_consumer != consumer) && (count < MAX_STATUS_HANDLE)) {
+       while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) {
                buffer = &adapter->cmd_buf_arr[last_consumer];
                port = adapter->port[buffer->port];
                pdev = port->pdev;
@@ -948,24 +1023,24 @@ void netxen_process_cmd_ring(unsigned long data)
                             && netif_carrier_ok(port->netdev))
                    && ((jiffies - port->netdev->trans_start) >
                        port->netdev->watchdog_timeo)) {
-                       schedule_work(&port->adapter->tx_timeout_task);
+                       SCHEDULE_WORK(&port->adapter->tx_timeout_task);
                }
 
                last_consumer = get_next_index(last_consumer,
                                               adapter->max_tx_desc_count);
-               count++;
+               count1++;
        }
-       adapter->stats.noxmitdone += count;
+       adapter->stats.noxmitdone += count1;
 
-       count = 0;
+       count2 = 0;
        spin_lock(&adapter->tx_lock);
        if ((--adapter->proc_cmd_buf_counter) == 0) {
                adapter->last_cmd_consumer = last_consumer;
                while ((adapter->last_cmd_consumer != consumer)
-                      && (count < MAX_STATUS_HANDLE)) {
+                      && (count2 < MAX_STATUS_HANDLE)) {
                        buffer =
                            &adapter->cmd_buf_arr[adapter->last_cmd_consumer];
-                       count++;
+                       count2++;
                        if (buffer->skb)
                                break;
                        else
@@ -974,7 +1049,7 @@ void netxen_process_cmd_ring(unsigned long data)
                                                   adapter->max_tx_desc_count);
                }
        }
-       if (count) {
+       if (count1 || count2) {
                for (p = 0; p < adapter->ahw.max_ports; p++) {
                        nport = adapter->port[p];
                        if (netif_queue_stopped(nport->netdev)
@@ -984,10 +1059,30 @@ void netxen_process_cmd_ring(unsigned long data)
                        }
                }
        }
+       /*
+        * If everything is freed up to the consumer then check if the ring is
+        * full. If the ring is full then check if more needs to be freed and
+        * schedule the callback again.
+        *
+        * This happens when there are 2 CPUs. One could be freeing and the
+        * other filling it. If the ring is full when we get out of here and
+        * the card has already interrupted the host then the host can miss the
+        * interrupt.
+        *
+        * There is still a possible race condition and the host could miss an
+        * interrupt. The card has to take care of this.
+        */
+       if (adapter->last_cmd_consumer == consumer &&
+           (((adapter->cmd_producer + 1) %
+             adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) {
+               consumer = *(adapter->cmd_consumer);
+       }
+       done = (adapter->last_cmd_consumer == consumer);
 
        spin_unlock(&adapter->tx_lock);
        DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer,
                __FUNCTION__);
+       return (done);
 }
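
The return value added to netxen_process_cmd_ring() implements the re-check described in the comment above: after reclaiming completed buffers, the handler re-reads the consumer index if the ring still looks full, and reports whether it fully caught up so the caller can poll again instead of waiting for an interrupt it may already have taken. In sketch form (structure and field names are illustrative):

#include <linux/types.h>

/* Software view of the tx ring; names are illustrative. */
struct example_tx_ring {
	u32	*cmd_consumer;		/* host-memory index written back by the NIC */
	u32	last_cmd_consumer;	/* last index the driver has reclaimed up to */
	u32	cmd_producer;		/* next slot the driver will fill */
	u32	max_tx_desc_count;
};

/*
 * Reclaim completed tx buffers; returns non-zero once the driver has
 * caught up with the hardware, so the caller knows polling can stop.
 */
static int example_reclaim_tx(struct example_tx_ring *ring)
{
	u32 consumer = *ring->cmd_consumer;

	/* ... free skbs and unmap DMA for entries up to 'consumer' ... */
	ring->last_cmd_consumer = consumer;

	/*
	 * If the ring still looks completely full, the NIC may have retired
	 * more work while we were freeing; re-read the consumer so a
	 * completion that raced with us is not silently lost.
	 */
	if (((ring->cmd_producer + 1) % ring->max_tx_desc_count) ==
	    ring->last_cmd_consumer)
		consumer = *ring->cmd_consumer;

	return ring->last_cmd_consumer == consumer;
}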
 
 /*
@@ -999,17 +1094,16 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
        struct sk_buff *skb;
        struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
        struct netxen_rcv_desc_ctx *rcv_desc = NULL;
-       struct netxen_recv_crb *crbarea = &recv_crb_registers[ctx];
-       struct netxen_rcv_desc_crb *rcv_desc_crb = NULL;
-       u32 producer;
+       uint producer;
        struct rcv_desc *pdesc;
        struct netxen_rx_buffer *buffer;
        int count = 0;
        int index = 0;
+       netxen_ctx_msg msg = 0;
+       dma_addr_t dma;
 
        adapter->stats.post_called++;
        rcv_desc = &recv_ctx->rcv_desc[ringid];
-       rcv_desc_crb = &crbarea->rcv_desc_crb[ringid];
 
        producer = rcv_desc->producer;
        index = rcv_desc->begin_alloc;
@@ -1019,6 +1113,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
                skb = dev_alloc_skb(rcv_desc->skb_size);
                if (unlikely(!skb)) {
                        /*
+                        * TODO
                         * We need to schedule the posting of buffers to the pegs.
                         */
                        rcv_desc->begin_alloc = index;
@@ -1026,9 +1121,105 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
                                " allocated only %d buffers\n", count);
                        break;
                }
+
                count++;        /* now there should be no failure */
                pdesc = &rcv_desc->desc_head[producer];
-               skb_reserve(skb, NET_IP_ALIGN);
+
+#if defined(XGB_DEBUG)
+               *(unsigned long *)(skb->head) = 0xc0debabe;
+               if (skb_is_nonlinear(skb)) {
+                       printk("Allocated SKB @%p is nonlinear\n", skb);
+               }
+#endif
+               skb_reserve(skb, 2);
+               /* This will be setup when we receive the
+                * buffer after it has been filled  FSL  TBD TBD
+                * skb->dev = netdev;
+                */
+               dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
+                                    PCI_DMA_FROMDEVICE);
+               pdesc->addr_buffer = dma;
+               buffer->skb = skb;
+               buffer->state = NETXEN_BUFFER_BUSY;
+               buffer->dma = dma;
+               /* make a rcv descriptor  */
+               pdesc->reference_handle = buffer->ref_handle;
+               pdesc->buffer_length = rcv_desc->dma_size;
+               DPRINTK(INFO, "done writing descriptor\n");
+               producer =
+                   get_next_index(producer, rcv_desc->max_rx_desc_count);
+               index = get_next_index(index, rcv_desc->max_rx_desc_count);
+               buffer = &rcv_desc->rx_buf_arr[index];
+       }
+       /* if we did allocate buffers, then write the count to Phantom */
+       if (count) {
+               rcv_desc->begin_alloc = index;
+               rcv_desc->rcv_pending += count;
+               adapter->stats.lastposted = count;
+               adapter->stats.posted += count;
+               rcv_desc->producer = producer;
+               if (rcv_desc->rcv_free >= 32) {
+                       rcv_desc->rcv_free = 0;
+                       /* Window = 1 */
+                       writel((producer - 1) &
+                              (rcv_desc->max_rx_desc_count - 1),
+                              NETXEN_CRB_NORMALIZE(adapter,
+                                                   recv_crb_registers[0].
+                                                   rcv_desc_crb[ringid].
+                                                   crb_rcv_producer_offset));
+                       /*
+                        * Write a doorbell msg to tell phanmon of change in
+                        * receive ring producer
+                        */
+                       netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
+                       netxen_set_msg_privid(msg);
+                       netxen_set_msg_count(msg,
+                                            ((producer -
+                                              1) & (rcv_desc->
+                                                    max_rx_desc_count - 1)));
+                       netxen_set_msg_ctxid(msg, 0);
+                       netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
+                       writel(msg,
+                              DB_NORMALIZE(adapter,
+                                           NETXEN_RCV_PRODUCER_OFFSET));
+               }
+       }
+}
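
netxen_post_rx_buffers() above shows the standard receive-refill loop: allocate an skb, DMA-map its data area, fill a receive descriptor with the bus address, handle and length, advance the producer, and finally publish the new producer index to the adapter (here followed by a doorbell message to the firmware). The sketch below keeps only the refill loop and the final producer write; the names are illustrative, not the driver's API.

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/io.h>

struct example_rcv_desc {
	__le64 addr_buffer;
	__le16 reference_handle;
	__le16 buffer_length;
};

/* Post up to 'count' empty receive buffers and publish the new producer. */
static void example_post_rx_buffers(struct pci_dev *pdev,
				    struct example_rcv_desc *ring,
				    u32 ring_size, u32 *producer,
				    u32 buf_size, int count,
				    void __iomem *producer_reg)
{
	int posted = 0;

	while (posted < count) {
		struct sk_buff *skb = dev_alloc_skb(buf_size);
		struct example_rcv_desc *desc;
		dma_addr_t dma;

		if (!skb)
			break;	/* try again later; never post a NULL buffer */

		skb_reserve(skb, 2);	/* align the IP header */
		dma = pci_map_single(pdev, skb->data, buf_size,
				     PCI_DMA_FROMDEVICE);

		/*
		 * A real driver also records skb and dma in a shadow array so
		 * the buffer can be unmapped and freed on completion; omitted.
		 */
		desc = &ring[*producer];
		desc->addr_buffer = cpu_to_le64(dma);
		desc->reference_handle = cpu_to_le16(*producer);
		desc->buffer_length = cpu_to_le16(buf_size);

		*producer = (*producer + 1) & (ring_size - 1);
		posted++;
	}

	if (posted) {
		wmb();		/* descriptor stores before the producer write */
		writel((*producer - 1) & (ring_size - 1), producer_reg);
	}
}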
+
+void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx,
+                                uint32_t ringid)
+{
+       struct pci_dev *pdev = adapter->ahw.pdev;
+       struct sk_buff *skb;
+       struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
+       struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+       u32 producer;
+       struct rcv_desc *pdesc;
+       struct netxen_rx_buffer *buffer;
+       int count = 0;
+       int index = 0;
+
+       adapter->stats.post_called++;
+       rcv_desc = &recv_ctx->rcv_desc[ringid];
+
+       producer = rcv_desc->producer;
+       index = rcv_desc->begin_alloc;
+       buffer = &rcv_desc->rx_buf_arr[index];
+       /* We can start writing rx descriptors into the phantom memory. */
+       while (buffer->state == NETXEN_BUFFER_FREE) {
+               skb = dev_alloc_skb(rcv_desc->skb_size);
+               if (unlikely(!skb)) {
+                       /*
+                        * We need to schedule the posting of buffers to the pegs.
+                        */
+                       rcv_desc->begin_alloc = index;
+                       DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
+                               " allocated only %d buffers\n", count);
+                       break;
+               }
+               count++;        /* now there should be no failure */
+               pdesc = &rcv_desc->desc_head[producer];
+               skb_reserve(skb, 2);
                /* 
                 * This will be setup when we receive the
                 * buffer after it has been filled
@@ -1039,6 +1230,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
                buffer->dma = pci_map_single(pdev, skb->data,
                                             rcv_desc->dma_size,
                                             PCI_DMA_FROMDEVICE);
+
                /* make a rcv descriptor  */
                pdesc->reference_handle = le16_to_cpu(buffer->ref_handle);
                pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size);
@@ -1063,7 +1255,8 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
                        writel((producer - 1) &
                               (rcv_desc->max_rx_desc_count - 1),
                               NETXEN_CRB_NORMALIZE(adapter,
-                                                   rcv_desc_crb->
+                                                   recv_crb_registers[0].
+                                                   rcv_desc_crb[ringid].
                                                    crb_rcv_producer_offset));
                        wmb();
                }
@@ -1196,8 +1389,8 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
 
        switch (data.cmd) {
        case netxen_nic_cmd_pci_read:
-               if ((retval = netxen_nic_hw_read_wx(adapter, data.off,
-                                                   &(data.u), data.size)))
+               if ((retval = netxen_nic_hw_read_ioctl(adapter, data.off,
+                                                      &(data.u), data.size)))
                        goto error_out;
                if (copy_to_user
                    ((void __user *)&(up_data->u), &(data.u), data.size)) {
@@ -1210,8 +1403,35 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
                break;
 
        case netxen_nic_cmd_pci_write:
-               data.rv = netxen_nic_hw_write_wx(adapter, data.off, &(data.u),
-                                                data.size);
+               if ((retval = netxen_nic_hw_write_ioctl(adapter, data.off,
+                                                       &(data.u), data.size)))
+                       goto error_out;
+               data.rv = 0;
+               break;
+
+       case netxen_nic_cmd_pci_mem_read:
+               if (netxen_nic_pci_mem_read_ioctl(adapter, data.off, &(data.u),
+                                                 data.size)) {
+                       DPRINTK(ERR, "Failed to read the data.\n");
+                       retval = -EFAULT;
+                       goto error_out;
+               }
+               if (copy_to_user
+                   ((void __user *)&(up_data->u), &(data.u), data.size)) {
+                       DPRINTK(ERR, "bad copy to userland: %d\n",
+                               (int)sizeof(data));
+                       retval = -EFAULT;
+                       goto error_out;
+               }
+               data.rv = 0;
+               break;
+
+       case netxen_nic_cmd_pci_mem_write:
+               if ((retval = netxen_nic_pci_mem_write_ioctl(adapter, data.off,
+                                                            &(data.u),
+                                                            data.size)))
+                       goto error_out;
+               data.rv = 0;
                break;
 
        case netxen_nic_cmd_pci_config_read:
@@ -1296,7 +1516,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
                retval = -EOPNOTSUPP;
                goto error_out;
        }
-       put_user(data.rv, (u16 __user *) (&(up_data->rv)));
+       put_user(data.rv, (&(up_data->rv)));
        DPRINTK(INFO, "done ioctl for %p well.\n", adapter);
 
       error_out:
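
The ioctl cases added above all follow the same shape: the command structure has already been copied in from user space, the requested register or memory read is done into the kernel-side copy, and only then is the result copied back out with copy_to_user()/put_user(), returning -EFAULT on any failure. A reduced sketch of the read path; the command layout and helper callback here are invented for illustration.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

struct example_ioctl_data {
	u64 off;	/* register or memory offset requested by user space */
	u64 u;		/* value read back */
	u32 size;
	u32 rv;		/* per-command status reported to user space */
};

static int example_ioctl_read(struct example_ioctl_data *data,
			      struct example_ioctl_data __user *up_data,
			      int (*hw_read)(u64 off, void *buf, u32 size))
{
	int retval;

	retval = hw_read(data->off, &data->u, data->size);
	if (retval)
		return retval;

	/* Copy only the result field back into the caller's buffer. */
	if (copy_to_user(&up_data->u, &data->u, data->size))
		return -EFAULT;

	data->rv = 0;
	return put_user(data->rv, &up_data->rv);
}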
index 23e53adbf123f3dcb6fb471ec2f3daabe88b377d..1221fa527552621cb76ed60837457436a6ed1aa3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -36,7 +36,7 @@
 #define NETXEN_NIC_CMD         (NETXEN_CMD_START + 1)
 #define NETXEN_NIC_NAME                (NETXEN_CMD_START + 2)
 #define NETXEN_NIC_NAME_LEN    16
-#define NETXEN_NIC_NAME_RSP    "NETXEN"
+#define NETXEN_NIC_NAME_RSP    "NETXEN-UNM"
 
 typedef enum {
        netxen_nic_cmd_none = 0,
index ae180fee800812be971239e7ec72729285494766..1b45f50fa6aa50687846b26c23bad5679b16ee0e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -68,8 +68,7 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
 void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno,
                                 u32 link)
 {
-       struct netxen_port *pport = adapter->port[portno];
-       struct net_device *netdev = pport->netdev;
+       struct net_device *netdev = (adapter->port[portno])->netdev;
 
        if (link)
                netif_carrier_on(netdev);
@@ -84,46 +83,41 @@ void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno,
        struct netxen_port *port;
 
        /*  This should clear the interrupt source */
-       if (adapter->ops->phy_read)
-               adapter->ops->phy_read(adapter, portno,
-                                      NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
-                                      &int_src);
+       if (adapter->phy_read)
+               adapter->phy_read(adapter, portno,
+                                 NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS,
+                                 &int_src);
        if (int_src == 0) {
                DPRINTK(INFO, "No phy interrupts for port #%d\n", portno);
                return;
        }
-       if (adapter->ops->disable_phy_interrupts)
-               adapter->ops->disable_phy_interrupts(adapter, portno);
+       if (adapter->disable_phy_interrupts)
+               adapter->disable_phy_interrupts(adapter, portno);
 
        port = adapter->port[portno];
 
        if (netxen_get_phy_int_jabber(int_src))
-               DPRINTK(INFO, "NetXen: %s Jabber interrupt \n",
-                       port->netdev->name);
+               DPRINTK(INFO, "Jabber interrupt \n");
 
        if (netxen_get_phy_int_polarity_changed(int_src))
-               DPRINTK(INFO, "NetXen: %s POLARITY CHANGED int \n",
-                       port->netdev->name);
+               DPRINTK(INFO, "POLARITY CHANGED int \n");
 
        if (netxen_get_phy_int_energy_detect(int_src))
-               DPRINTK(INFO, "NetXen: %s ENERGY DETECT INT \n",
-                       port->netdev->name);
+               DPRINTK(INFO, "ENERGY DETECT INT \n");
 
        if (netxen_get_phy_int_downshift(int_src))
-               DPRINTK(INFO, "NetXen: %s DOWNSHIFT INT \n",
-                       port->netdev->name);
+               DPRINTK(INFO, "DOWNSHIFT INT \n");
        /* write it down later.. */
        if ((netxen_get_phy_int_speed_changed(int_src))
            || (netxen_get_phy_int_link_status_changed(int_src))) {
                __le32 status;
 
-               DPRINTK(INFO, "NetXen: %s SPEED CHANGED OR"
-                       " LINK STATUS CHANGED \n", port->netdev->name);
+               DPRINTK(INFO, "SPEED CHANGED OR LINK STATUS CHANGED \n");
 
-               if (adapter->ops->phy_read
-                   && adapter->ops->phy_read(adapter, portno,
-                                             NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
-                                             &status) == 0) {
+               if (adapter->phy_read
+                   && adapter->phy_read(adapter, portno,
+                                        NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+                                        &status) == 0) {
                        if (netxen_get_phy_int_link_status_changed(int_src)) {
                                if (netxen_get_phy_link(status)) {
                                        netxen_niu_gbe_init_port(adapter,
@@ -143,8 +137,8 @@ void netxen_handle_port_int(struct netxen_adapter *adapter, u32 portno,
                        }
                }
        }
-       if (adapter->ops->enable_phy_interrupts)
-               adapter->ops->enable_phy_interrupts(adapter, portno);
+       if (adapter->enable_phy_interrupts)
+               adapter->enable_phy_interrupts(adapter, portno);
 }
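
This file, like the hardware-setup hunk earlier in the patch, drops the separately allocated netxen_drvops and keeps the per-board-type callbacks directly inside the adapter structure; every call site then tests the pointer before invoking it, since not every board provides every operation. The shape, reduced to a sketch with invented names:

#include <linux/types.h>

/* Per-board-type callbacks embedded directly in the adapter. */
struct example_adapter {
	int	board_type;
	int	(*phy_read)(struct example_adapter *adapter, int port,
			    long reg, u32 *val);
	void	(*enable_phy_interrupts)(struct example_adapter *adapter,
					 int port);
};

static int example_gbe_phy_read(struct example_adapter *adapter, int port,
				long reg, u32 *val)
{
	*val = 0;	/* real code would talk to the MII management block */
	return 0;
}

/* Pick the callbacks once, at init time, based on the board type. */
static void example_init_ops(struct example_adapter *adapter)
{
	if (adapter->board_type == 0 /* GbE */)
		adapter->phy_read = example_gbe_phy_read;
	/* XGbE boards would install different handlers here. */
}

/* Call sites guard against callbacks a given board does not provide. */
static void example_handle_port_int(struct example_adapter *adapter, int port)
{
	u32 int_src = 0;

	if (adapter->phy_read)
		adapter->phy_read(adapter, port, 0 /* int-status reg */, &int_src);
	if (adapter->enable_phy_interrupts)
		adapter->enable_phy_interrupts(adapter, port);
}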
 
 void netxen_nic_isr_other(struct netxen_adapter *adapter)
@@ -159,8 +153,7 @@ void netxen_nic_isr_other(struct netxen_adapter *adapter)
 
        qg_linksup = adapter->ahw.qg_linksup;
        adapter->ahw.qg_linksup = val;
-       DPRINTK(1, INFO, "%s: link update 0x%08x\n", netxen_nic_driver_name,
-               val);
+       DPRINTK(INFO, "link update 0x%08x\n", val);
        for (portno = 0; portno < NETXEN_NIU_MAX_GBE_PORTS; portno++) {
                linkup = val & 1;
                if (linkup != (qg_linksup & 1)) {
index 1cb662d5bd7610af2c3977d5b69559f540f9bd97..575b71b672028e4896cc347efe76cbb2a3ef7103 100644 (file)
@@ -1,25 +1,25 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * of the License, or (at your option) any later version.
- *                            
+ *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *                                   
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
@@ -32,6 +32,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
 #include "netxen_nic_hw.h"
 
 #include "netxen_nic.h"
@@ -48,14 +49,21 @@ MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
 
-char netxen_nic_driver_name[] = "netxen";
+char netxen_nic_driver_name[] = "netxen-nic";
 static char netxen_nic_driver_string[] = "NetXen Network Driver version "
     NETXEN_NIC_LINUX_VERSIONID;
 
+struct netxen_adapter *g_adapter = NULL;
+
 #define NETXEN_NETDEV_WEIGHT 120
 #define NETXEN_ADAPTER_UP_MAGIC 777
 #define NETXEN_NIC_PEG_TUNE 0
 
+u8 nx_p2_id = NX_P2_C0;
+
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#define DMA_35BIT_MASK 0x00000007ffffffffULL
+
 /* Local functions to NetXen NIC driver */
 static int __devinit netxen_nic_probe(struct pci_dev *pdev,
                                      const struct pci_device_id *ent);
@@ -64,7 +72,7 @@ static int netxen_nic_open(struct net_device *netdev);
 static int netxen_nic_close(struct net_device *netdev);
 static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
 static void netxen_tx_timeout(struct net_device *netdev);
-static void netxen_tx_timeout_task(struct net_device *netdev);
+static void netxen_tx_timeout_task(struct work_struct *work);
 static void netxen_watchdog(unsigned long);
 static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
 static int netxen_nic_ioctl(struct net_device *netdev,
@@ -87,6 +95,9 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
 
 MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
 
+struct workqueue_struct *netxen_workq;
+static void netxen_watchdog(unsigned long);
+
 /*
  * netxen_nic_probe()
  *
@@ -105,20 +116,28 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct net_device *netdev = NULL;
        struct netxen_adapter *adapter = NULL;
        struct netxen_port *port = NULL;
-       u8 *mem_ptr0 = NULL;
-       u8 *mem_ptr1 = NULL;
-       u8 *mem_ptr2 = NULL;
+       void __iomem *mem_ptr0 = NULL;
+       void __iomem *mem_ptr1 = NULL;
+       void __iomem *mem_ptr2 = NULL;
 
-       unsigned long mem_base, mem_len;
+       u8 *db_ptr = NULL;
+       unsigned long mem_base, mem_len, db_base, db_len;
        int pci_using_dac, i, err;
        int ring;
        struct netxen_recv_context *recv_ctx = NULL;
        struct netxen_rcv_desc_ctx *rcv_desc = NULL;
        struct netxen_cmd_buffer *cmd_buf_arr = NULL;
        u64 mac_addr[FLASH_NUM_PORTS + 1];
-       int valid_mac;
+       int valid_mac = 0;
+       static int netxen_cards_found = 0;
 
        printk(KERN_INFO "%s \n", netxen_nic_driver_string);
+       /* In current scheme, we use only PCI function 0 */
+       if (PCI_FUNC(pdev->devfn) != 0) {
+               DPRINTK(ERR, "NetXen function %d will not be enabled.\n",
+                       PCI_FUNC(pdev->devfn));
+               return -ENODEV;
+       }
        if ((err = pci_enable_device(pdev)))
                return err;
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
@@ -130,10 +149,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_disable_pdev;
 
        pci_set_master(pdev);
-       if ((pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) &&
-           (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0))
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id);
+       if (nx_p2_id == NX_P2_C1 &&
+           (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
+           (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
                pci_using_dac = 1;
-       else {
+       } else {
                if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
                        goto err_out_free_res;
@@ -153,21 +174,34 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
            ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
 
        if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) {
-               DPRINTK(1, ERR,
+               DPRINTK(ERR,
                        "Cannot remap adapter memory aborting.:"
                        "0 -> %p, 1 -> %p, 2 -> %p\n",
                        mem_ptr0, mem_ptr1, mem_ptr2);
 
                err = -EIO;
-               if (mem_ptr0)
-                       iounmap(mem_ptr0);
-               if (mem_ptr1)
-                       iounmap(mem_ptr1);
-               if (mem_ptr2)
-                       iounmap(mem_ptr2);
-
-               goto err_out_free_res;
+               goto err_out_iounmap;
+       }
+       db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
+       db_len = pci_resource_len(pdev, 4);
+
+       if (db_len == 0) {
+               printk(KERN_ERR "%s: doorbell is disabled\n",
+                      netxen_nic_driver_name);
+               err = -EIO;
+               goto err_out_iounmap;
+       }
+       DPRINTK(INFO, "doorbell ioremap from %lx, size %lx\n", db_base,
+               db_len);
+
+       db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
+       if (db_ptr == 0UL) {
+               printk(KERN_ERR "%s: Failed to allocate doorbell map.\n",
+                      netxen_nic_driver_name);
+               err = -EIO;
+               goto err_out_iounmap;
        }
+       DPRINTK(INFO, "doorbell ioremapped at %p\n", db_ptr);
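
Mapping the doorbell region added here is ordinary BAR handling: read the base and length of BAR 4 from the PCI core, refuse to continue if the BAR is absent, ioremap() a bounded window, and unmap it on the later error paths (which is why a new err_out_dbunmap label appears further down in the patch). A hedged sketch, with placeholder sizes:

#include <linux/pci.h>
#include <asm/io.h>

#define EXAMPLE_DB_BAR		4		/* doorbell lives in BAR 4 */
#define EXAMPLE_DB_MAPSIZE	(4 * 1024)	/* placeholder mapping size */

static void __iomem *example_map_doorbell(struct pci_dev *pdev)
{
	unsigned long db_base = pci_resource_start(pdev, EXAMPLE_DB_BAR);
	unsigned long db_len = pci_resource_len(pdev, EXAMPLE_DB_BAR);

	if (db_len == 0)
		return NULL;	/* BAR not implemented on this device */

	/* Map only the window we need, not necessarily the whole BAR. */
	return ioremap(db_base, EXAMPLE_DB_MAPSIZE);
}

The caller is responsible for checking the return value and for calling iounmap() on every exit path once the mapping has been made.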
 
 /*
 *      Allocate an adapter structure which will manage all the initialization
@@ -183,17 +217,24 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                       netxen_nic_driver_name,
                       (int)sizeof(struct netxen_adapter));
                err = -ENOMEM;
-               goto err_out_iounmap;
+               goto err_out_dbunmap;
        }
 
+       if (netxen_cards_found == 0) {
+               g_adapter = adapter;
+       }
        adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
        adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
        adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
+       adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
 
        pci_set_drvdata(pdev, adapter);
 
        cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
        if (cmd_buf_arr == NULL) {
+               printk(KERN_ERR
+                      "%s: Could not allocate cmd_buf_arr memory:%d\n",
+                      netxen_nic_driver_name, (int)TX_RINGSIZE);
                err = -ENOMEM;
                goto err_out_free_adapter;
        }
@@ -220,11 +261,23 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
                                break;
 
+                       case RCV_RING_LRO:
+                               rcv_desc->max_rx_desc_count =
+                                   adapter->max_lro_rx_desc_count;
+                               rcv_desc->flags = RCV_DESC_LRO;
+                               rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
+                               rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
+                               break;
+
                        }
                        rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
                            vmalloc(RCV_BUFFSIZE);
 
                        if (rcv_desc->rx_buf_arr == NULL) {
+                               printk(KERN_ERR "%s: Could not allocate "
+                                      "rcv_desc->rx_buf_arr memory:%d\n",
+                                      netxen_nic_driver_name,
+                                      (int)RCV_BUFFSIZE);
                                err = -ENOMEM;
                                goto err_out_free_rx_buffer;
                        }
@@ -233,30 +286,21 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        }
 
-       adapter->ops = kzalloc(sizeof(struct netxen_drvops), GFP_KERNEL);
-       if (adapter->ops == NULL) {
-               printk(KERN_ERR
-                      "%s: Could not allocate memory for adapter->ops:%d\n",
-                      netxen_nic_driver_name,
-                      (int)sizeof(struct netxen_adapter));
-               err = -ENOMEM;
-               goto err_out_free_rx_buffer;
-       }
-
        adapter->cmd_buf_arr = cmd_buf_arr;
        adapter->ahw.pci_base0 = mem_ptr0;
        adapter->ahw.pci_base1 = mem_ptr1;
        adapter->ahw.pci_base2 = mem_ptr2;
+       adapter->ahw.db_base = db_ptr;
+       adapter->ahw.db_len = db_len;
        spin_lock_init(&adapter->tx_lock);
        spin_lock_init(&adapter->lock);
+       netxen_initialize_adapter_sw(adapter);  /* initialize the buffers in adapter */
 #ifdef CONFIG_IA64
        netxen_pinit_from_rom(adapter, 0);
        udelay(500);
        netxen_load_firmware(adapter);
 #endif
 
-       /* initialize the buffers in adapter */
-       netxen_initialize_adapter_sw(adapter);
        /*
         * Set the CRB window to invalid. If any register in window 0 is
         * accessed it should set the window to 0 and then reset it to 1.
@@ -274,11 +318,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->ahw.xg_linkup = 0;
        adapter->watchdog_timer.function = &netxen_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
-       INIT_WORK(&adapter->watchdog_task,
-                 (void (*)(void *))netxen_watchdog_task, adapter);
+       INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task);
        adapter->ahw.pdev = pdev;
        adapter->proc_cmd_buf_counter = 0;
-       pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
+       adapter->ahw.revision_id = nx_p2_id;
 
        if (pci_enable_msi(pdev)) {
                adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
@@ -300,6 +343,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
        writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
 
+       /* do this before waking up pegs so that we have valid dummy dma addr */
+       err = netxen_initialize_adapter_offload(adapter);
+       if (err) {
+               goto err_out_free_dev;
+       }
+
        /* Unlock the HW, prompting the boot sequence */
        writel(1,
               NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
@@ -308,6 +357,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 
        /* initialize the all the ports */
+       adapter->active_ports = 0;
 
        for (i = 0; i < adapter->ahw.max_ports; i++) {
                netdev = alloc_etherdev(sizeof(struct netxen_port));
@@ -373,14 +423,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                       netdev->dev_addr[4],
                                       netdev->dev_addr[5]);
                        } else {
-                               if (adapter->ops->macaddr_set)
-                                       adapter->ops->macaddr_set(port,
-                                                                 netdev->
-                                                                 dev_addr);
+                               if (adapter->macaddr_set)
+                                       adapter->macaddr_set(port,
+                                                            netdev->dev_addr);
                        }
                }
-               INIT_WORK(&adapter->tx_timeout_task,
-                         (void (*)(void *))netxen_tx_timeout_task, netdev);
+               adapter->netdev = netdev;
+               INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);
 
@@ -392,7 +441,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        goto err_out_free_dev;
                }
                adapter->port_count++;
-               adapter->active_ports = 0;
                adapter->port[i] = port;
        }
 
@@ -413,6 +461,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                break;
        }
 
+       adapter->number = netxen_cards_found;
        adapter->driver_mismatch = 0;
 
        return 0;
@@ -427,7 +476,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        free_netdev(port->netdev);
                }
        }
-       kfree(adapter->ops);
+
+       netxen_free_adapter_offload(adapter);
 
       err_out_free_rx_buffer:
        for (i = 0; i < MAX_RCV_CTX; ++i) {
@@ -440,19 +490,23 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        }
                }
        }
-
        vfree(cmd_buf_arr);
 
-       kfree(adapter->port);
-
       err_out_free_adapter:
        pci_set_drvdata(pdev, NULL);
        kfree(adapter);
 
+      err_out_dbunmap:
+       if (db_ptr)
+               iounmap(db_ptr);
+
       err_out_iounmap:
-       iounmap(mem_ptr0);
-       iounmap(mem_ptr1);
-       iounmap(mem_ptr2);
+       if (mem_ptr0)
+               iounmap(mem_ptr0);
+       if (mem_ptr1)
+               iounmap(mem_ptr1);
+       if (mem_ptr2)
+               iounmap(mem_ptr2);
 
       err_out_free_res:
        pci_release_regions(pdev);
@@ -477,12 +531,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
 
        netxen_nic_stop_all_ports(adapter);
        /* leave the hw in the same state as reboot */
-       netxen_pinit_from_rom(adapter, 0);
-       udelay(500);
        netxen_load_firmware(adapter);
-
-       if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
-               netxen_nic_disable_int(adapter);
+       netxen_free_adapter_offload(adapter);
 
        udelay(500);            /* Delay for a while to drain the DMA engines */
        for (i = 0; i < adapter->port_count; i++) {
@@ -499,6 +549,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
        if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
                netxen_free_hw_resources(adapter);
 
+       iounmap(adapter->ahw.db_base);
        iounmap(adapter->ahw.pci_base0);
        iounmap(adapter->ahw.pci_base1);
        iounmap(adapter->ahw.pci_base2);
@@ -525,7 +576,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
        }
 
        vfree(adapter->cmd_buf_arr);
-       kfree(adapter->ops);
        kfree(adapter);
 }
 
@@ -547,6 +597,8 @@ static int netxen_nic_open(struct net_device *netdev)
                        return -EIO;
                }
                netxen_nic_flash_print(adapter);
+               if (adapter->init_niu)
+                       adapter->init_niu(adapter);
 
                /* setup all the resources for the Phantom... */
                /* this include the descriptors for rcv, tx, and status */
@@ -557,32 +609,31 @@ static int netxen_nic_open(struct net_device *netdev)
                               err);
                        return err;
                }
-               if (adapter->ops->init_port
-                   && adapter->ops->init_port(adapter, port->portnum) != 0) {
+               if (adapter->init_port
+                   && adapter->init_port(adapter, port->portnum) != 0) {
                        printk(KERN_ERR "%s: Failed to initialize port %d\n",
                               netxen_nic_driver_name, port->portnum);
                        netxen_free_hw_resources(adapter);
                        return -EIO;
                }
-               if (adapter->ops->init_niu)
-                       adapter->ops->init_niu(adapter);
                for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
                        for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
                                netxen_post_rx_buffers(adapter, ctx, ring);
                }
-               adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
-       }
-       adapter->active_ports++;
-       if (adapter->active_ports == 1) {
+               adapter->irq = adapter->ahw.pdev->irq;
                err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
                                  SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name,
                                  adapter);
                if (err) {
                        printk(KERN_ERR "request_irq failed with: %d\n", err);
-                       adapter->active_ports--;
+                       netxen_free_hw_resources(adapter);
                        return err;
                }
-               adapter->irq = adapter->ahw.pdev->irq;
+
+               adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+       }
+       adapter->active_ports++;
+       if (adapter->active_ports == 1) {
                if (!adapter->driver_mismatch)
                        mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -591,11 +642,14 @@ static int netxen_nic_open(struct net_device *netdev)
 
        /* Done here again so that even if phantom sw overwrote it,
         * we set it */
-       if (adapter->ops->macaddr_set)
-               adapter->ops->macaddr_set(port, netdev->dev_addr);
+       if (adapter->macaddr_set)
+               adapter->macaddr_set(port, netdev->dev_addr);
        netxen_nic_set_link_parameters(port);
 
        netxen_nic_set_multi(netdev);
+       if (adapter->set_mtu)
+               adapter->set_mtu(port, netdev->mtu);
+
        if (!adapter->driver_mismatch)
                netif_start_queue(netdev);
 
@@ -648,6 +702,7 @@ static int netxen_nic_close(struct net_device *netdev)
                        }
                        cmd_buff++;
                }
+               FLUSH_SCHEDULED_WORK();
                del_timer_sync(&adapter->watchdog_timer);
        }
 
@@ -668,7 +723,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        struct cmd_desc_type0 *hwdesc;
        int k;
        struct netxen_cmd_buffer *pbuf = NULL;
-       unsigned int tries = 0;
        static int dropped_packet = 0;
        int frag_count;
        u32 local_producer = 0;
@@ -730,7 +784,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                        if (((skb->nh.iph)->ihl * sizeof(u32)) +
                            ((skb->h.th)->doff * sizeof(u32)) +
                            sizeof(struct ethhdr) >
-                           (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
+                           (sizeof(struct cmd_desc_type0) - 2)) {
                                no_of_desc++;
                        }
                }
@@ -741,27 +795,17 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        if ((k + no_of_desc) >=
            ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
             last_cmd_consumer)) {
+               port->stats.nocmddescriptor++;
+               DPRINTK(ERR, "No command descriptors available,"
+                       " producer = %d, consumer = %d count=%llu,"
+                       " dropping packet\n", producer,
+                       adapter->last_cmd_consumer,
+                       port->stats.nocmddescriptor);
+
+               netif_stop_queue(netdev);
+               port->flags |= NETXEN_NETDEV_STATUS;
                spin_unlock_bh(&adapter->tx_lock);
-               if (tries == 0) {
-                       local_bh_disable();
-                       netxen_process_cmd_ring((unsigned long)adapter);
-                       local_bh_enable();
-                       ++tries;
-                       goto retry_getting_window;
-               } else {
-                       port->stats.nocmddescriptor++;
-                       DPRINTK(ERR, "No command descriptors available,"
-                               " producer = %d, consumer = %d count=%llu,"
-                               " dropping packet\n", producer,
-                               adapter->last_cmd_consumer,
-                               port->stats.nocmddescriptor);
-
-                       spin_lock_bh(&adapter->tx_lock);
-                       netif_stop_queue(netdev);
-                       port->flags |= NETXEN_NETDEV_STATUS;
-                       spin_unlock_bh(&adapter->tx_lock);
-                       return NETDEV_TX_BUSY;
-               }
+               return NETDEV_TX_BUSY;
        }
        k = get_index_range(k, max_tx_desc_count, no_of_desc);
        adapter->cmd_producer = k;
@@ -783,7 +827,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                pbuf->mss = 0;
                hwdesc->mss = 0;
        }
-       pbuf->no_of_descriptors = no_of_desc;
        pbuf->total_length = skb->len;
        pbuf->skb = skb;
        pbuf->cmd = TX_ETHER_PKT;
@@ -793,11 +836,11 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len,
                                      PCI_DMA_TODEVICE);
        buffrag->length = first_seg_len;
-       CMD_DESC_TOTAL_LENGTH_WRT(hwdesc, skb->len);
-       hwdesc->num_of_buffers = frag_count;
-       hwdesc->opcode = TX_ETHER_PKT;
+       netxen_set_cmd_desc_totallength(hwdesc, skb->len);
+       netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
+       netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
 
-       CMD_DESC_PORT_WRT(hwdesc, port->portnum);
+       netxen_set_cmd_desc_port(hwdesc, port->portnum);
        hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
        hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
 
@@ -856,12 +899,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        /* For LSO, we need to copy the MAC/IP/TCP headers into
         * the descriptor ring
         */
-       if (hw->cmd_desc_head[saved_producer].opcode == TX_TCP_LSO) {
+       if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
+           == TX_TCP_LSO) {
                int hdr_len, first_hdr_len, more_hdr;
                hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
-               if (hdr_len > (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
-                       first_hdr_len =
-                           sizeof(struct cmd_desc_type0) - NET_IP_ALIGN;
+               if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
+                       first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
                        more_hdr = 1;
                } else {
                        first_hdr_len = hdr_len;
@@ -871,7 +914,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                hwdesc = &hw->cmd_desc_head[producer];
 
                /* copy the first 64 bytes */
-               memcpy(((void *)hwdesc) + NET_IP_ALIGN,
+               memcpy(((void *)hwdesc) + 2,
                       (void *)(skb->data), first_hdr_len);
                producer = get_next_index(producer, max_tx_desc_count);
 
@@ -887,7 +930,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
        spin_lock_bh(&adapter->tx_lock);
        port->stats.txbytes +=
-           CMD_DESC_TOTAL_LENGTH(&hw->cmd_desc_head[saved_producer]);
+           netxen_get_cmd_desc_totallength(&hw->cmd_desc_head[saved_producer]);
        /* Code to update the adapter considering how many producer threads
           are currently working */
        if ((--adapter->num_threads) == 0) {
@@ -897,20 +940,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                       NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
                wmb();
                adapter->total_threads = 0;
-       } else {
-               u32 crb_producer = 0;
-               crb_producer =
-                   readl(NETXEN_CRB_NORMALIZE
-                         (adapter, CRB_CMD_PRODUCER_OFFSET));
-               if (crb_producer == local_producer) {
-                       crb_producer = get_index_range(crb_producer,
-                                                      max_tx_desc_count,
-                                                      no_of_desc);
-                       writel(crb_producer,
-                              NETXEN_CRB_NORMALIZE(adapter,
-                                                   CRB_CMD_PRODUCER_OFFSET));
-                       wmb();
-               }
        }
 
        port->stats.xmitfinished++;
@@ -927,29 +956,36 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 static void netxen_watchdog(unsigned long v)
 {
        struct netxen_adapter *adapter = (struct netxen_adapter *)v;
-       schedule_work(&adapter->watchdog_task);
+       if (adapter != g_adapter) {
+               printk("%s: ***BUG*** adapter[%p] != g_adapter[%p]\n",
+                      __FUNCTION__, adapter, g_adapter);
+               return;
+       }
+
+       SCHEDULE_WORK(&adapter->watchdog_task);
 }
 
 static void netxen_tx_timeout(struct net_device *netdev)
 {
        struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
-       struct netxen_adapter *adapter = port->adapter;
 
-       schedule_work(&adapter->tx_timeout_task);
+       SCHEDULE_WORK(&port->adapter->tx_timeout_task);
 }
 
-static void netxen_tx_timeout_task(struct net_device *netdev)
+static void netxen_tx_timeout_task(struct work_struct *work)
 {
-       struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
+       struct netxen_adapter *adapter =
+               container_of(work, struct netxen_adapter, tx_timeout_task);
+       struct net_device *netdev = adapter->netdev;
        unsigned long flags;
 
        printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
               netxen_nic_driver_name, netdev->name);
 
-       spin_lock_irqsave(&port->adapter->lock, flags);
+       spin_lock_irqsave(&adapter->lock, flags);
        netxen_nic_close(netdev);
        netxen_nic_open(netdev);
-       spin_unlock_irqrestore(&port->adapter->lock, flags);
+       spin_unlock_irqrestore(&adapter->lock, flags);
        netdev->trans_start = jiffies;
        netif_wake_queue(netdev);
 }
@@ -966,6 +1002,11 @@ netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
        if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
                int count = 0;
                u32 mask;
+               mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
+               if ((mask & 0x80) == 0) {
+                       /* not our interrupt */
+                       return ret;
+               }
                netxen_nic_disable_int(adapter);
                /* Window = 0 or 1 */
                do {
@@ -1025,7 +1066,10 @@ irqreturn_t netxen_intr(int irq, void *data)
                netdev = port->netdev;
 
                /* process our status queue (for all 4 ports) */
-               netxen_handle_int(adapter, netdev);
+               if (netif_running(netdev)) {
+                       netxen_handle_int(adapter, netdev);
+                       break;
+               }
        }
 
        return IRQ_HANDLED;
@@ -1039,11 +1083,12 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget)
        int done = 1;
        int ctx;
        int this_work_done;
+       int work_done = 0;
 
        DPRINTK(INFO, "polling for %d descriptors\n", *budget);
        port->stats.polled++;
 
-       adapter->work_done = 0;
+       work_done = 0;
        for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
                /*
                 * Fairness issue. This will give undue weight to the
@@ -1060,20 +1105,20 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget)
                this_work_done = netxen_process_rcv_ring(adapter, ctx,
                                                         work_to_do /
                                                         MAX_RCV_CTX);
-               adapter->work_done += this_work_done;
+               work_done += this_work_done;
        }
 
-       netdev->quota -= adapter->work_done;
-       *budget -= adapter->work_done;
+       netdev->quota -= work_done;
+       *budget -= work_done;
 
-       if (adapter->work_done >= work_to_do
-           && netxen_nic_rx_has_work(adapter) != 0)
+       if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0)
                done = 0;
 
-       netxen_process_cmd_ring((unsigned long)adapter);
+       if (netxen_process_cmd_ring((unsigned long)adapter) == 0)
+               done = 0;
 
        DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
-               adapter->work_done, work_to_do);
+               work_done, work_to_do);
        if (done) {
                netif_rx_complete(netdev);
                netxen_nic_enable_int(adapter);
@@ -1116,8 +1161,9 @@ netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
                if (ifr->ifr_data) {
                        sprintf(dev_name, "%s-%d", NETXEN_NIC_NAME_RSP,
                                port->portnum);
-                       nr_bytes = copy_to_user((char *)ifr->ifr_data, dev_name,
-                                               NETXEN_NIC_NAME_LEN);
+                       nr_bytes =
+                           copy_to_user((char __user *)ifr->ifr_data, dev_name,
+                                        NETXEN_NIC_NAME_LEN);
                        if (nr_bytes)
                                err = -EIO;
 
@@ -1144,6 +1190,9 @@ static struct pci_driver netxen_driver = {
 
 static int __init netxen_init_module(void)
 {
+       if ((netxen_workq = create_singlethread_workqueue("netxen")) == 0)
+               return -ENOMEM;
+
        return pci_module_init(&netxen_driver);
 }
 
@@ -1154,7 +1203,7 @@ static void __exit netxen_exit_module(void)
        /*
         * Wait for some time to allow the dma to drain, if any.
         */
-       mdelay(5);
+       destroy_workqueue(netxen_workq);
        pci_unregister_driver(&netxen_driver);
 }
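
The netxen hunks above follow the 2.6.20 workqueue API: work callbacks now receive a struct work_struct pointer and recover their state with container_of(), and the driver owns a single-threaded workqueue that is created at module load and destroyed (which also flushes pending work) at unload. A minimal sketch of that pattern, with illustrative names rather than the driver's own:

/* Sketch of the new-style work_struct pattern; names are illustrative. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

struct example_adapter {
	int port;
	struct work_struct timeout_task;
};

static struct example_adapter adapter;

/* New-style callback: takes the work item, not an opaque void pointer. */
static void example_timeout_task(struct work_struct *work)
{
	struct example_adapter *ad =
		container_of(work, struct example_adapter, timeout_task);

	printk(KERN_INFO "example: resetting port %d\n", ad->port);
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&adapter.timeout_task, example_timeout_task);
	queue_work(example_wq, &adapter.timeout_task);
	return 0;
}

static void __exit example_exit(void)
{
	/* Waits for queued work to finish before freeing the queue. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
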
 
index 7950a04532e6603e69a0190f6df90593ae4e7226..4987dc765d99017391a0f29f0612a61e46fa874c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 - 2006 NetXen, Inc.
  * All rights reserved.
- * 
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version 2
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
  * MA  02111-1307, USA.
- * 
+ *
  * The full GNU General Public License is included in this distribution
  * in the file called LICENSE.
- * 
+ *
  * Contact Information:
  *    info@netxen.com
  * NetXen,
 
 static long phy_lock_timeout = 100000000;
 
-static inline int phy_lock(void)
+static inline int phy_lock(struct netxen_adapter *adapter)
 {
        int i;
        int done = 0, timeout = 0;
 
        while (!done) {
-               done = readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_LOCK));
+               done =
+                   readl(pci_base_offset
+                         (adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK)));
                if (done == 1)
                        break;
                if (timeout >= phy_lock_timeout) {
@@ -61,13 +63,15 @@ static inline int phy_lock(void)
                }
        }
 
-       writel(NETXEN_PHY_LOCK_ID, (void __iomem *)PHY_LOCK_DRIVER);
+       writel(PHY_LOCK_DRIVER,
+              NETXEN_CRB_NORMALIZE(adapter, NETXEN_PHY_LOCK_ID));
        return 0;
 }
 
-static inline int phy_unlock(void)
+static inline int phy_unlock(struct netxen_adapter *adapter)
 {
-       readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK));
+       readl(pci_base_offset(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)));
+
        return 0;
 }
 
@@ -95,7 +99,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
        __le32 status;
        __le32 mac_cfg0;
 
-       if (phy_lock() != 0) {
+       if (phy_lock(adapter) != 0) {
                return -1;
        }
 
@@ -162,7 +166,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
                                           NETXEN_NIU_GB_MAC_CONFIG_0(0),
                                           &mac_cfg0, 4))
                        return -EIO;
-       phy_unlock();
+       phy_unlock(adapter);
        return result;
 }
 
@@ -399,8 +403,8 @@ int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port)
 {
        int result = 0;
        __le32 status;
-       if (adapter->ops->disable_phy_interrupts)
-               adapter->ops->disable_phy_interrupts(adapter, port);
+       if (adapter->disable_phy_interrupts)
+               adapter->disable_phy_interrupts(adapter, port);
        mdelay(2);
 
        if (0 ==
@@ -612,7 +616,7 @@ int netxen_niu_macaddr_set(struct netxen_port *port,
        __le32 temp = 0;
        struct netxen_adapter *adapter = port->adapter;
        int phy = port->portnum;
-       unsigned char mac_addr[MAX_ADDR_LEN];
+       unsigned char mac_addr[6];
        int i;
 
        for (i = 0; i < 10; i++) {
@@ -631,7 +635,7 @@ int netxen_niu_macaddr_set(struct netxen_port *port,
 
                netxen_niu_macaddr_get(adapter, phy,
                                       (netxen_ethernet_macaddr_t *) mac_addr);
-               if (memcmp(mac_addr, addr, MAX_ADDR_LEN == 0))
+               if (memcmp(mac_addr, addr, 6) == 0)
                        break;
        }
 
index 8181d436783f0a2d582f3b2ae55372b2e50ae695..7879f855af0b27786414d0b06a914a6cb9c3011a 100644 (file)
 /* 
  * CRB Registers or queue message done only at initialization time.
  */
+#define NIC_CRB_BASE               NETXEN_CAM_RAM(0x200)
+#define NETXEN_NIC_REG(X)             (NIC_CRB_BASE+(X))
 
-/*
- * The following 2 are the base adresses for the CRB registers and their
- * offsets will be added to get addresses for the index addresses.
- */
-#define NIC_CRB_BASE_PORT1     NETXEN_CAM_RAM(0x200)
-#define NIC_CRB_BASE_PORT2     NETXEN_CAM_RAM(0x250)
+#define CRB_PHAN_CNTRL_LO_OFFSET    NETXEN_NIC_REG(0x00)
+#define CRB_PHAN_CNTRL_HI_OFFSET    NETXEN_NIC_REG(0x04)
+#define CRB_CMD_PRODUCER_OFFSET     NETXEN_NIC_REG(0x08)
+#define CRB_CMD_CONSUMER_OFFSET     NETXEN_NIC_REG(0x0c)
+#define CRB_PAUSE_ADDR_LO           NETXEN_NIC_REG(0x10)       /* C0 EPG BUG  */
+#define CRB_PAUSE_ADDR_HI           NETXEN_NIC_REG(0x14)
+#define CRB_HOST_CMD_ADDR_HI        NETXEN_NIC_REG(0x18)       /* host add:cmd ring */
+#define CRB_HOST_CMD_ADDR_LO        NETXEN_NIC_REG(0x1c)
+#define CRB_CMD_INTR_LOOP           NETXEN_NIC_REG(0x20)       /* 4 regs for perf */
+#define CRB_CMD_DMA_LOOP            NETXEN_NIC_REG(0x24)
+#define CRB_RCV_INTR_LOOP           NETXEN_NIC_REG(0x28)
+#define CRB_RCV_DMA_LOOP            NETXEN_NIC_REG(0x2c)
+#define CRB_ENABLE_TX_INTR          NETXEN_NIC_REG(0x30)       /* phantom init status */
+#define CRB_MMAP_ADDR_3             NETXEN_NIC_REG(0x34)
+#define CRB_CMDPEG_CMDRING          NETXEN_NIC_REG(0x38)
+#define CRB_HOST_DUMMY_BUF_ADDR_HI  NETXEN_NIC_REG(0x3c)
+#define CRB_HOST_DUMMY_BUF_ADDR_LO  NETXEN_NIC_REG(0x40)
+#define CRB_MMAP_ADDR_0             NETXEN_NIC_REG(0x44)
+#define CRB_MMAP_ADDR_1             NETXEN_NIC_REG(0x48)
+#define CRB_MMAP_ADDR_2             NETXEN_NIC_REG(0x4c)
+#define CRB_CMDPEG_STATE            NETXEN_NIC_REG(0x50)
+#define CRB_MMAP_SIZE_0             NETXEN_NIC_REG(0x54)
+#define CRB_MMAP_SIZE_1             NETXEN_NIC_REG(0x58)
+#define CRB_MMAP_SIZE_2             NETXEN_NIC_REG(0x5c)
+#define CRB_MMAP_SIZE_3             NETXEN_NIC_REG(0x60)
+#define CRB_GLOBAL_INT_COAL         NETXEN_NIC_REG(0x64)       /* interrupt coalescing */
+#define CRB_INT_COAL_MODE           NETXEN_NIC_REG(0x68)
+#define CRB_MAX_RCV_BUFS            NETXEN_NIC_REG(0x6c)
+#define CRB_TX_INT_THRESHOLD        NETXEN_NIC_REG(0x70)
+#define CRB_RX_PKT_TIMER            NETXEN_NIC_REG(0x74)
+#define CRB_TX_PKT_TIMER            NETXEN_NIC_REG(0x78)
+#define CRB_RX_PKT_CNT              NETXEN_NIC_REG(0x7c)
+#define CRB_RX_TMR_CNT              NETXEN_NIC_REG(0x80)
+#define CRB_RX_LRO_TIMER            NETXEN_NIC_REG(0x84)
+#define CRB_RX_LRO_MID_TIMER        NETXEN_NIC_REG(0x88)
+#define CRB_DMA_MAX_RCV_BUFS        NETXEN_NIC_REG(0x8c)
+#define CRB_MAX_DMA_ENTRIES         NETXEN_NIC_REG(0x90)
+#define CRB_XG_STATE                NETXEN_NIC_REG(0x94)       /* XG Link status */
+#define CRB_AGENT_GO                NETXEN_NIC_REG(0x98)       /* NIC pkt gen agent */
+#define CRB_AGENT_TX_SIZE           NETXEN_NIC_REG(0x9c)
+#define CRB_AGENT_TX_TYPE           NETXEN_NIC_REG(0xa0)
+#define CRB_AGENT_TX_ADDR           NETXEN_NIC_REG(0xa4)
+#define CRB_AGENT_TX_MSS            NETXEN_NIC_REG(0xa8)
+#define CRB_TX_STATE                NETXEN_NIC_REG(0xac)       /* Debug -performance */
+#define CRB_TX_COUNT                NETXEN_NIC_REG(0xb0)
+#define CRB_RX_STATE                NETXEN_NIC_REG(0xb4)
+#define CRB_RX_PERF_DEBUG_1         NETXEN_NIC_REG(0xb8)
+#define CRB_RX_LRO_CONTROL          NETXEN_NIC_REG(0xbc)       /* LRO On/OFF */
+#define CRB_RX_LRO_START_NUM        NETXEN_NIC_REG(0xc0)
+#define CRB_MPORT_MODE              NETXEN_NIC_REG(0xc4)       /* Multiport Mode */
+#define CRB_CMD_RING_SIZE           NETXEN_NIC_REG(0xc8)
+#define CRB_INT_VECTOR              NETXEN_NIC_REG(0xd4)
+#define CRB_CTX_RESET               NETXEN_NIC_REG(0xd8)
+#define CRB_HOST_STS_PROD           NETXEN_NIC_REG(0xdc)
+#define CRB_HOST_STS_CONS           NETXEN_NIC_REG(0xe0)
+#define CRB_PEG_CMD_PROD            NETXEN_NIC_REG(0xe4)
+#define CRB_PEG_CMD_CONS            NETXEN_NIC_REG(0xe8)
+#define CRB_HOST_BUFFER_PROD        NETXEN_NIC_REG(0xec)
+#define CRB_HOST_BUFFER_CONS        NETXEN_NIC_REG(0xf0)
+#define CRB_JUMBO_BUFFER_PROD       NETXEN_NIC_REG(0xf4)
+#define CRB_JUMBO_BUFFER_CONS       NETXEN_NIC_REG(0xf8)
 
-#define NETXEN_NIC_REG(X)      (NIC_CRB_BASE_PORT1+(X))
+#define CRB_CMD_PRODUCER_OFFSET_1   NETXEN_NIC_REG(0x1ac)
+#define CRB_CMD_CONSUMER_OFFSET_1   NETXEN_NIC_REG(0x1b0)
+#define CRB_TEMP_STATE              NETXEN_NIC_REG(0x1b4)
 
 /*
  * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
  * on the Phantom.
  */
 
-#define CRB_PHAN_CNTRL_LO_OFFSET       NETXEN_NIC_REG(0x00)
-#define CRB_PHAN_CNTRL_HI_OFFSET       NETXEN_NIC_REG(0x04)
-
-/* point to the indexes */
-#define CRB_CMD_PRODUCER_OFFSET                NETXEN_NIC_REG(0x08)
-#define CRB_CMD_CONSUMER_OFFSET                NETXEN_NIC_REG(0x0c)
-
-#define CRB_PAUSE_ADDR_LO              NETXEN_NIC_REG(0x10)
-#define CRB_PAUSE_ADDR_HI              NETXEN_NIC_REG(0x14)
-
-/* address of command descriptors in the host memory */
-#define CRB_HOST_CMD_ADDR_HI           NETXEN_NIC_REG(0x30)
-#define CRB_HOST_CMD_ADDR_LO           NETXEN_NIC_REG(0x34)
-
-/* The following 4 CRB registers are for doing performance coal */
-#define CRB_CMD_INTR_LOOP              NETXEN_NIC_REG(0x38)
-#define CRB_CMD_DMA_LOOP               NETXEN_NIC_REG(0x3c)
-#define CRB_RCV_INTR_LOOP              NETXEN_NIC_REG(0x40)
-#define CRB_RCV_DMA_LOOP               NETXEN_NIC_REG(0x44)
-
-/* Needed by the host to find out the state of Phantom's initialization */
-#define CRB_ENABLE_TX_INTR             NETXEN_NIC_REG(0x4c)
-#define CRB_CMDPEG_STATE               NETXEN_NIC_REG(0x50)
-#define CRB_CMDPEG_CMDRING             NETXEN_NIC_REG(0x54)
-
-/* Interrupt coalescing parameters */
-#define CRB_GLOBAL_INT_COAL            NETXEN_NIC_REG(0x80)
-#define CRB_INT_COAL_MODE              NETXEN_NIC_REG(0x84)
-#define CRB_MAX_RCV_BUFS               NETXEN_NIC_REG(0x88)
-#define CRB_TX_INT_THRESHOLD           NETXEN_NIC_REG(0x8c)
-#define CRB_RX_PKT_TIMER               NETXEN_NIC_REG(0x90)
-#define CRB_TX_PKT_TIMER               NETXEN_NIC_REG(0x94)
-#define CRB_RX_PKT_CNT                 NETXEN_NIC_REG(0x98)
-#define CRB_RX_TMR_CNT                 NETXEN_NIC_REG(0x9c)
-#define CRB_INT_THRESH          NETXEN_NIC_REG(0xa4)
-
-/* Register for communicating XG link status */
-#define CRB_XG_STATE                   NETXEN_NIC_REG(0xa0)
-
-/* Register for communicating card temperature */
-/* Upper 16 bits are temperature value. Lower 16 bits are the state */
-#define CRB_TEMP_STATE          NETXEN_NIC_REG(0xa8)
-#define nx_get_temp_val(x)          ((x) >> 16)
-#define nx_get_temp_state(x)      ((x) & 0xffff)
-#define nx_encode_temp(val, state)     (((val) << 16) | (state))
-
-/* Debug registers for controlling NIC pkt gen agent */
-#define CRB_AGENT_GO                   NETXEN_NIC_REG(0xb0)
-#define CRB_AGENT_TX_SIZE              NETXEN_NIC_REG(0xb4)
-#define CRB_AGENT_TX_TYPE              NETXEN_NIC_REG(0xb8)
-#define CRB_AGENT_TX_ADDR              NETXEN_NIC_REG(0xbc)
-#define CRB_AGENT_TX_MSS               NETXEN_NIC_REG(0xc0)
-
-/* Debug registers for observing NIC performance */
-#define CRB_TX_STATE                   NETXEN_NIC_REG(0xd0)
-#define CRB_TX_COUNT                   NETXEN_NIC_REG(0xd4)
-#define CRB_RX_STATE                   NETXEN_NIC_REG(0xd8)
+#define nx_get_temp_val(x)             ((x) >> 16)
+#define nx_get_temp_state(x)           ((x) & 0xffff)
+#define nx_encode_temp(val, state)     (((val) << 16) | (state))
 
 /* CRB registers per Rcv Descriptor ring */
 struct netxen_rcv_desc_crb {
        u32 crb_rcv_producer_offset __attribute__ ((aligned(512)));
        u32 crb_rcv_consumer_offset;
        u32 crb_globalrcv_ring;
+       u32 crb_rcv_ring_size;
 };
 
 /*
- * CRB registers used by the receive peg logic. One instance of these
- * needs to be instantiated per instance of the receive peg.
+ * CRB registers used by the receive peg logic.
  */
 
 struct netxen_recv_crb {
@@ -127,6 +132,7 @@ struct netxen_recv_crb {
        u32 crb_rcv_status_producer;
        u32 crb_rcv_status_consumer;
        u32 crb_rcvpeg_state;
+       u32 crb_status_ring_size;
 };
 
 #if defined(DEFINE_GLOBAL_RECV_CRB)
@@ -139,30 +145,48 @@ struct netxen_recv_crb recv_crb_registers[] = {
         {
          {
           /* crb_rcv_producer_offset: */
-          NETXEN_NIC_REG(0x18),
+          NETXEN_NIC_REG(0x100),
           /* crb_rcv_consumer_offset: */
-          NETXEN_NIC_REG(0x1c),
+          NETXEN_NIC_REG(0x104),
           /* crb_gloablrcv_ring: */
-          NETXEN_NIC_REG(0x20),
+          NETXEN_NIC_REG(0x108),
+          /* crb_rcv_ring_size */
+          NETXEN_NIC_REG(0x10c),
+
           },
          /* Jumbo frames */
          {
           /* crb_rcv_producer_offset: */
-          NETXEN_NIC_REG(0x100),
+          NETXEN_NIC_REG(0x110),
           /* crb_rcv_consumer_offset: */
-          NETXEN_NIC_REG(0x104),
+          NETXEN_NIC_REG(0x114),
           /* crb_gloablrcv_ring: */
-          NETXEN_NIC_REG(0x108),
+          NETXEN_NIC_REG(0x118),
+          /* crb_rcv_ring_size */
+          NETXEN_NIC_REG(0x11c),
+          },
+         /* LRO */
+         {
+          /* crb_rcv_producer_offset: */
+          NETXEN_NIC_REG(0x120),
+          /* crb_rcv_consumer_offset: */
+          NETXEN_NIC_REG(0x124),
+          /* crb_gloablrcv_ring: */
+          NETXEN_NIC_REG(0x128),
+          /* crb_rcv_ring_size */
+          NETXEN_NIC_REG(0x12c),
           }
          },
         /* crb_rcvstatus_ring: */
-        NETXEN_NIC_REG(0x24),
+        NETXEN_NIC_REG(0x130),
         /* crb_rcv_status_producer: */
-        NETXEN_NIC_REG(0x28),
+        NETXEN_NIC_REG(0x134),
         /* crb_rcv_status_consumer: */
-        NETXEN_NIC_REG(0x2c),
+        NETXEN_NIC_REG(0x138),
         /* crb_rcvpeg_state: */
-        NETXEN_NIC_REG(0x48),
+        NETXEN_NIC_REG(0x13c),
+        /* crb_status_ring_size */
+        NETXEN_NIC_REG(0x140),
 
         },
        /*
@@ -173,34 +197,66 @@ struct netxen_recv_crb recv_crb_registers[] = {
         {
          {
           /* crb_rcv_producer_offset: */
-          NETXEN_NIC_REG(0x80),
+          NETXEN_NIC_REG(0x144),
           /* crb_rcv_consumer_offset: */
-          NETXEN_NIC_REG(0x84),
+          NETXEN_NIC_REG(0x148),
           /* crb_globalrcv_ring: */
-          NETXEN_NIC_REG(0x88),
+          NETXEN_NIC_REG(0x14c),
+          /* crb_rcv_ring_size */
+          NETXEN_NIC_REG(0x150),
+
           },
          /* Jumbo frames */
          {
           /* crb_rcv_producer_offset: */
-          NETXEN_NIC_REG(0x10C),
+          NETXEN_NIC_REG(0x154),
           /* crb_rcv_consumer_offset: */
-          NETXEN_NIC_REG(0x110),
+          NETXEN_NIC_REG(0x158),
           /* crb_globalrcv_ring: */
-          NETXEN_NIC_REG(0x114),
+          NETXEN_NIC_REG(0x15c),
+          /* crb_rcv_ring_size */
+          NETXEN_NIC_REG(0x160),
+          },
+         /* LRO */
+         {
+          /* crb_rcv_producer_offset: */
+          NETXEN_NIC_REG(0x164),
+          /* crb_rcv_consumer_offset: */
+          NETXEN_NIC_REG(0x168),
+          /* crb_globalrcv_ring: */
+          NETXEN_NIC_REG(0x16c),
+          /* crb_rcv_ring_size */
+          NETXEN_NIC_REG(0x170),
           }
+
          },
         /* crb_rcvstatus_ring: */
-        NETXEN_NIC_REG(0x8c),
+        NETXEN_NIC_REG(0x174),
         /* crb_rcv_status_producer: */
-        NETXEN_NIC_REG(0x90),
+        NETXEN_NIC_REG(0x178),
         /* crb_rcv_status_consumer: */
-        NETXEN_NIC_REG(0x94),
+        NETXEN_NIC_REG(0x17c),
         /* crb_rcvpeg_state: */
-        NETXEN_NIC_REG(0x98),
+        NETXEN_NIC_REG(0x180),
+        /* crb_status_ring_size */
+        NETXEN_NIC_REG(0x184),
+
         },
 };
+
+u64 ctx_addr_sig_regs[][3] = {
+       {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+       {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+       {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+       {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
 #else
 extern struct netxen_recv_crb recv_crb_registers[];
+extern u64 ctx_addr_sig_regs[][3];
+#define CRB_CTX_ADDR_REG_LO            (ctx_addr_sig_regs[0][0])
+#define CRB_CTX_ADDR_REG_HI            (ctx_addr_sig_regs[0][2])
+#define CRB_CTX_SIGNATURE_REG       (ctx_addr_sig_regs[0][1])
 #endif                         /* DEFINE_GLOBAL_RECEIVE_CRB */
 
 /*
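
The CRB_TEMP_STATE word packs the card temperature in its upper 16 bits and the state in its lower 16 bits, as encoded by the nx_encode_temp()/nx_get_temp_*() macros kept above. A standalone illustration of that packing (the numbers are made up):

/* Standalone demo of the nx_encode_temp()/nx_get_temp_*() packing. */
#include <stdio.h>
#include <stdint.h>

#define nx_get_temp_val(x)             ((x) >> 16)
#define nx_get_temp_state(x)           ((x) & 0xffff)
#define nx_encode_temp(val, state)     (((val) << 16) | (state))

int main(void)
{
	uint32_t word = nx_encode_temp(45, 1);	/* 45 degrees, state 1 */

	printf("raw word: 0x%08x\n", word);                 /* 0x002d0001 */
	printf("value:    %u\n", nx_get_temp_val(word));    /* 45 */
	printf("state:    %u\n", nx_get_temp_state(word));  /* 1 */
	return 0;
}
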
index 26e42f6e9fb10323ec9dcb492b6b1af86ebda147..196993a29b09d2f020d6606eb72f0a4aa8bb5263 100644 (file)
@@ -1335,7 +1335,7 @@ int __init init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(dev_ni52);
        release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
index 340ad0d5388aa9ff20fd98f9d526793f8b68611e..1578f4d984987eed6dfd0320b961721df4fe8068 100644 (file)
@@ -1259,7 +1259,7 @@ int __init init_module(void)
        return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(dev_ni65);
        cleanup_card(dev_ni65);
index b0127c71a5b6f5a3fabb1950bcf57cab459124e9..568daeb3e9d8f56ac0e4b37249c5d2b9507dc66f 100644 (file)
@@ -414,10 +414,10 @@ struct rx_info {
 
        struct sk_buff  *skbs[NR_RX_DESC];
 
-       u32             *next_rx_desc;
+       __le32          *next_rx_desc;
        u16             next_rx, next_empty;
 
-       u32             *descs;
+       __le32          *descs;
        dma_addr_t      phy_descs;
 };
 
@@ -427,6 +427,7 @@ struct ns83820 {
        u8                      __iomem *base;
 
        struct pci_dev          *pci_dev;
+       struct net_device       *ndev;
 
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
        struct vlan_group       *vlgrp;
@@ -459,7 +460,7 @@ struct ns83820 {
        struct sk_buff  *tx_skbs[NR_TX_DESC];
 
        char            pad[16] __attribute__((aligned(16)));
-       u32             *tx_descs;
+       __le32          *tx_descs;
        dma_addr_t      tx_phy_descs;
 
        struct timer_list       tx_watchdog;
@@ -533,7 +534,7 @@ static void ns83820_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid
  * conditions, still route realtime traffic with as low jitter as
  * possible.
  */
-static inline void build_rx_desc(struct ns83820 *dev, u32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
+static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf, u32 cmdsts, u32 extsts)
 {
        desc_addr_set(desc + DESC_LINK, link);
        desc_addr_set(desc + DESC_BUFPTR, buf);
@@ -547,7 +548,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 {
        unsigned next_empty;
        u32 cmdsts;
-       u32 *sg;
+       __le32 *sg;
        dma_addr_t buf;
 
        next_empty = dev->rx_info.next_empty;
@@ -631,10 +632,10 @@ static void fastcall rx_refill_atomic(struct net_device *ndev)
 }
 
 /* REFILL */
-static inline void queue_refill(void *_dev)
+static inline void queue_refill(struct work_struct *work)
 {
-       struct net_device *ndev = _dev;
-       struct ns83820 *dev = PRIV(ndev);
+       struct ns83820 *dev = container_of(work, struct ns83820, tq_refill);
+       struct net_device *ndev = dev->ndev;
 
        rx_refill(ndev, GFP_KERNEL);
        if (dev->rx_info.up)
@@ -874,7 +875,8 @@ static void fastcall rx_irq(struct net_device *ndev)
        struct rx_info *info = &dev->rx_info;
        unsigned next_rx;
        int rx_rc, len;
-       u32 cmdsts, *desc;
+       u32 cmdsts;
+       __le32 *desc;
        unsigned long flags;
        int nr = 0;
 
@@ -1010,7 +1012,8 @@ static inline void kick_tx(struct ns83820 *dev)
 static void do_tx_done(struct net_device *ndev)
 {
        struct ns83820 *dev = PRIV(ndev);
-       u32 cmdsts, tx_done_idx, *desc;
+       u32 cmdsts, tx_done_idx;
+       __le32 *desc;
 
        dprintk("do_tx_done(%p)\n", ndev);
        tx_done_idx = dev->tx_done_idx;
@@ -1077,7 +1080,7 @@ static void ns83820_cleanup_tx(struct ns83820 *dev)
                struct sk_buff *skb = dev->tx_skbs[i];
                dev->tx_skbs[i] = NULL;
                if (skb) {
-                       u32 *desc = dev->tx_descs + (i * DESC_SIZE);
+                       __le32 *desc = dev->tx_descs + (i * DESC_SIZE);
                        pci_unmap_single(dev->pci_dev,
                                        desc_addr_get(desc + DESC_BUFPTR),
                                        le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
@@ -1107,7 +1110,7 @@ static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        skb_frag_t *frag;
        int stopped = 0;
        int do_intr = 0;
-       volatile u32 *first_desc;
+       volatile __le32 *first_desc;
 
        dprintk("ns83820_hard_start_xmit\n");
 
@@ -1180,7 +1183,7 @@ again:
        first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
 
        for (;;) {
-               volatile u32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
+               volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
 
                dprintk("frag[%3u]: %4u @ 0x%08Lx\n", free_idx, len,
                        (unsigned long long)buf);
@@ -1455,7 +1458,8 @@ static int ns83820_stop(struct net_device *ndev)
 static void ns83820_tx_timeout(struct net_device *ndev)
 {
        struct ns83820 *dev = PRIV(ndev);
-        u32 tx_done_idx, *desc;
+        u32 tx_done_idx;
+       __le32 *desc;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->tx_lock, flags);
@@ -1841,6 +1845,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
 
        ndev = alloc_etherdev(sizeof(struct ns83820));
        dev = PRIV(ndev);
+       dev->ndev = ndev;
        err = -ENOMEM;
        if (!dev)
                goto out;
@@ -1853,7 +1858,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
        SET_MODULE_OWNER(ndev);
        SET_NETDEV_DEV(ndev, &pci_dev->dev);
 
-       INIT_WORK(&dev->tq_refill, queue_refill, ndev);
+       INIT_WORK(&dev->tq_refill, queue_refill);
        tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
 
        err = pci_enable_device(pci_dev);
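
The ns83820 hunks above retype the descriptor pointers from u32 to __le32: the hardware descriptors stay little-endian regardless of host byte order, and the annotation lets sparse flag any access that forgets a cpu_to_le32()/le32_to_cpu() conversion. A minimal sketch of that idiom, with hypothetical field names:

/* Sketch of the __le32 descriptor idiom; field names are illustrative. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rx_desc {
	__le32 buf_addr;	/* device reads this as little-endian */
	__le32 cmdsts;
};

static void example_fill_desc(struct example_rx_desc *desc,
			      u32 addr, u32 cmdsts)
{
	desc->buf_addr = cpu_to_le32(addr);	/* sparse warns if omitted */
	desc->cmdsts = cpu_to_le32(cmdsts);
}

static u32 example_desc_len(const struct example_rx_desc *desc)
{
	return le32_to_cpu(desc->cmdsts) & 0xffff;
}
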
index 046009928526c08e07f8c841ce009fc51f2dc81b..794cc61819dd1393355aef9b9a1d4fd5ac9ae20e 100644 (file)
@@ -338,7 +338,6 @@ static int tc574_config(struct pcmcia_device *link)
        struct net_device *dev = link->priv;
        struct el3_private *lp = netdev_priv(dev);
        tuple_t tuple;
-       cisparse_t parse;
        unsigned short buf[32];
        int last_fn, last_ret, i, j;
        kio_addr_t ioaddr;
@@ -350,17 +349,6 @@ static int tc574_config(struct pcmcia_device *link)
 
        DEBUG(0, "3c574_config(0x%p)\n", link);
 
-       tuple.Attributes = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleDataMax = 64;
-       tuple.TupleOffset = 0;
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        link->io.IOAddrLines = 16;
        for (i = j = 0; j < 0x400; j += 0x20) {
                link->io.BasePort1 = j ^ 0x300;
@@ -382,6 +370,10 @@ static int tc574_config(struct pcmcia_device *link)
        /* The 3c574 normally uses an EEPROM for configuration info, including
           the hardware address.  The future products may include a modem chip
           and put the address in the CIS. */
+       tuple.Attributes = 0;
+       tuple.TupleData = (cisdata_t *)buf;
+       tuple.TupleDataMax = 64;
+       tuple.TupleOffset = 0;
        tuple.DesiredTuple = 0x88;
        if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
                pcmcia_get_tuple_data(link, &tuple);
@@ -397,12 +389,9 @@ static int tc574_config(struct pcmcia_device *link)
                        goto failed;
                }
        }
-       tuple.DesiredTuple = CISTPL_VERS_1;
-       if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS &&
-               pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS &&
-               pcmcia_parse_tuple(link, &tuple, &parse) == CS_SUCCESS) {
-               cardname = parse.version_1.str + parse.version_1.ofs[1];
-       } else
+       if (link->prod_id[1])
+               cardname = link->prod_id[1];
+       else
                cardname = "3Com 3c574";
 
        {
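
This and the following PCMCIA hunks drop per-driver CISTPL_CONFIG/CISTPL_MANFID/CISTPL_VERS_1 parsing: the PCMCIA core now fills in link->manf_id, link->card_id and link->prod_id[] before the driver's config routine runs. A hedged sketch of the new-style checks (the constants and fallback string are illustrative):

/* Sketch of identifying a card from pre-parsed CIS fields. */
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>

static const char *example_card_name(struct pcmcia_device *link)
{
	/* Product strings parsed by the core from the VERS_1 tuple. */
	if (link->prod_id[1])
		return link->prod_id[1];
	return "unknown card";
}

static int example_is_3com(struct pcmcia_device *link)
{
	return link->manf_id == MANFID_3COM;
}
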
index 231fa2c9ec6c4580b4abd23f45ae3513a9feb291..1e73ff7d5d8e254c327877f140e76f68d1e31ce2 100644 (file)
@@ -253,7 +253,6 @@ static int tc589_config(struct pcmcia_device *link)
     struct net_device *dev = link->priv;
     struct el3_private *lp = netdev_priv(dev);
     tuple_t tuple;
-    cisparse_t parse;
     u16 buf[32], *phys_addr;
     int last_fn, last_ret, i, j, multi = 0, fifo;
     kio_addr_t ioaddr;
@@ -263,26 +262,16 @@ static int tc589_config(struct pcmcia_device *link)
 
     phys_addr = (u16 *)dev->dev_addr;
     tuple.Attributes = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-    
-    /* Is this a 3c562? */
-    tuple.DesiredTuple = CISTPL_MANFID;
     tuple.Attributes = TUPLE_RETURN_COMMON;
-    if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
-       (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
-       if (le16_to_cpu(buf[0]) != MANFID_3COM)
+
+    /* Is this a 3c562? */
+    if (link->manf_id != MANFID_3COM)
            printk(KERN_INFO "3c589_cs: hmmm, is this really a "
                   "3Com card??\n");
-       multi = (le16_to_cpu(buf[1]) == PRODID_3COM_3C562);
-    }
+    multi = (link->card_id == PRODID_3COM_3C562);
 
     /* For the 3c562, the base address must be xx00-xx7f */
     link->io.IOAddrLines = 16;
index 5ddd5742f7794f47c0f129d3c846b99df7089a45..6139048f81176547975dfac60c9da61c578aa57e 100644 (file)
@@ -299,11 +299,7 @@ static int axnet_config(struct pcmcia_device *link)
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
+
     /* don't trust the CIS on this; Linksys got it wrong */
     link->conf.Present = 0x63;
 
index 48434d7924ebaa2943d0475f49028dc30c9b95cb..91f65e91cd5f488eebe858eab7c2403ec19dcdc7 100644 (file)
@@ -249,12 +249,9 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 static int com20020_config(struct pcmcia_device *link)
 {
     struct arcnet_local *lp;
-    tuple_t tuple;
-    cisparse_t parse;
     com20020_dev_t *info;
     struct net_device *dev;
     int i, last_ret, last_fn;
-    u_char buf[64];
     int ioaddr;
 
     info = link->priv;
@@ -264,16 +261,6 @@ static int com20020_config(struct pcmcia_device *link)
 
     DEBUG(0, "com20020_config(0x%p)\n", link);
 
-    tuple.Attributes = 0;
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = 64;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-
     DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1);
     i = !CS_SUCCESS;
     if (!link->io.BasePort1)
index 65f6fdf437255125b6d5f20257e74c7df4b8d5c0..0d7de617e535f41a08619255ae884cd3ca76bae8 100644 (file)
@@ -342,7 +342,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
     tuple_t tuple;
     cisparse_t parse;
     u_short buf[32];
-    int i, last_fn, last_ret, ret;
+    int i, last_fn = 0, last_ret = 0, ret;
     kio_addr_t ioaddr;
     cardtype_t cardtype;
     char *card_name = "unknown";
@@ -350,21 +350,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
 
     DEBUG(0, "fmvj18x_config(0x%p)\n", link);
 
-    /*
-       This reads the card's CONFIG tuple to find its configuration
-       registers.
-    */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     tuple.TupleData = (u_char *)buf;
     tuple.TupleDataMax = 64;
     tuple.TupleOffset = 0;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-
-    link->conf.ConfigBase = parse.config.base; 
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.DesiredTuple = CISTPL_FUNCE;
     tuple.TupleOffset = 0;
     if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) {
@@ -374,17 +362,12 @@ static int fmvj18x_config(struct pcmcia_device *link)
        CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
        CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
        link->conf.ConfigIndex = parse.cftable_entry.index;
-       tuple.DesiredTuple = CISTPL_MANFID;
-       if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
-           CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       else
-           buf[0] = 0xffff;
-       switch (le16_to_cpu(buf[0])) {
+       switch (link->manf_id) {
        case MANFID_TDK:
            cardtype = TDK;
-           if (le16_to_cpu(buf[1]) == PRODID_TDK_GN3410
-                       || le16_to_cpu(buf[1]) == PRODID_TDK_NP9610
-                       || le16_to_cpu(buf[1]) == PRODID_TDK_MN3200) {
+           if (link->card_id == PRODID_TDK_GN3410
+                       || link->card_id == PRODID_TDK_NP9610
+                       || link->card_id == PRODID_TDK_MN3200) {
                /* MultiFunction Card */
                link->conf.ConfigBase = 0x800;
                link->conf.ConfigIndex = 0x47;
@@ -395,11 +378,11 @@ static int fmvj18x_config(struct pcmcia_device *link)
            cardtype = CONTEC;
            break;
        case MANFID_FUJITSU:
-           if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10302)
+           if (link->card_id == PRODID_FUJITSU_MBH10302)
                 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
                    but these are MBH10304 based card. */ 
                cardtype = MBH10304;
-           else if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304)
+           else if (link->card_id == PRODID_FUJITSU_MBH10304)
                cardtype = MBH10304;
            else
                cardtype = LA501;
@@ -409,14 +392,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
        }
     } else {
        /* old type card */
-       tuple.DesiredTuple = CISTPL_MANFID;
-       if (pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS)
-           CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       else
-           buf[0] = 0xffff;
-       switch (le16_to_cpu(buf[0])) {
+       switch (link->manf_id) {
        case MANFID_FUJITSU:
-           if (le16_to_cpu(buf[1]) == PRODID_FUJITSU_MBH10304) {
+           if (link->card_id == PRODID_FUJITSU_MBH10304) {
                cardtype = XXX10304;    /* MBH10304 with buggy CIS */
                link->conf.ConfigIndex = 0x20;
            } else {
index bc0ca41a054274e19b1e426061a3fa246ca9c233..a956a51d284f65843802c8275f96679b78c63699 100644 (file)
@@ -222,24 +222,12 @@ static int ibmtr_config(struct pcmcia_device *link)
     ibmtr_dev_t *info = link->priv;
     struct net_device *dev = info->dev;
     struct tok_info *ti = netdev_priv(dev);
-    tuple_t tuple;
-    cisparse_t parse;
     win_req_t req;
     memreq_t mem;
     int i, last_ret, last_fn;
-    u_char buf[64];
 
     DEBUG(0, "ibmtr_config(0x%p)\n", link);
 
-    tuple.Attributes = 0;
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = 64;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
     link->conf.ConfigIndex = 0x61;
 
     /* Determine if this is PRIMARY or ALTERNATE. */
index e77110e4c288b15bc40e58168af234bd874ac624..3b707747a8113f6ecc015411469c227e29d3d4a0 100644 (file)
@@ -656,23 +656,12 @@ static int nmclan_config(struct pcmcia_device *link)
   struct net_device *dev = link->priv;
   mace_private *lp = netdev_priv(dev);
   tuple_t tuple;
-  cisparse_t parse;
   u_char buf[64];
   int i, last_ret, last_fn;
   kio_addr_t ioaddr;
 
   DEBUG(0, "nmclan_config(0x%p)\n", link);
 
-  tuple.Attributes = 0;
-  tuple.TupleData = buf;
-  tuple.TupleDataMax = 64;
-  tuple.TupleOffset = 0;
-  tuple.DesiredTuple = CISTPL_CONFIG;
-  CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-  CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-  CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-  link->conf.ConfigBase = parse.config.base;
-
   CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
   CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
   CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
@@ -686,6 +675,7 @@ static int nmclan_config(struct pcmcia_device *link)
   tuple.TupleData = buf;
   tuple.TupleDataMax = 64;
   tuple.TupleOffset = 0;
+  tuple.Attributes = 0;
   CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
   CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
   memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
index c51cc5d8789ab1dbf5fef6fd83e5fffe1cab853e..2b1238e2dbdbe077ed41a045829fda5b13bfda58 100644 (file)
@@ -519,31 +519,15 @@ static int pcnet_config(struct pcmcia_device *link)
     tuple_t tuple;
     cisparse_t parse;
     int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
-    int manfid = 0, prodid = 0, has_shmem = 0;
+    int has_shmem = 0;
     u_short buf[64];
     hw_info_t *hw_info;
 
     DEBUG(0, "pcnet_config(0x%p)\n", link);
 
-    tuple.Attributes = 0;
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleDataMax = sizeof(buf);
     tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
-    tuple.DesiredTuple = CISTPL_MANFID;
-    tuple.Attributes = TUPLE_RETURN_COMMON;
-    if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
-       (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS)) {
-       manfid = le16_to_cpu(buf[0]);
-       prodid = le16_to_cpu(buf[1]);
-    }
-
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -589,8 +573,8 @@ static int pcnet_config(struct pcmcia_device *link)
        link->conf.Attributes |= CONF_ENABLE_SPKR;
        link->conf.Status = CCSR_AUDIO_ENA;
     }
-    if ((manfid == MANFID_IBM) &&
-       (prodid == PRODID_IBM_HOME_AND_AWAY))
+    if ((link->manf_id == MANFID_IBM) &&
+       (link->card_id == PRODID_IBM_HOME_AND_AWAY))
        link->conf.ConfigIndex |= 0x10;
 
     CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
@@ -624,10 +608,10 @@ static int pcnet_config(struct pcmcia_device *link)
     info->flags = hw_info->flags;
     /* Check for user overrides */
     info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
-    if ((manfid == MANFID_SOCKET) &&
-       ((prodid == PRODID_SOCKET_LPE) ||
-        (prodid == PRODID_SOCKET_LPE_CF) ||
-        (prodid == PRODID_SOCKET_EIO)))
+    if ((link->manf_id == MANFID_SOCKET) &&
+       ((link->card_id == PRODID_SOCKET_LPE) ||
+        (link->card_id == PRODID_SOCKET_LPE_CF) ||
+        (link->card_id == PRODID_SOCKET_EIO)))
        info->flags &= ~USE_BIG_BUF;
     if (!use_big_buf)
        info->flags &= ~USE_BIG_BUF;
index 20fcc357620256f9043022f40b2857200a6a5a21..530df8883fe5a207a7d6465604ad11ce7327e1b5 100644 (file)
@@ -560,16 +560,8 @@ static int mhz_setup(struct pcmcia_device *link)
 
     /* Read the station address from the CIS.  It is stored as the last
        (fourth) string in the Version 1 Version/ID tuple. */
-    tuple->DesiredTuple = CISTPL_VERS_1;
-    if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
-       rc = -1;
-       goto free_cfg_mem;
-    }
-    /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
-    if (next_tuple(link, tuple, parse) != CS_SUCCESS)
-       first_tuple(link, tuple, parse);
-    if (parse->version_1.ns > 3) {
-       station_addr = parse->version_1.str + parse->version_1.ofs[3];
+    if (link->prod_id[3]) {
+       station_addr = link->prod_id[3];
        if (cvt_ascii_address(dev, station_addr) == 0) {
                rc = 0;
                goto free_cfg_mem;
@@ -744,15 +736,12 @@ static int smc_setup(struct pcmcia_device *link)
        }
     }
     /* Try the third string in the Version 1 Version/ID tuple. */
-    tuple->DesiredTuple = CISTPL_VERS_1;
-    if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
-       rc = -1;
-       goto free_cfg_mem;
-    }
-    station_addr = parse->version_1.str + parse->version_1.ofs[2];
-    if (cvt_ascii_address(dev, station_addr) == 0) {
-       rc = 0;
-       goto free_cfg_mem;
+    if (link->prod_id[2]) {
+       station_addr = link->prod_id[2];
+       if (cvt_ascii_address(dev, station_addr) == 0) {
+               rc = 0;
+               goto free_cfg_mem;
+       }
     }
 
     rc = -1;
@@ -970,10 +959,6 @@ static int smc91c92_config(struct pcmcia_device *link)
 {
     struct net_device *dev = link->priv;
     struct smc_private *smc = netdev_priv(dev);
-    struct smc_cfg_mem *cfg_mem;
-    tuple_t *tuple;
-    cisparse_t *parse;
-    u_char *buf;
     char *name;
     int i, j, rev;
     kio_addr_t ioaddr;
@@ -981,30 +966,8 @@ static int smc91c92_config(struct pcmcia_device *link)
 
     DEBUG(0, "smc91c92_config(0x%p)\n", link);
 
-    cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL);
-    if (!cfg_mem)
-       goto config_failed;
-
-    tuple = &cfg_mem->tuple;
-    parse = &cfg_mem->parse;
-    buf = cfg_mem->buf;
-
-    tuple->Attributes = tuple->TupleOffset = 0;
-    tuple->TupleData = (cisdata_t *)buf;
-    tuple->TupleDataMax = 64;
-
-    tuple->DesiredTuple = CISTPL_CONFIG;
-    i = first_tuple(link, tuple, parse);
-    CS_EXIT_TEST(i, ParseTuple, config_failed);
-    link->conf.ConfigBase = parse->config.base;
-    link->conf.Present = parse->config.rmask[0];
-
-    tuple->DesiredTuple = CISTPL_MANFID;
-    tuple->Attributes = TUPLE_RETURN_COMMON;
-    if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
-       smc->manfid = parse->manfid.manf;
-       smc->cardid = parse->manfid.card;
-    }
+    smc->manfid = link->manf_id;
+    smc->cardid = link->card_id;
 
     if ((smc->manfid == MANFID_OSITECH) &&
        (smc->cardid != PRODID_OSITECH_SEVEN)) {
@@ -1134,14 +1097,12 @@ static int smc91c92_config(struct pcmcia_device *link)
            printk(KERN_NOTICE "  No MII transceivers found!\n");
        }
     }
-    kfree(cfg_mem);
     return 0;
 
 config_undo:
     unregister_netdev(dev);
 config_failed:                 /* CS_EXIT_TEST() calls jump to here... */
     smc91c92_release(link);
-    kfree(cfg_mem);
     return -ENODEV;
 } /* smc91c92_config */
 
index f3914f58d67f2ca95acc1a99403523c41bc4592d..8478dca3d8d19a032f949ac3e2cbe6c003e2b663 100644 (file)
@@ -332,6 +332,7 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
  */
 
 typedef struct local_info_t {
+       struct net_device       *dev;
        struct pcmcia_device    *p_dev;
     dev_node_t node;
     struct net_device_stats stats;
@@ -353,7 +354,7 @@ typedef struct local_info_t {
  */
 static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void do_tx_timeout(struct net_device *dev);
-static void xirc2ps_tx_timeout_task(void *data);
+static void xirc2ps_tx_timeout_task(struct work_struct *work);
 static struct net_device_stats *do_get_stats(struct net_device *dev);
 static void set_addresses(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
@@ -567,6 +568,7 @@ xirc2ps_probe(struct pcmcia_device *link)
     if (!dev)
            return -ENOMEM;
     local = netdev_priv(dev);
+    local->dev = dev;
     local->p_dev = link;
     link->priv = dev;
 
@@ -591,7 +593,7 @@ xirc2ps_probe(struct pcmcia_device *link)
 #ifdef HAVE_TX_TIMEOUT
     dev->tx_timeout = do_tx_timeout;
     dev->watchdog_timeo = TX_TIMEOUT;
-    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
+    INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task);
 #endif
 
     return xirc2ps_config(link);
@@ -707,22 +709,11 @@ set_card_type(struct pcmcia_device *link, const void *s)
  * Returns: true if this is a CE2
  */
 static int
-has_ce2_string(struct pcmcia_device * link)
+has_ce2_string(struct pcmcia_device * p_dev)
 {
-    tuple_t tuple;
-    cisparse_t parse;
-    u_char buf[256];
-
-    tuple.Attributes = 0;
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = 254;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_VERS_1;
-    if (!first_tuple(link, &tuple, &parse) && parse.version_1.ns > 2) {
-       if (strstr(parse.version_1.str + parse.version_1.ofs[2], "CE2"))
-           return 1;
-    }
-    return 0;
+       if (p_dev->prod_id[2] && strstr(p_dev->prod_id[2], "CE2"))
+               return 1;
+       return 0;
 }
 
 /****************
@@ -792,13 +783,6 @@ xirc2ps_config(struct pcmcia_device * link)
        goto failure;
     }
 
-    /* get configuration stuff */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    if ((err=first_tuple(link, &tuple, &parse)))
-       goto cis_error;
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present =    parse.config.rmask[0];
-
     /* get the ethernet address from the CIS */
     tuple.DesiredTuple = CISTPL_FUNCE;
     for (err = first_tuple(link, &tuple, &parse); !err;
@@ -1062,8 +1046,6 @@ xirc2ps_config(struct pcmcia_device * link)
     xirc2ps_release(link);
     return -ENODEV;
 
-  cis_error:
-    printk(KNOT_XIRC "unable to parse CIS\n");
   failure:
     return -ENODEV;
 } /* xirc2ps_config */
@@ -1344,9 +1326,11 @@ xirc2ps_interrupt(int irq, void *dev_id)
 /*====================================================================*/
 
 static void
-xirc2ps_tx_timeout_task(void *data)
+xirc2ps_tx_timeout_task(struct work_struct *work)
 {
-    struct net_device *dev = data;
+       local_info_t *local =
+               container_of(work, local_info_t, tx_timeout_task);
+       struct net_device *dev = local->dev;
     /* reset the card */
     do_reset(dev,1);
     dev->trans_start = jiffies;
index f14e99276dbac0bc2ea40956718b39c8eae10436..096d4a100bf2d266c368de248e24e500972e2e63 100644 (file)
@@ -254,7 +254,7 @@ static int fixed_mdio_register_device(int number, int speed, int duplex)
                goto device_create_fail;
        }
 
-       phydev->irq = -1;
+       phydev->irq = PHY_IGNORE_INTERRUPT;
        phydev->dev.bus = &mdio_bus_type;
 
        if(number)
index 88237bdb525503d7f9fa9d53462ec6cc6ab75bd7..e175f3910b18e53711429e9d940ef01814469483 100644 (file)
@@ -397,7 +397,7 @@ out_unlock:
 EXPORT_SYMBOL(phy_start_aneg);
 
 
-static void phy_change(void *data);
+static void phy_change(struct work_struct *work);
 static void phy_timer(unsigned long data);
 
 /* phy_start_machine:
@@ -555,7 +555,7 @@ int phy_start_interrupts(struct phy_device *phydev)
 {
        int err = 0;
 
-       INIT_WORK(&phydev->phy_queue, phy_change, phydev);
+       INIT_WORK(&phydev->phy_queue, phy_change);
 
        if (request_irq(phydev->irq, phy_interrupt,
                                IRQF_SHARED,
@@ -587,8 +587,7 @@ int phy_stop_interrupts(struct phy_device *phydev)
         * Finish any pending work; we might have been scheduled
         * to be called from keventd ourselves, though.
         */
-       if (!current_is_keventd())
-               flush_scheduled_work();
+       run_scheduled_work(&phydev->phy_queue);
 
        free_irq(phydev->irq, phydev);
 
@@ -598,10 +597,11 @@ EXPORT_SYMBOL(phy_stop_interrupts);
 
 
 /* Scheduled by the phy_interrupt/timer to handle PHY changes */
-static void phy_change(void *data)
+static void phy_change(struct work_struct *work)
 {
        int err;
-       struct phy_device *phydev = data;
+       struct phy_device *phydev =
+               container_of(work, struct phy_device, phy_queue);
 
        err = phy_disable_interrupts(phydev);
 
index 71afb274498f5558f1c2f33334441503d43a1a19..6bb085f5443700f63f3d48e67d372230fa7f120d 100644 (file)
@@ -138,9 +138,9 @@ static const unsigned int net_debug = NET_DEBUG;
 #define PLIP_NIBBLE_WAIT        3000
 
 /* Bottom halves */
-static void plip_kick_bh(struct net_device *dev);
-static void plip_bh(struct net_device *dev);
-static void plip_timer_bh(struct net_device *dev);
+static void plip_kick_bh(struct work_struct *work);
+static void plip_bh(struct work_struct *work);
+static void plip_timer_bh(struct work_struct *work);
 
 /* Interrupt handler */
 static void plip_interrupt(int irq, void *dev_id);
@@ -207,9 +207,10 @@ struct plip_local {
 
 struct net_local {
        struct net_device_stats enet_stats;
+       struct net_device *dev;
        struct work_struct immediate;
-       struct work_struct deferred;
-       struct work_struct timer;
+       struct delayed_work deferred;
+       struct delayed_work timer;
        struct plip_local snd_data;
        struct plip_local rcv_data;
        struct pardevice *pardev;
@@ -306,11 +307,11 @@ plip_init_netdev(struct net_device *dev)
        nl->nibble      = PLIP_NIBBLE_WAIT;
 
        /* Initialize task queue structures */
-       INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
-       INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
+       INIT_WORK(&nl->immediate, plip_bh);
+       INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
 
        if (dev->irq == -1)
-               INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
+               INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
 
        spin_lock_init(&nl->lock);
 }
@@ -319,9 +320,10 @@ plip_init_netdev(struct net_device *dev)
    This routine is kicked by do_timer().
    Request `plip_bh' to be invoked. */
 static void
-plip_kick_bh(struct net_device *dev)
+plip_kick_bh(struct work_struct *work)
 {
-       struct net_local *nl = netdev_priv(dev);
+       struct net_local *nl =
+               container_of(work, struct net_local, deferred.work);
 
        if (nl->is_deferred)
                schedule_work(&nl->immediate);
@@ -362,9 +364,9 @@ static const plip_func connection_state_table[] =
 
 /* Bottom half handler of PLIP. */
 static void
-plip_bh(struct net_device *dev)
+plip_bh(struct work_struct *work)
 {
-       struct net_local *nl = netdev_priv(dev);
+       struct net_local *nl = container_of(work, struct net_local, immediate);
        struct plip_local *snd = &nl->snd_data;
        struct plip_local *rcv = &nl->rcv_data;
        plip_func f;
@@ -372,20 +374,21 @@ plip_bh(struct net_device *dev)
 
        nl->is_deferred = 0;
        f = connection_state_table[nl->connection];
-       if ((r = (*f)(dev, nl, snd, rcv)) != OK
-           && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
+       if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
+           && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
                nl->is_deferred = 1;
                schedule_delayed_work(&nl->deferred, 1);
        }
 }
 
 static void
-plip_timer_bh(struct net_device *dev)
+plip_timer_bh(struct work_struct *work)
 {
-       struct net_local *nl = netdev_priv(dev);
+       struct net_local *nl =
+               container_of(work, struct net_local, timer.work);
 
        if (!(atomic_read (&nl->kill_timer))) {
-               plip_interrupt (-1, dev);
+               plip_interrupt (-1, nl->dev);
 
                schedule_delayed_work(&nl->timer, 1);
        }
@@ -1284,6 +1287,7 @@ static void plip_attach (struct parport *port)
                }
 
                nl = netdev_priv(dev);
+               nl->dev = dev;
                nl->pardev = parport_register_device(port, name, plip_preempt,
                                                 plip_wakeup, plip_interrupt,
                                                 0, dev);
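
plip, like ql3xxx and r8169 below, moves its timer-like handlers from struct work_struct to struct delayed_work; the callback then recovers its context through the embedded .work member. A small sketch of that pattern, using made-up names and a one-shot (non re-arming) handler:

/* Sketch of the delayed_work pattern used above; names are illustrative. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_local {
	int ticks;
	struct delayed_work timer;
};

static struct example_local local;

static void example_timer_fn(struct work_struct *work)
{
	/* container_of() goes through the .work member of delayed_work. */
	struct example_local *nl =
		container_of(work, struct example_local, timer.work);

	nl->ticks++;
}

static int __init example_init(void)
{
	INIT_DELAYED_WORK(&local.timer, example_timer_fn);
	schedule_delayed_work(&local.timer, HZ);	/* run once, ~1s later */
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work(&local.timer);
	flush_scheduled_work();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
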
index ec640f6229ae53e0a3f75ce4984c95365cba55ac..d79d141a601d572ab255e12c8b5bacd3f4b8946b 100644 (file)
@@ -2008,7 +2008,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
                               "%s: Another function issued a reset to the "
                               "chip. ISR value = %x.\n", ndev->name, value);
                }
-               queue_work(qdev->workqueue, &qdev->reset_work);
+               queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
                spin_unlock(&qdev->adapter_lock);
        } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
                ql_disable_interrupts(qdev);
@@ -3182,11 +3182,13 @@ static void ql3xxx_tx_timeout(struct net_device *ndev)
        /*
         * Wake up the worker to process this event.
         */
-       queue_work(qdev->workqueue, &qdev->tx_timeout_work);
+       queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
 }
 
-static void ql_reset_work(struct ql3_adapter *qdev)
+static void ql_reset_work(struct work_struct *work)
 {
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, reset_work.work);
        struct net_device *ndev = qdev->ndev;
        u32 value;
        struct ql_tx_buf_cb *tx_cb;
@@ -3278,9 +3280,12 @@ static void ql_reset_work(struct ql3_adapter *qdev)
        }
 }
 
-static void ql_tx_timeout_work(struct ql3_adapter *qdev)
+static void ql_tx_timeout_work(struct work_struct *work)
 {
-       ql_cycle_adapter(qdev,QL_DO_RESET);
+       struct ql3_adapter *qdev =
+               container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+       ql_cycle_adapter(qdev, QL_DO_RESET);
 }
 
 static void ql_get_board_info(struct ql3_adapter *qdev)
@@ -3459,9 +3464,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
        netif_stop_queue(ndev);
 
        qdev->workqueue = create_singlethread_workqueue(ndev->name);
-       INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
-       INIT_WORK(&qdev->tx_timeout_work,
-                 (void (*)(void *))ql_tx_timeout_work, qdev);
+       INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+       INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
 
        init_timer(&qdev->adapter_timer);
        qdev->adapter_timer.function = ql3xxx_timer;
index 65da2c0bfda6b4369d130fa1916efcaa07644461..ea94de7fd0719f2bca4fbb7653a365ecf726e81c 100644 (file)
@@ -1186,8 +1186,8 @@ struct ql3_adapter {
        u32 numPorts;
        struct net_device_stats stats;
        struct workqueue_struct *workqueue;
-       struct work_struct reset_work;
-       struct work_struct tx_timeout_work;
+       struct delayed_work reset_work;
+       struct delayed_work tx_timeout_work;
        u32 max_frame_size;
 };
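
In the ql3xxx hunks the plain work_struct members become struct delayed_work, queue_work() callers switch to queue_delayed_work() with a delay of 0 to keep the old run-as-soon-as-possible behaviour, and the handler reaches the adapter through the embedded .work member. A sketch of that shape, with illustrative names (bar_adapter, bar_reset_work) rather than the driver's own:

#include <linux/workqueue.h>

struct bar_adapter {
	struct workqueue_struct *wq;
	struct delayed_work reset_work;		/* was a plain struct work_struct */
};

static void bar_reset_work(struct work_struct *work)
{
	/* delayed_work embeds a work_struct member named "work" */
	struct bar_adapter *adapter =
		container_of(work, struct bar_adapter, reset_work.work);

	/* ... reset the hardware using "adapter" ... */
	(void)adapter;
}

/* setup and scheduling:
 *	INIT_DELAYED_WORK(&adapter->reset_work, bar_reset_work);
 *	queue_delayed_work(adapter->wq, &adapter->reset_work, 0);
 */
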
 
index 45d3ca431957bc43070cd95400a99cd67bf58537..f83b41d4cb0e64f8374b55b894bf916171b8a479 100644 (file)
@@ -225,6 +225,7 @@ MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
 static int rx_copybreak = 200;
 static int use_dac;
+static int ignore_parity_err;
 static struct {
        u32 msg_enable;
 } debug = { -1 };
@@ -424,6 +425,7 @@ struct ring_info {
 struct rtl8169_private {
        void __iomem *mmio_addr;        /* memory map physical address */
        struct pci_dev *pci_dev;        /* Index of PCI device */
+       struct net_device *dev;
        struct net_device_stats stats;  /* statistics of net device */
        spinlock_t lock;                /* spin lock flag */
        u32 msg_enable;
@@ -455,7 +457,7 @@ struct rtl8169_private {
        void (*phy_reset_enable)(void __iomem *);
        unsigned int (*phy_reset_pending)(void __iomem *);
        unsigned int (*link_ok)(void __iomem *);
-       struct work_struct task;
+       struct delayed_work task;
        unsigned wol_enabled : 1;
 };
 
@@ -469,6 +471,8 @@ module_param(use_dac, int, 0);
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+module_param_named(ignore_parity_err, ignore_parity_err, bool, 0);
+MODULE_PARM_DESC(ignore_parity_err, "Ignore PCI parity error as target. Default: false");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(RTL8169_VERSION);
 
@@ -1283,11 +1287,6 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
        /* Shazam ! */
 
        if (tp->mac_version == RTL_GIGA_MAC_VER_04) {
-               mdio_write(ioaddr, 31, 0x0001);
-               mdio_write(ioaddr,  9, 0x273a);
-               mdio_write(ioaddr, 14, 0x7bfb);
-               mdio_write(ioaddr, 27, 0x841e);
-
                mdio_write(ioaddr, 31, 0x0002);
                mdio_write(ioaddr,  1, 0x90d0);
                mdio_write(ioaddr, 31, 0x0000);
@@ -1510,6 +1509,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        SET_MODULE_OWNER(dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
        tp = netdev_priv(dev);
+       tp->dev = dev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1782,7 +1782,7 @@ static int rtl8169_open(struct net_device *dev)
        if (retval < 0)
                goto err_free_rx;
 
-       INIT_WORK(&tp->task, NULL, dev);
+       INIT_DELAYED_WORK(&tp->task, NULL);
 
        rtl8169_hw_start(dev);
 
@@ -1815,12 +1815,25 @@ static void rtl8169_hw_reset(void __iomem *ioaddr)
        RTL_R8(ChipCmd);
 }
 
-static void
-rtl8169_hw_start(struct net_device *dev)
+static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       u32 cfg = rtl8169_rx_config;
+
+       cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+       RTL_W32(RxConfig, cfg);
+
+       /* Set DMA burst size and Interframe Gap Time */
+       RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+               (InterFrameGap << TxInterFrameGapShift));
+}
+
+static void rtl8169_hw_start(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
+       u16 cmd;
        u32 i;
 
        /* Soft reset the chip. */
@@ -1833,6 +1846,11 @@ rtl8169_hw_start(struct net_device *dev)
                msleep_interruptible(1);
        }
 
+       if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
+               RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
+               pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+       }
+
        if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
                pci_write_config_word(pdev, 0x68, 0x00);
                pci_write_config_word(pdev, 0x69, 0x08);
@@ -1840,8 +1858,6 @@ rtl8169_hw_start(struct net_device *dev)
 
        /* Undocumented stuff. */
        if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
-               u16 cmd;
-
                /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
                if ((RTL_R8(Config2) & 0x07) & 0x01)
                        RTL_W32(0x7c, 0x0007ffff);
@@ -1853,23 +1869,29 @@ rtl8169_hw_start(struct net_device *dev)
                pci_write_config_word(pdev, PCI_COMMAND, cmd);
        }
 
-
        RTL_W8(Cfg9346, Cfg9346_Unlock);
+       if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
+           (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
+           (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
+           (tp->mac_version == RTL_GIGA_MAC_VER_04))
+               RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
        RTL_W8(EarlyTxThres, EarlyTxThld);
 
        /* Low hurts. Let's disable the filtering. */
        RTL_W16(RxMaxSize, 16383);
 
-       /* Set Rx Config register */
-       i = rtl8169_rx_config |
-               (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
-       RTL_W32(RxConfig, i);
+       if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
+           (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
+           (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
+           (tp->mac_version == RTL_GIGA_MAC_VER_04))
+               RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+               rtl8169_set_rx_tx_config_registers(tp);
 
-       /* Set DMA burst size and Interframe Gap Time */
-       RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
-               (InterFrameGap << TxInterFrameGapShift));
+       cmd = RTL_R16(CPlusCmd);
+       RTL_W16(CPlusCmd, cmd);
 
-       tp->cp_cmd |= RTL_R16(CPlusCmd) | PCIMulRW;
+       tp->cp_cmd |= cmd | PCIMulRW;
 
        if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
            (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
@@ -1895,7 +1917,15 @@ rtl8169_hw_start(struct net_device *dev)
        RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
        RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
        RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
-       RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+
+       if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
+           (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
+           (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
+           (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
+               RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+               rtl8169_set_rx_tx_config_registers(tp);
+       }
+
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
@@ -1990,7 +2020,7 @@ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
        if (!skb)
                goto err_out;
 
-       skb_reserve(skb, align);
+       skb_reserve(skb, (align - 1) & (u32)skb->data);
        *sk_buff = skb;
 
        mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
@@ -2105,11 +2135,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
        tp->cur_tx = tp->dirty_tx = 0;
 }
 
-static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *))
+static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       PREPARE_WORK(&tp->task, task, dev);
+       PREPARE_DELAYED_WORK(&tp->task, task);
        schedule_delayed_work(&tp->task, 4);
 }
 
@@ -2128,9 +2158,11 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
        netif_poll_enable(dev);
 }
 
-static void rtl8169_reinit_task(void *_data)
+static void rtl8169_reinit_task(struct work_struct *work)
 {
-       struct net_device *dev = _data;
+       struct rtl8169_private *tp =
+               container_of(work, struct rtl8169_private, task.work);
+       struct net_device *dev = tp->dev;
        int ret;
 
        if (netif_running(dev)) {
@@ -2153,10 +2185,11 @@ static void rtl8169_reinit_task(void *_data)
        }
 }
 
-static void rtl8169_reset_task(void *_data)
+static void rtl8169_reset_task(struct work_struct *work)
 {
-       struct net_device *dev = _data;
-       struct rtl8169_private *tp = netdev_priv(dev);
+       struct rtl8169_private *tp =
+               container_of(work, struct rtl8169_private, task.work);
+       struct net_device *dev = tp->dev;
 
        if (!netif_running(dev))
                return;
@@ -2350,12 +2383,17 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
        /*
         * The recovery sequence below admits a very elaborated explanation:
         * - it seems to work;
-        * - I did not see what else could be done.
+        * - I did not see what else could be done;
+        * - it makes iop3xx happy.
         *
         * Feel free to adjust to your needs.
         */
-       pci_write_config_word(pdev, PCI_COMMAND,
-                             pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+       if (ignore_parity_err)
+               pci_cmd &= ~PCI_COMMAND_PARITY;
+       else
+               pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
+
+       pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 
        pci_write_config_word(pdev, PCI_STATUS,
                pci_status & (PCI_STATUS_DETECTED_PARITY |
@@ -2369,10 +2407,11 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
                tp->cp_cmd &= ~PCIDAC;
                RTL_W16(CPlusCmd, tp->cp_cmd);
                dev->features &= ~NETIF_F_HIGHDMA;
-               rtl8169_schedule_work(dev, rtl8169_reinit_task);
        }
 
        rtl8169_hw_reset(ioaddr);
+
+       rtl8169_schedule_work(dev, rtl8169_reinit_task);
 }
 
 static void
@@ -2452,7 +2491,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
 
                skb = dev_alloc_skb(pkt_size + align);
                if (skb) {
-                       skb_reserve(skb, align);
+                       skb_reserve(skb, (align - 1) & (u32)skb->data);
                        eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
                        *sk_buff = skb;
                        rtl8169_mark_to_asic(desc, rx_buf_sz);
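
The rtl8169_pcierr_interrupt hunk adds an ignore_parity_err module parameter: when set, the recovery path clears PCI_COMMAND_PARITY so the chip stops reacting to target parity errors; otherwise it re-enables SERR and parity checking as before. A sketch of that register handling in isolation (the helper name is hypothetical, not part of the driver):

#include <linux/pci.h>

static void example_pcierr_recover(struct pci_dev *pdev, bool ignore_parity)
{
	u16 pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);

	if (ignore_parity)
		pci_cmd &= ~PCI_COMMAND_PARITY;		/* stop responding to parity errors */
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
}
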
index 33569ec9dbfcbc891acc7bf2b0921039abac845a..250cdbeefdfde0da0641c999e6af0f0272351aa0 100644 (file)
@@ -5872,9 +5872,9 @@ static void s2io_tasklet(unsigned long dev_addr)
  * Description: Sets the link status for the adapter
  */
 
-static void s2io_set_link(unsigned long data)
+static void s2io_set_link(struct work_struct *work)
 {
-       nic_t *nic = (nic_t *) data;
+       nic_t *nic = container_of(work, nic_t, set_link_task);
        struct net_device *dev = nic->dev;
        XENA_dev_config_t __iomem *bar0 = nic->bar0;
        register u64 val64;
@@ -6379,10 +6379,10 @@ static int s2io_card_up(nic_t * sp)
  * spin lock.
  */
 
-static void s2io_restart_nic(unsigned long data)
+static void s2io_restart_nic(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *) data;
-       nic_t *sp = dev->priv;
+       nic_t *sp = container_of(work, nic_t, rst_timer_task);
+       struct net_device *dev = sp->dev;
 
        s2io_card_down(sp);
        if (s2io_card_up(sp)) {
@@ -6992,10 +6992,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        dev->tx_timeout = &s2io_tx_watchdog;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
-       INIT_WORK(&sp->rst_timer_task,
-                 (void (*)(void *)) s2io_restart_nic, dev);
-       INIT_WORK(&sp->set_link_task,
-                 (void (*)(void *)) s2io_set_link, sp);
+       INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
+       INIT_WORK(&sp->set_link_task, s2io_set_link);
 
        pci_save_state(sp->pdev);
 
index 12b719f4d00f7d161fe2437f369b168ed8f464fe..3b0bafd273c8c74fdd95b5120d5c9f0c7a9aad42 100644 (file)
@@ -1000,7 +1000,7 @@ s2io_msix_fifo_handle(int irq, void *dev_id);
 static irqreturn_t s2io_isr(int irq, void *dev_id);
 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
 static const struct ethtool_ops netdev_ethtool_ops;
-static void s2io_set_link(unsigned long data);
+static void s2io_set_link(struct work_struct *work);
 static int s2io_set_swapper(nic_t * sp);
 static void s2io_card_down(nic_t *nic);
 static int s2io_card_up(nic_t *nic);
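
The s2io hunks show the same conversion for non-delayed work, with one extra benefit: the old INIT_WORK calls had to cast the handlers through (void (*)(void *)), which defeated type checking, while handlers that match work_func_t need no cast at all. A brief illustration with made-up names:

#include <linux/workqueue.h>

/* work_func_t is void (*)(struct work_struct *), so a correctly typed
 * handler can be passed to INIT_WORK() without any cast. */
static void example_restart_nic(struct work_struct *work)
{
	/* ... recover the adapter with container_of() and restart it ... */
}

static void example_setup(struct work_struct *w)
{
	INIT_WORK(w, example_restart_nic);	/* type-checked, no cast needed */
}
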
index d9d0a3a3c558b349e7c3d0a6147bb8a06538a743..0d6c95c7aedf9b6accd679bd2c29869eeb20e191 100644 (file)
@@ -750,7 +750,7 @@ int __init init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(dev_seeq);
        release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
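
The seeq8005 change, and several like it further down, tag the legacy init_module()/cleanup_module() entry points with __init/__exit so their code can be discarded once it can no longer run (init code after initialization, exit code when module unloading is disabled or the driver is built in). A minimal annotated pair for a hypothetical driver:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>

static struct net_device *example_dev;	/* hypothetical probed device */

int __init init_module(void)
{
	/* probe the hardware and register example_dev here */
	return 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);
}

MODULE_LICENSE("GPL");
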
index aaba458584fbed8e8613a900b4edd8f8f9eb4fd6..b70ed79d412157e09a7f2e08438bb031fb42bf35 100644 (file)
@@ -280,6 +280,7 @@ enum sis190_feature {
 struct sis190_private {
        void __iomem *mmio_addr;
        struct pci_dev *pci_dev;
+       struct net_device *dev;
        struct net_device_stats stats;
        spinlock_t lock;
        u32 rx_buf_sz;
@@ -897,10 +898,11 @@ static void sis190_hw_start(struct net_device *dev)
        netif_start_queue(dev);
 }
 
-static void sis190_phy_task(void * data)
+static void sis190_phy_task(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct sis190_private *tp = netdev_priv(dev);
+       struct sis190_private *tp =
+               container_of(work, struct sis190_private, phy_task);
+       struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->mmio_addr;
        int phy_id = tp->mii_if.phy_id;
        u16 val;
@@ -1047,7 +1049,7 @@ static int sis190_open(struct net_device *dev)
        if (rc < 0)
                goto err_free_rx_1;
 
-       INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+       INIT_WORK(&tp->phy_task, sis190_phy_task);
 
        sis190_request_timer(dev);
 
@@ -1436,6 +1438,7 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        tp = netdev_priv(dev);
+       tp->dev = dev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
 
        rc = pci_enable_device(pdev);
@@ -1798,7 +1801,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
 
        sis190_init_rxfilter(dev);
 
-       INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+       INIT_WORK(&tp->phy_task, sis190_phy_task);
 
        dev->open = sis190_open;
        dev->stop = sis190_close;
index ab66d80a4455896d8fe7084c1f3943d50df3567a..3e7aa49afd00565e8a10a13b07c5fd98d6f44f64 100644 (file)
@@ -1319,7 +1319,7 @@ SK_BOOL   AutoNeg)        /* Is Auto-negotiation used ? */
        SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &Isrc);
 
 #ifdef xDEBUG
-       if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT) ==
+       if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) ==
                (PHY_B_IS_SCR_S_ER | PHY_B_IS_RRS_CHANGE | PHY_B_IS_LRS_CHANGE)) {
 
                SK_U32  Stat1, Stat2, Stat3;
index 5513907e8393d4188dd727aeea1f484d7598bd92..b60f0451f6cdaddff325b599ae5f37e85a6e5c4e 100644 (file)
@@ -1327,10 +1327,11 @@ static void xm_check_link(struct net_device *dev)
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(void *arg)
+static void xm_link_timer(struct work_struct *work)
 {
-       struct net_device *dev = arg;
-       struct skge_port *skge = netdev_priv(arg);
+       struct skge_port *skge =
+               container_of(work, struct skge_port, link_thread.work);
+       struct net_device *dev = skge->netdev;
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
 
@@ -3072,9 +3073,9 @@ static void skge_error_irq(struct skge_hw *hw)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(void *arg)
+static void skge_extirq(struct work_struct *work)
 {
-       struct skge_hw *hw = arg;
+       struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
        int port;
 
        mutex_lock(&hw->phy_mutex);
@@ -3456,7 +3457,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
        skge->port = port;
 
        /* Only used for Genesis XMAC */
-       INIT_WORK(&skge->link_thread, xm_link_timer, dev);
+       INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
 
        if (hw->chip_id != CHIP_ID_GENESIS) {
                dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3543,7 +3544,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
        hw->pdev = pdev;
        mutex_init(&hw->phy_mutex);
-       INIT_WORK(&hw->phy_work, skge_extirq, hw);
+       INIT_WORK(&hw->phy_work, skge_extirq);
        spin_lock_init(&hw->hw_lock);
 
        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
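
The skge_extirq hunk keeps the existing design in which slow, spin-waiting PHY register accesses are pushed out of the interrupt handler into a work item serialized by a mutex; only the handler signature and the INIT_WORK call change. A sketch of that deferral pattern with illustrative names:

#include <linux/workqueue.h>
#include <linux/mutex.h>

struct example_hw {
	struct mutex phy_mutex;
	struct work_struct phy_work;
};

static void example_phy_work(struct work_struct *work)
{
	struct example_hw *hw = container_of(work, struct example_hw, phy_work);

	mutex_lock(&hw->phy_mutex);
	/* slow MDIO polling is fine here: process context, may sleep */
	mutex_unlock(&hw->phy_mutex);
}

/* the hard interrupt handler only does:
 *	schedule_work(&hw->phy_work);
 * keeping the spin-wait out of interrupt context.
 */
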
index 537c0aaa1db8de7b327572657fea85003548afda..f6223c533c015bff50618d66bc8c3340c048e337 100644 (file)
@@ -389,10 +389,10 @@ enum {
 /* Packet Arbiter Registers */
 /*     B3_PA_CTRL              16 bit  Packet Arbiter Ctrl Register */
 enum {
-       PA_CLR_TO_TX2   = 1<<13,        /* Clear IRQ Packet Timeout TX2 */
-       PA_CLR_TO_TX1   = 1<<12,        /* Clear IRQ Packet Timeout TX1 */
-       PA_CLR_TO_RX2   = 1<<11,        /* Clear IRQ Packet Timeout RX2 */
-       PA_CLR_TO_RX1   = 1<<10,        /* Clear IRQ Packet Timeout RX1 */
+       PA_CLR_TO_TX2   = 1<<13,/* Clear IRQ Packet Timeout TX2 */
+       PA_CLR_TO_TX1   = 1<<12,/* Clear IRQ Packet Timeout TX1 */
+       PA_CLR_TO_RX2   = 1<<11,/* Clear IRQ Packet Timeout RX2 */
+       PA_CLR_TO_RX1   = 1<<10,/* Clear IRQ Packet Timeout RX1 */
        PA_ENA_TO_TX2   = 1<<9, /* Enable  Timeout Timer TX2 */
        PA_DIS_TO_TX2   = 1<<8, /* Disable Timeout Timer TX2 */
        PA_ENA_TO_TX1   = 1<<7, /* Enable  Timeout Timer TX1 */
@@ -481,14 +481,14 @@ enum {
 /* RAM Buffer Register Offsets */
 enum {
 
-       RB_START        = 0x00,/* 32 bit        RAM Buffer Start Address */
+       RB_START= 0x00,/* 32 bit        RAM Buffer Start Address */
        RB_END  = 0x04,/* 32 bit        RAM Buffer End Address */
        RB_WP   = 0x08,/* 32 bit        RAM Buffer Write Pointer */
        RB_RP   = 0x0c,/* 32 bit        RAM Buffer Read Pointer */
-       RB_RX_UTPP      = 0x10,/* 32 bit        Rx Upper Threshold, Pause Packet */
-       RB_RX_LTPP      = 0x14,/* 32 bit        Rx Lower Threshold, Pause Packet */
-       RB_RX_UTHP      = 0x18,/* 32 bit        Rx Upper Threshold, High Prio */
-       RB_RX_LTHP      = 0x1c,/* 32 bit        Rx Lower Threshold, High Prio */
+       RB_RX_UTPP= 0x10,/* 32 bit      Rx Upper Threshold, Pause Packet */
+       RB_RX_LTPP= 0x14,/* 32 bit      Rx Lower Threshold, Pause Packet */
+       RB_RX_UTHP= 0x18,/* 32 bit      Rx Upper Threshold, High Prio */
+       RB_RX_LTHP= 0x1c,/* 32 bit      Rx Lower Threshold, High Prio */
        /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
        RB_PC   = 0x20,/* 32 bit        RAM Buffer Packet Counter */
        RB_LEV  = 0x24,/* 32 bit        RAM Buffer Level Register */
@@ -532,7 +532,7 @@ enum {
        PHY_ADDR_MARV   = 0,
 };
 
-#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
+#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
 
 /* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
 enum {
@@ -578,15 +578,15 @@ enum {
        MFF_DIS_TIST    = 1<<2, /* Disable Time Stamp Gener */
        MFF_CLR_INTIST  = 1<<1, /* Clear IRQ No Time Stamp */
        MFF_CLR_INSTAT  = 1<<0, /* Clear IRQ No Status */
-#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT
+       MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
 };
 
 /*     TX_MFF_CTRL1    16 bit  Transmit MAC FIFO Control Reg 1 */
 enum {
-       MFF_CLR_PERR    = 1<<15,        /* Clear Parity Error IRQ */
-                                                               /* Bit 14:      reserved */
-       MFF_ENA_PKT_REC = 1<<13,        /* Enable  Packet Recovery */
-       MFF_DIS_PKT_REC = 1<<12,        /* Disable Packet Recovery */
+       MFF_CLR_PERR    = 1<<15, /* Clear Parity Error IRQ */
+
+       MFF_ENA_PKT_REC = 1<<13, /* Enable  Packet Recovery */
+       MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
 
        MFF_ENA_W4E     = 1<<7, /* Enable  Wait for Empty */
        MFF_DIS_W4E     = 1<<6, /* Disable Wait for Empty */
@@ -595,9 +595,10 @@ enum {
        MFF_DIS_LOOPB   = 1<<2, /* Disable Loopback */
        MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */
        MFF_SET_MAC_RST = 1<<0, /* Set   XMAC Reset */
+
+       MFF_TX_CTRL_DEF  = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH,
 };
 
-#define MFF_TX_CTRL_DEF        (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH)
 
 /*     RX_MFF_TST2              8 bit  Receive MAC FIFO Test Register 2 */
 /*     TX_MFF_TST2              8 bit  Transmit MAC FIFO Test Register 2 */
@@ -1304,8 +1305,8 @@ enum {
 
 /* special defines for FIBER (88E1011S only) */
 enum {
-       PHY_M_AN_ASP_X  = 1<<8, /* Asymmetric Pause */
-       PHY_M_AN_PC_X   = 1<<7, /* MAC Pause implemented */
+       PHY_M_AN_ASP_X          = 1<<8, /* Asymmetric Pause */
+       PHY_M_AN_PC_X           = 1<<7, /* MAC Pause implemented */
        PHY_M_AN_1000X_AHD      = 1<<6, /* Advertise 10000Base-X Half Duplex */
        PHY_M_AN_1000X_AFD      = 1<<5, /* Advertise 10000Base-X Full Duplex */
 };
@@ -1320,7 +1321,7 @@ enum {
 
 /*****  PHY_MARV_1000T_CTRL    16 bit r/w      1000Base-T Control Reg *****/
 enum {
-       PHY_M_1000C_TEST        = 7<<13,/* Bit 15..13:  Test Modes */
+       PHY_M_1000C_TEST= 7<<13,/* Bit 15..13:  Test Modes */
        PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
        PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
        PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
@@ -1349,7 +1350,7 @@ enum {
        PHY_M_PC_EN_DET_PLUS    = 3<<8, /* Energy Detect Plus (Mode 2) */
 };
 
-#define PHY_M_PC_MDI_XMODE(x)  (((x)<<5) & PHY_M_PC_MDIX_MSK)
+#define PHY_M_PC_MDI_XMODE(x)  (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
 
 enum {
        PHY_M_PC_MAN_MDI        = 0, /* 00 = Manual MDI configuration */
@@ -1432,24 +1433,24 @@ enum {
        PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
        PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10:     Master Downshift Counter */
                                        /* (88E1011 only) */
-       PHY_M_EC_S_DSC_MSK      = 3<<8,/* Bit  9.. 8:   Slave  Downshift Counter */
+       PHY_M_EC_S_DSC_MSK  = 3<<8,  /* Bit  9.. 8:     Slave  Downshift Counter */
                                       /* (88E1011 only) */
-       PHY_M_EC_M_DSC_MSK2     = 7<<9,/* Bit 11.. 9:   Master Downshift Counter */
+       PHY_M_EC_M_DSC_MSK2  = 7<<9, /* Bit 11.. 9:     Master Downshift Counter */
                                        /* (88E1111 only) */
-       PHY_M_EC_DOWN_S_ENA     = 1<<8, /* Downshift Enable (88E1111 only) */
+       PHY_M_EC_DOWN_S_ENA  = 1<<8, /* Downshift Enable (88E1111 only) */
                                        /* !!! Errata in spec. (1 = disable) */
-       PHY_M_EC_RX_TIM_CT      = 1<<7, /* RGMII Rx Timing Control*/
-       PHY_M_EC_MAC_S_MSK      = 7<<4,/* Bit  6.. 4:   Def. MAC interface speed */
-       PHY_M_EC_FIB_AN_ENA     = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
-       PHY_M_EC_DTE_D_ENA      = 1<<2, /* DTE Detect Enable (88E1111 only) */
-       PHY_M_EC_TX_TIM_CT      = 1<<1, /* RGMII Tx Timing Control */
-       PHY_M_EC_TRANS_DIS      = 1<<0, /* Transmitter Disable (88E1111 only) */};
-
-#define PHY_M_EC_M_DSC(x)      ((x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
-#define PHY_M_EC_S_DSC(x)      ((x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
-#define PHY_M_EC_MAC_S(x)      ((x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
-
-#define PHY_M_EC_M_DSC_2(x)    ((x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
+       PHY_M_EC_RX_TIM_CT   = 1<<7, /* RGMII Rx Timing Control*/
+       PHY_M_EC_MAC_S_MSK   = 7<<4, /* Bit  6.. 4:     Def. MAC interface speed */
+       PHY_M_EC_FIB_AN_ENA  = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+       PHY_M_EC_DTE_D_ENA   = 1<<2, /* DTE Detect Enable (88E1111 only) */
+       PHY_M_EC_TX_TIM_CT   = 1<<1, /* RGMII Tx Timing Control */
+       PHY_M_EC_TRANS_DIS   = 1<<0, /* Transmitter Disable (88E1111 only) */};
+
+#define PHY_M_EC_M_DSC(x)      ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x)      ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_MAC_S(x)      ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */
+
+#define PHY_M_EC_M_DSC_2(x)    ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */
                                                                                        /* 100=5x; 101=6x; 110=7x; 111=8x */
 enum {
        MAC_TX_CLK_0_MHZ        = 2,
@@ -1468,10 +1469,12 @@ enum {
        PHY_M_LEDC_LK_C_MSK     = 7<<3,/* Bit  5.. 3: Link Control Mask */
                                        /* (88E1111 only) */
 };
+#define PHY_M_LED_PULS_DUR(x)  (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_BLINK_RT(x)  (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
 
 enum {
-       PHY_M_LEDC_LINK_MSK     = 3<<3,/* Bit  4.. 3: Link Control Mask */
-                                                                       /* (88E1011 only) */
+       PHY_M_LEDC_LINK_MSK     = 3<<3, /* Bit  4.. 3: Link Control Mask */
+                                       /* (88E1011 only) */
        PHY_M_LEDC_DP_CTRL      = 1<<2, /* Duplex Control */
        PHY_M_LEDC_DP_C_MSB     = 1<<2, /* Duplex Control (MSB, 88E1111 only) */
        PHY_M_LEDC_RX_CTRL      = 1<<1, /* Rx Activity / Link */
@@ -1479,27 +1482,24 @@ enum {
        PHY_M_LEDC_TX_C_MSB     = 1<<0, /* Tx Control (MSB, 88E1111 only) */
 };
 
-#define PHY_M_LED_PULS_DUR(x)  (((x)<<12) & PHY_M_LEDC_PULS_MSK)
-
 enum {
-       PULS_NO_STR     = 0,/* no pulse stretching */
-       PULS_21MS       = 1,/* 21 ms to 42 ms */
-       PULS_42MS       = 2,/* 42 ms to 84 ms */
-       PULS_84MS       = 3,/* 84 ms to 170 ms */
-       PULS_170MS      = 4,/* 170 ms to 340 ms */
-       PULS_340MS      = 5,/* 340 ms to 670 ms */
-       PULS_670MS      = 6,/* 670 ms to 1.3 s */
-       PULS_1300MS     = 7,/* 1.3 s to 2.7 s */
+       PULS_NO_STR     = 0, /* no pulse stretching */
+       PULS_21MS       = 1, /* 21 ms to 42 ms */
+       PULS_42MS       = 2, /* 42 ms to 84 ms */
+       PULS_84MS       = 3, /* 84 ms to 170 ms */
+       PULS_170MS      = 4, /* 170 ms to 340 ms */
+       PULS_340MS      = 5, /* 340 ms to 670 ms */
+       PULS_670MS      = 6, /* 670 ms to 1.3 s */
+       PULS_1300MS     = 7, /* 1.3 s to 2.7 s */
 };
 
-#define PHY_M_LED_BLINK_RT(x)  (((x)<<8) & PHY_M_LEDC_BL_R_MSK)
 
 enum {
-       BLINK_42MS      = 0,/* 42 ms */
-       BLINK_84MS      = 1,/* 84 ms */
-       BLINK_170MS     = 2,/* 170 ms */
-       BLINK_340MS     = 3,/* 340 ms */
-       BLINK_670MS     = 4,/* 670 ms */
+       BLINK_42MS      = 0, /* 42 ms */
+       BLINK_84MS      = 1, /* 84 ms */
+       BLINK_170MS     = 2, /* 170 ms */
+       BLINK_340MS     = 3, /* 340 ms */
+       BLINK_670MS     = 4, /* 670 ms */
 };
 
 /*****  PHY_MARV_LED_OVER      16 bit r/w      Manual LED Override Reg *****/
@@ -1525,7 +1525,7 @@ enum {
        PHY_M_EC2_FO_IMPED      = 1<<5, /* Fiber Output Impedance */
        PHY_M_EC2_FO_M_CLK      = 1<<4, /* Fiber Mode Clock Enable */
        PHY_M_EC2_FO_BOOST      = 1<<3, /* Fiber Output Boost */
-       PHY_M_EC2_FO_AM_MSK     = 7,/* Bit  2.. 0:      Fiber Output Amplitude */
+       PHY_M_EC2_FO_AM_MSK     = 7, /* Bit  2.. 0:     Fiber Output Amplitude */
 };
 
 /*****  PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
@@ -1550,7 +1550,7 @@ enum {
        PHY_M_CABD_DIS_WAIT     = 1<<15, /* Disable Waiting Period (Page 1) */
                                        /* (88E1111 only) */
        PHY_M_CABD_STAT_MSK     = 3<<13, /* Bit 14..13: Status Mask */
-       PHY_M_CABD_AMPL_MSK     = 0x1f<<8,/* Bit 12.. 8: Amplitude Mask */
+       PHY_M_CABD_AMPL_MSK     = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */
                                        /* (88E1111 only) */
        PHY_M_CABD_DIST_MSK     = 0xff, /* Bit  7.. 0: Distance Mask */
 };
@@ -1605,9 +1605,9 @@ enum {
 
 /*****  PHY_MARV_PHY_CTRL (page 3)             16 bit r/w      LED Control Reg. *****/
 enum {
-       PHY_M_LEDC_LOS_MSK      = 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
+       PHY_M_LEDC_LOS_MSK      = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
        PHY_M_LEDC_INIT_MSK     = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
-       PHY_M_LEDC_STA1_MSK     = 0xf<<4,/* Bit  7.. 4: STAT1 LED Ctrl. Mask */
+       PHY_M_LEDC_STA1_MSK     = 0xf<<4, /* Bit  7.. 4: STAT1 LED Ctrl. Mask */
        PHY_M_LEDC_STA0_MSK     = 0xf, /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
 };
 
@@ -1804,8 +1804,8 @@ enum {
 
 /*     GM_SMI_CTRL                     16 bit r/w      SMI Control Register */
 enum {
-       GM_SMI_CT_PHY_A_MSK     = 0x1f<<11,/* Bit 15..11:       PHY Device Address */
-       GM_SMI_CT_REG_A_MSK     = 0x1f<<6,/* Bit 10.. 6:        PHY Register Address */
+       GM_SMI_CT_PHY_A_MSK     = 0x1f<<11, /* Bit 15..11:      PHY Device Address */
+       GM_SMI_CT_REG_A_MSK     = 0x1f<<6, /* Bit 10.. 6:       PHY Register Address */
        GM_SMI_CT_OP_RD         = 1<<5, /* Bit  5:      OpCode Read (0=Write)*/
        GM_SMI_CT_RD_VAL        = 1<<4, /* Bit  4:      Read Valid (Read completed) */
        GM_SMI_CT_BUSY          = 1<<3, /* Bit  3:      Busy (Operation in progress) */
@@ -1875,9 +1875,9 @@ enum {
 
 /*     TX_GMF_CTRL_T   32 bit  Tx GMAC FIFO Control/Test */
 enum {
-       GMF_WSP_TST_ON  = 1<<18,/* Write Shadow Pointer Test On */
-       GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
-       GMF_WSP_STEP    = 1<<16,/* Write Shadow Pointer Step/Increment */
+       GMF_WSP_TST_ON  = 1<<18, /* Write Shadow Pointer Test On */
+       GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */
+       GMF_WSP_STEP    = 1<<16, /* Write Shadow Pointer Step/Increment */
 
        GMF_CLI_TX_FU   = 1<<6, /* Clear IRQ Tx FIFO Underrun */
        GMF_CLI_TX_FC   = 1<<5, /* Clear IRQ Tx Frame Complete */
@@ -2111,18 +2111,18 @@ enum {
 
 /*     XM_MMU_CMD      16 bit r/w      MMU Command Register */
 enum {
-       XM_MMU_PHY_RDY  = 1<<12,/* Bit 12:      PHY Read Ready */
-       XM_MMU_PHY_BUSY = 1<<11,/* Bit 11:      PHY Busy */
-       XM_MMU_IGN_PF   = 1<<10,/* Bit 10:      Ignore Pause Frame */
-       XM_MMU_MAC_LB   = 1<<9, /* Bit  9:      Enable MAC Loopback */
-       XM_MMU_FRC_COL  = 1<<7, /* Bit  7:      Force Collision */
-       XM_MMU_SIM_COL  = 1<<6, /* Bit  6:      Simulate Collision */
-       XM_MMU_NO_PRE   = 1<<5, /* Bit  5:      No MDIO Preamble */
-       XM_MMU_GMII_FD  = 1<<4, /* Bit  4:      GMII uses Full Duplex */
-       XM_MMU_RAT_CTRL = 1<<3, /* Bit  3:      Enable Rate Control */
-       XM_MMU_GMII_LOOP= 1<<2, /* Bit  2:      PHY is in Loopback Mode */
-       XM_MMU_ENA_RX   = 1<<1, /* Bit  1:      Enable Receiver */
-       XM_MMU_ENA_TX   = 1<<0, /* Bit  0:      Enable Transmitter */
+       XM_MMU_PHY_RDY  = 1<<12, /* Bit 12:     PHY Read Ready */
+       XM_MMU_PHY_BUSY = 1<<11, /* Bit 11:     PHY Busy */
+       XM_MMU_IGN_PF   = 1<<10, /* Bit 10:     Ignore Pause Frame */
+       XM_MMU_MAC_LB   = 1<<9,  /* Bit  9:     Enable MAC Loopback */
+       XM_MMU_FRC_COL  = 1<<7,  /* Bit  7:     Force Collision */
+       XM_MMU_SIM_COL  = 1<<6,  /* Bit  6:     Simulate Collision */
+       XM_MMU_NO_PRE   = 1<<5,  /* Bit  5:     No MDIO Preamble */
+       XM_MMU_GMII_FD  = 1<<4,  /* Bit  4:     GMII uses Full Duplex */
+       XM_MMU_RAT_CTRL = 1<<3,  /* Bit  3:     Enable Rate Control */
+       XM_MMU_GMII_LOOP= 1<<2,  /* Bit  2:     PHY is in Loopback Mode */
+       XM_MMU_ENA_RX   = 1<<1,  /* Bit  1:     Enable Receiver */
+       XM_MMU_ENA_TX   = 1<<0,  /* Bit  0:     Enable Transmitter */
 };
 
 
@@ -2456,7 +2456,7 @@ struct skge_port {
 
        struct net_device_stats net_stats;
 
-       struct work_struct   link_thread;
+       struct delayed_work  link_thread;
        enum pause_control   flow_control;
        enum pause_status    flow_status;
        u8                   rx_csum;
@@ -2506,7 +2506,7 @@ static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
 }
 
 /* MAC Related Registers inside the device. */
-#define SK_REG(port,reg)       (((port)<<7)+(reg))
+#define SK_REG(port,reg)       (((port)<<7)+(u16)(reg))
 #define SK_XMAC_REG(port, reg) \
        ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
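
Most of the macro changes in this header add explicit (u16) casts before the shifts, presumably so the computed field values stay within the 16-bit width of the PHY/GMAC registers they are written to and do not trip integer-truncation warnings. A small illustrative macro in the same style (the field position and mask are examples, not the Marvell definitions):

#include <linux/types.h>

#define EX_FIELD_MSK	(7 << 4)
#define EX_FIELD(x)	(((u16)(x) << 4) & EX_FIELD_MSK)

static inline u16 ex_pack(u8 val)
{
	/* EX_FIELD(5) evaluates to 0x0050; the cast keeps the shifted
	 * value in 16-bit range before it is combined and stored. */
	return EX_FIELD(val) | 0x3;
}
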
 
index 842abd9396c64ca7aecee1de7be90e06a141d0f1..fb1d2c30c1bb4a34f0d89650e0ed75ed3b11256f 100644 (file)
@@ -100,33 +100,32 @@ module_param(idle_timeout, int, 0);
 MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");
 
 static const struct pci_device_id sky2_id_table[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
-       { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+       { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
+       { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },    /* DGE-560T */
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) },    /* DGE-550SX */
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },    /* DGE-560SX */
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
-       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
+       { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
        { 0 }
 };
 
@@ -522,7 +521,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
                /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
                ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
                /* turn off the Rx LED (LED_RX) */
-               ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
+               ledover &= ~PHY_M_LED_MO_RX;
        }
 
        if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
@@ -545,7 +544,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
                if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
                        /* turn on 100 Mbps LED (LED_LINK100) */
-                       ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+                       ledover |= PHY_M_LED_MO_100;
                }
 
                if (ledover)
@@ -697,10 +696,15 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 
 }
 
-/* Assign Ram Buffer allocation in units of 64bit (8 bytes) */
-static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 end)
+/* Assign Ram Buffer allocation to queue */
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
 {
-       pr_debug(PFX "q %d %#x %#x\n", q, start, end);
+       u32 end;
+
+       /* convert from K bytes to qwords used for hw register */
+       start *= 1024/8;
+       space *= 1024/8;
+       end = start + space - 1;
 
        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
        sky2_write32(hw, RB_ADDR(q, RB_START), start);
@@ -709,7 +713,6 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 end)
        sky2_write32(hw, RB_ADDR(q, RB_RP), start);
 
        if (q == Q_R1 || q == Q_R2) {
-               u32 space = end - start + 1;
                u32 tp = space - space/4;
 
                /* On receive queue's set the thresholds
@@ -1059,11 +1062,16 @@ static int sky2_rx_start(struct sky2_port *sky2)
        sky2->rx_put = sky2->rx_next = 0;
        sky2_qset(hw, rxq);
 
+       /* On PCI express lowering the watermark gives better performance */
+       if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+               sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
+
+       /* These chips have no ram buffer?
+        * MAC Rx RAM Read is controlled by hardware */
        if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
-           (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) {
-               /* MAC Rx RAM Read is controlled by hardware */
+           (hw->chip_rev == CHIP_REV_YU_EC_U_A1
+            || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
                sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
-       }
 
        sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
 
@@ -1139,7 +1147,7 @@ static int sky2_up(struct net_device *dev)
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
        unsigned port = sky2->port;
-       u32 ramsize, rxspace, imask;
+       u32 ramsize, imask;
        int cap, err = -ENOMEM;
        struct net_device *otherdev = hw->dev[sky2->port^1];
 
@@ -1192,20 +1200,25 @@ static int sky2_up(struct net_device *dev)
 
        sky2_mac_init(hw, port);
 
-       /* Determine available ram buffer space in qwords.  */
-       ramsize = sky2_read8(hw, B2_E_0) * 4096/8;
+       /* Register is number of 4K blocks on internal RAM buffer. */
+       ramsize = sky2_read8(hw, B2_E_0) * 4;
+       printk(KERN_INFO PFX "%s: ram buffer %dK\n", dev->name, ramsize);
 
-       if (ramsize > 6*1024/8)
-               rxspace = ramsize - (ramsize + 2) / 3;
-       else
-               rxspace = ramsize / 2;
+       if (ramsize > 0) {
+               u32 rxspace;
 
-       sky2_ramset(hw, rxqaddr[port], 0, rxspace-1);
-       sky2_ramset(hw, txqaddr[port], rxspace, ramsize-1);
+               if (ramsize < 16)
+                       rxspace = ramsize / 2;
+               else
+                       rxspace = 8 + (2*(ramsize - 16))/3;
+
+               sky2_ramset(hw, rxqaddr[port], 0, rxspace);
+               sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
 
-       /* Make sure SyncQ is disabled */
-       sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
-                   RB_RST_SET);
+               /* Make sure SyncQ is disabled */
+               sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
+                           RB_RST_SET);
+       }
 
        sky2_qset(hw, txqaddr[port]);
 
@@ -2917,18 +2930,8 @@ static void sky2_led(struct sky2_hw *hw, unsigned port, int on)
 
        default:
                gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
-               gm_phy_write(hw, port, PHY_MARV_LED_OVER,
-                            on ? PHY_M_LED_MO_DUP(MO_LED_ON) |
-                            PHY_M_LED_MO_10(MO_LED_ON) |
-                            PHY_M_LED_MO_100(MO_LED_ON) |
-                            PHY_M_LED_MO_1000(MO_LED_ON) |
-                            PHY_M_LED_MO_RX(MO_LED_ON)
-                            : PHY_M_LED_MO_DUP(MO_LED_OFF) |
-                            PHY_M_LED_MO_10(MO_LED_OFF) |
-                            PHY_M_LED_MO_100(MO_LED_OFF) |
-                            PHY_M_LED_MO_1000(MO_LED_OFF) |
-                            PHY_M_LED_MO_RX(MO_LED_OFF));
-
+               gm_phy_write(hw, port, PHY_MARV_LED_OVER, 
+                            on ? PHY_M_LED_ALL : 0);
        }
 }
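
sky2_ramset() now takes the queue's start and size in kilobytes and converts them to the 8-byte (qword) units the RAM buffer registers use, while sky2_up() splits the buffer between Rx and Tx: half each for small buffers, roughly two thirds to Rx once there is more than 16K. The arithmetic in isolation, as a plain user-space check rather than driver code:

#include <stdio.h>

static void split_ram(unsigned int ramsize_kb)
{
	unsigned int rxspace_kb, rx_start_qw, rx_space_qw;

	if (ramsize_kb < 16)
		rxspace_kb = ramsize_kb / 2;
	else
		rxspace_kb = 8 + (2 * (ramsize_kb - 16)) / 3;

	rx_start_qw = 0;
	rx_space_qw = rxspace_kb * 1024 / 8;	/* the hardware counts 8-byte units */

	printf("rx: %u..%u qwords, tx: %u KB\n",
	       rx_start_qw, rx_start_qw + rx_space_qw - 1, ramsize_kb - rxspace_kb);
}

int main(void)
{
	split_ram(48);	/* a 48K buffer splits into 29K Rx and 19K Tx */
	return 0;
}
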
 
index 7760545edbf2ae8a95f7e9611e6f752bf70e8952..6ed1d47dbbd3452bfa0460fb6b92df67c8370f5f 100644 (file)
@@ -608,7 +608,7 @@ enum {
        PHY_ADDR_MARV   = 0,
 };
 
-#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
+#define RB_ADDR(offs, queue) ((u16) B16_RAM_REGS + (queue) + (offs))
 
 
 enum {
@@ -680,6 +680,7 @@ enum {
                          BMU_FIFO_ENA | BMU_OP_ON,
 
        BMU_WM_DEFAULT = 0x600,
+       BMU_WM_PEX     = 0x80,
 };
 
 /* Tx BMU Control / Status Registers (Yukon-2) */
@@ -1060,7 +1061,7 @@ enum {
        PHY_M_PC_EN_DET_PLUS    = 3<<8, /* Energy Detect Plus (Mode 2) */
 };
 
-#define PHY_M_PC_MDI_XMODE(x)  (((x)<<5) & PHY_M_PC_MDIX_MSK)
+#define PHY_M_PC_MDI_XMODE(x)  (((u16)(x)<<5) & PHY_M_PC_MDIX_MSK)
 
 enum {
        PHY_M_PC_MAN_MDI        = 0, /* 00 = Manual MDI configuration */
@@ -1156,13 +1157,13 @@ enum {
        PHY_M_EC_TX_TIM_CT  = 1<<1, /* RGMII Tx Timing Control */
        PHY_M_EC_TRANS_DIS  = 1<<0, /* Transmitter Disable (88E1111 only) */};
 
-#define PHY_M_EC_M_DSC(x)      ((x)<<10 & PHY_M_EC_M_DSC_MSK)
+#define PHY_M_EC_M_DSC(x)      ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
                                        /* 00=1x; 01=2x; 10=3x; 11=4x */
-#define PHY_M_EC_S_DSC(x)      ((x)<<8 & PHY_M_EC_S_DSC_MSK)
+#define PHY_M_EC_S_DSC(x)      ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
                                        /* 00=dis; 01=1x; 10=2x; 11=3x */
-#define PHY_M_EC_DSC_2(x)      ((x)<<9 & PHY_M_EC_M_DSC_MSK2)
+#define PHY_M_EC_DSC_2(x)      ((u16)(x)<<9 & PHY_M_EC_M_DSC_MSK2)
                                        /* 000=1x; 001=2x; 010=3x; 011=4x */
-#define PHY_M_EC_MAC_S(x)      ((x)<<4 & PHY_M_EC_MAC_S_MSK)
+#define PHY_M_EC_MAC_S(x)      ((u16)(x)<<4 & PHY_M_EC_MAC_S_MSK)
                                        /* 01X=0; 110=2.5; 111=25 (MHz) */
 
 /* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
@@ -1173,7 +1174,7 @@ enum {
 };
 /* !!! Errata in spec. (1 = disable) */
 
-#define PHY_M_PC_DSC(x)                        (((x)<<12) & PHY_M_PC_DSC_MSK)
+#define PHY_M_PC_DSC(x)                        (((u16)(x)<<12) & PHY_M_PC_DSC_MSK)
                                                                                        /* 100=5x; 101=6x; 110=7x; 111=8x */
 enum {
        MAC_TX_CLK_0_MHZ        = 2,
@@ -1203,7 +1204,7 @@ enum {
        PHY_M_LEDC_TX_C_MSB     = 1<<0, /* Tx Control (MSB, 88E1111 only) */
 };
 
-#define PHY_M_LED_PULS_DUR(x)  (((x)<<12) & PHY_M_LEDC_PULS_MSK)
+#define PHY_M_LED_PULS_DUR(x)  (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK)
 
 /*****  PHY_MARV_PHY_STAT (page 3)16 bit r/w   Polarity Control Reg. *****/
 enum {
@@ -1233,7 +1234,7 @@ enum {
        PULS_1300MS     = 7,/* 1.3 s to 2.7 s */
 };
 
-#define PHY_M_LED_BLINK_RT(x)  (((x)<<8) & PHY_M_LEDC_BL_R_MSK)
+#define PHY_M_LED_BLINK_RT(x)  (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK)
 
 enum {
        BLINK_42MS      = 0,/* 42 ms */
@@ -1243,21 +1244,18 @@ enum {
        BLINK_670MS     = 4,/* 670 ms */
 };
 
-/*****  PHY_MARV_LED_OVER      16 bit r/w      Manual LED Override Reg *****/
-#define PHY_M_LED_MO_SGMII(x)  ((x)<<14) /* Bit 15..14:  SGMII AN Timer */
-                                                                               /* Bit 13..12:  reserved */
-#define PHY_M_LED_MO_DUP(x)    ((x)<<10) /* Bit 11..10:  Duplex */
-#define PHY_M_LED_MO_10(x)     ((x)<<8) /* Bit  9.. 8:  Link 10 */
-#define PHY_M_LED_MO_100(x)    ((x)<<6) /* Bit  7.. 6:  Link 100 */
-#define PHY_M_LED_MO_1000(x)   ((x)<<4) /* Bit  5.. 4:  Link 1000 */
-#define PHY_M_LED_MO_RX(x)     ((x)<<2) /* Bit  3.. 2:  Rx */
-#define PHY_M_LED_MO_TX(x)     ((x)<<0) /* Bit  1.. 0:  Tx */
-
+/**** PHY_MARV_LED_OVER    16 bit r/w LED control */
 enum {
-       MO_LED_NORM     = 0,
-       MO_LED_BLINK    = 1,
-       MO_LED_OFF      = 2,
-       MO_LED_ON       = 3,
+       PHY_M_LED_MO_DUP  = 3<<10,/* Bit 11..10:  Duplex */
+       PHY_M_LED_MO_10   = 3<<8, /* Bit  9.. 8:  Link 10 */
+       PHY_M_LED_MO_100  = 3<<6, /* Bit  7.. 6:  Link 100 */
+       PHY_M_LED_MO_1000 = 3<<4, /* Bit  5.. 4:  Link 1000 */
+       PHY_M_LED_MO_RX   = 3<<2, /* Bit  3.. 2:  Rx */
+       PHY_M_LED_MO_TX   = 3<<0, /* Bit  1.. 0:  Tx */
+
+       PHY_M_LED_ALL     = PHY_M_LED_MO_DUP | PHY_M_LED_MO_10 
+                           | PHY_M_LED_MO_100 | PHY_M_LED_MO_1000
+                           | PHY_M_LED_MO_RX,
 };
 
 /*****  PHY_MARV_EXT_CTRL_2    16 bit r/w      Ext. PHY Specific Ctrl 2 *****/
@@ -1294,9 +1292,9 @@ enum {
        PHY_M_FELP_LED0_MSK = 0xf, /* Bit  3.. 0: LED0 Mask (SPEED) */
 };
 
-#define PHY_M_FELP_LED2_CTRL(x)        (((x)<<8) & PHY_M_FELP_LED2_MSK)
-#define PHY_M_FELP_LED1_CTRL(x)        (((x)<<4) & PHY_M_FELP_LED1_MSK)
-#define PHY_M_FELP_LED0_CTRL(x)        (((x)<<0) & PHY_M_FELP_LED0_MSK)
+#define PHY_M_FELP_LED2_CTRL(x)        (((u16)(x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x)        (((u16)(x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x)        (((u16)(x)<<0) & PHY_M_FELP_LED0_MSK)
 
 enum {
        LED_PAR_CTRL_COLX       = 0x00,
@@ -1552,8 +1550,8 @@ enum {
        GM_SMI_CT_BUSY          = 1<<3, /* Bit  3:      Busy (Operation in progress) */
 };
 
-#define GM_SMI_CT_PHY_AD(x)    (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
-#define GM_SMI_CT_REG_AD(x)    (((x)<<6) & GM_SMI_CT_REG_A_MSK)
+#define GM_SMI_CT_PHY_AD(x)    (((u16)(x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x)    (((u16)(x)<<6) & GM_SMI_CT_REG_A_MSK)
 
 /*     GM_PHY_ADDR                             16 bit r/w      GPHY Address Register */
 enum {
index 889ef0d7c37499bba55e9f0e311dcf9dc1abdcec..d70bc979534669ba60eb49109ad73797bcb00cdd 100644 (file)
@@ -593,7 +593,7 @@ static void cleanup_card(struct net_device *dev)
        iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index e10755ec5defa321075923080dc05d586d499374..2c5319c62fa50c520f1762880b7cea57f15aef4d 100644 (file)
@@ -437,7 +437,7 @@ int __init init_module(void)
        return -ENXIO;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        int this_dev;
 
index c0d13d65091333555f118f5c16f386a955b500cf..bd6e84506c292d57064ddebd8ab105d25a318bb0 100644 (file)
@@ -1616,7 +1616,7 @@ int __init init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(devSMC9194);
        free_irq(devSMC9194->irq, devSMC9194);
index 95b6478f55c66c8c53f36a9f65180effbc47e33e..e62a9586fb95e3ca054d92b0972412c5daea8122 100644 (file)
@@ -210,6 +210,7 @@ struct smc_local {
 
        /* work queue */
        struct work_struct phy_configure;
+       struct net_device *dev;
        int     work_pending;
 
        spinlock_t lock;
@@ -1114,10 +1115,11 @@ static void smc_phy_check_media(struct net_device *dev, int init)
  * of autonegotiation.)  If the RPC ANEG bit is cleared, the selection
  * is controlled by the RPC SPEED and RPC DPLX bits.
  */
-static void smc_phy_configure(void *data)
+static void smc_phy_configure(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct smc_local *lp = netdev_priv(dev);
+       struct smc_local *lp =
+               container_of(work, struct smc_local, phy_configure);
+       struct net_device *dev = lp->dev;
        void __iomem *ioaddr = lp->base;
        int phyaddr = lp->mii.phy_id;
        int my_phy_caps; /* My PHY capabilities */
@@ -1592,7 +1594,7 @@ smc_open(struct net_device *dev)
 
        /* Configure the PHY, initialize the link state */
        if (lp->phy_type != 0)
-               smc_phy_configure(dev);
+               smc_phy_configure(&lp->phy_configure);
        else {
                spin_lock_irq(&lp->lock);
                smc_10bt_check_media(dev, 1);
@@ -1972,7 +1974,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
 #endif
 
        tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
-       INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
+       INIT_WORK(&lp->phy_configure, smc_phy_configure);
+       lp->dev = dev;
        lp->mii.phy_id_mask = 0x1f;
        lp->mii.reg_num_mask = 0x1f;
        lp->mii.force_media = 0;
@@ -2322,7 +2325,7 @@ static int smc_drv_resume(struct platform_device *dev)
                        smc_reset(ndev);
                        smc_enable(ndev);
                        if (lp->phy_type != 0)
-                               smc_phy_configure(ndev);
+                               smc_phy_configure(&lp->phy_configure);
                        netif_device_attach(ndev);
                }
        }
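
Because the smc_phy_configure() handler now takes the work pointer, smc_open() and the resume path can still run it synchronously by passing &lp->phy_configure directly; the handler finds the private structure with container_of() whether it was scheduled or called in line. A small sketch of that dual use (names are illustrative):

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct phy_configure;
};

static void example_phy_configure(struct work_struct *work)
{
	struct example_priv *lp =
		container_of(work, struct example_priv, phy_configure);

	/* program the PHY through "lp" */
	(void)lp;
}

/* asynchronous:  schedule_work(&lp->phy_configure);
 * synchronous:   example_phy_configure(&lp->phy_configure);
 * both paths resolve to the same private structure.
 */
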
index a8640169fc77afede8c7f28376ccc8967b214668..9367c574477ad57d6c89b6f5d8141227eea0dd22 100644 (file)
@@ -238,7 +238,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define SMC_CAN_USE_16BIT      1
 #define SMC_CAN_USE_32BIT      0
 
-#define SMC_inb(a, r)          inb((u32)a) + (r))
+#define SMC_inb(a, r)          inb(((u32)a) + (r))
 #define SMC_inw(a, r)          inw(((u32)a) + (r))
 #define SMC_outb(v, a, r)      outb(v, ((u32)a) + (r))
 #define SMC_outw(v, a, r)      outw(v, ((u32)a) + (r))
@@ -434,6 +434,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
 
 #define SMC_IRQ_FLAGS          (0)
 
+#elif  defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT       1
+#define SMC_CAN_USE_16BIT      1
+#define SMC_CAN_USE_32BIT      1
+#define SMC_NOWAIT             1
+
+#define SMC_inb(a, r)          readb((a) + (r))
+#define SMC_inw(a, r)          readw((a) + (r))
+#define SMC_inl(a, r)          readl((a) + (r))
+#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)      writew(v, (a) + (r))
+#define SMC_outl(v, a, r)      writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l)  writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS          (0)
+
 #else
 
 #define SMC_CAN_USE_8BIT       1
@@ -1216,7 +1234,7 @@ static const char * chip_ids[ 16 ] =  {
                if (SMC_CAN_USE_32BIT) {                                \
                        void *__ptr = (p);                              \
                        int __len = (l);                                \
-                       void *__ioaddr = ioaddr;                        \
+                       void __iomem *__ioaddr = ioaddr;                \
                        if (__len >= 2 && (unsigned long)__ptr & 2) {   \
                                __len -= 2;                             \
                                SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \
@@ -1240,7 +1258,7 @@ static const char * chip_ids[ 16 ] =  {
                if (SMC_CAN_USE_32BIT) {                                \
                        void *__ptr = (p);                              \
                        int __len = (l);                                \
-                       void *__ioaddr = ioaddr;                        \
+                       void __iomem *__ioaddr = ioaddr;                \
                        if ((unsigned long)__ptr & 2) {                 \
                                /*                                      \
                                 * We want 32bit alignment here.        \
index cef7e6671c49b658d9ce618eafad16213b7b09f9..ebb6aa39f9c7c18895f7c15d5858674c30f48f70 100644 (file)
@@ -88,12 +88,11 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
 static inline u32
 spider_net_read_reg(struct spider_net_card *card, u32 reg)
 {
-       u32 value;
-
-       value = readl(card->regs + reg);
-       value = le32_to_cpu(value);
-
-       return value;
+       /* We use the powerpc specific variants instead of readl_be() because
+        * we know spidernet is not a real PCI device and we can thus avoid the
+        * performance hit caused by the PCI workarounds.
+        */
+       return in_be32(card->regs + reg);
 }
 
 /**
@@ -105,8 +104,11 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
 static inline void
 spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
 {
-       value = cpu_to_le32(value);
-       writel(value, card->regs + reg);
+       /* We use the powerpc specific variants instead of writel_be() because
+        * we know spidernet is not a real PCI device and we can thus avoid the
+        * performance hit caused by the PCI workarounds.
+        */
+       out_be32(card->regs + reg, value);
 }
 
 /** spider_net_write_phy - write to phy register
@@ -1937,10 +1939,11 @@ spider_net_stop(struct net_device *netdev)
  * called as task when tx hangs, resets interface (if interface is up)
  */
 static void
-spider_net_tx_timeout_task(void *data)
+spider_net_tx_timeout_task(struct work_struct *work)
 {
-       struct net_device *netdev = data;
-       struct spider_net_card *card = netdev_priv(netdev);
+       struct spider_net_card *card =
+               container_of(work, struct spider_net_card, tx_timeout_task);
+       struct net_device *netdev = card->netdev;
 
        if (!(netdev->flags & IFF_UP))
                goto out;
@@ -2114,7 +2117,7 @@ spider_net_alloc_card(void)
        card = netdev_priv(netdev);
        card->netdev = netdev;
        card->msg_enable = SPIDER_NET_DEFAULT_MSG;
-       INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
+       INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
        init_waitqueue_head(&card->waitq);
        atomic_set(&card->tx_timeout_task_counter, 0);
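
The spider_net accessors replace the readl()/le32_to_cpu() and cpu_to_le32()/writel() pairs with the powerpc in_be32()/out_be32() helpers; as the new comments explain, spidernet is not a real PCI device, so the driver can use the cheaper native accessors and skip the PCI workarounds. A sketch contrasting the two accessor styles (function names are illustrative):

#include <linux/types.h>
#include <linux/io.h>

/* conventional PCI-style accessor: readl() returns the register value
 * converted from little-endian to CPU order. */
static inline u32 example_read_le(void __iomem *regs, u32 reg)
{
	return readl(regs + reg);
}

#ifdef CONFIG_PPC
/* big-endian accessor as spider_net now uses: in_be32() reads the
 * register without the little-endian conversion. */
static inline u32 example_read_be(void __iomem *regs, u32 reg)
{
	return in_be32(regs + reg);
}
#endif
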
 
index 47a1c09d19acc98b43faba5d799170ef6d646a9a..c62e85d89f4173e78d0a4c9227463d865800f0a4 100644 (file)
@@ -945,7 +945,7 @@ static void set_multicast_list( struct net_device *dev )
 
 static struct net_device *sun3lance_dev;
 
-int init_module(void)
+int __init init_module(void)
 {
        sun3lance_dev = sun3lance_probe(-1);
        if (IS_ERR(sun3lance_dev))
@@ -953,7 +953,7 @@ int init_module(void)
        return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
        unregister_netdev(sun3lance_dev);
 #ifdef CONFIG_SUN3
index cf44e72399b9cf858508b179237a0e903c3f996f..785e4a535f9ed7543467001281327cc76eb46cb8 100644 (file)
@@ -2282,9 +2282,9 @@ static void gem_do_stop(struct net_device *dev, int wol)
        }
 }
 
-static void gem_reset_task(void *data)
+static void gem_reset_task(struct work_struct *work)
 {
-       struct gem *gp = (struct gem *) data;
+       struct gem *gp = container_of(work, struct gem, reset_task);
 
        mutex_lock(&gp->pm_mutex);
 
@@ -3044,7 +3044,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
        gp->link_timer.function = gem_link_timer;
        gp->link_timer.data = (unsigned long) gp;
 
-       INIT_WORK(&gp->reset_task, gem_reset_task, gp);
+       INIT_WORK(&gp->reset_task, gem_reset_task);
 
        gp->lstate = link_down;
        gp->timer_ticks = 0;
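
The gem_reset_task and spider_net_tx_timeout_task hunks above are instances of the pattern repeated throughout the rest of this diff: work handlers now take a struct work_struct * instead of an opaque void *, INIT_WORK() loses its data argument, and the handler recovers its context with container_of(). This only works because the work item is embedded in the private structure it belongs to. A condensed sketch with illustrative names:

#include <linux/workqueue.h>

struct demo_card {
        int resets;
        struct work_struct reset_task;  /* embedded, so container_of() works */
};

static void demo_reset_task(struct work_struct *work)
{
        struct demo_card *card =
                container_of(work, struct demo_card, reset_task);

        card->resets++;
}

static void demo_setup(struct demo_card *card)
{
        /* old API: INIT_WORK(&card->reset_task, demo_reset_task, card); */
        INIT_WORK(&card->reset_task, demo_reset_task);
        schedule_work(&card->reset_task);
}
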
index c20bb998e0e562c8261b12b7e9b8505f534da47d..571320ae87abf4ca106767846b9a57657c1366fd 100644 (file)
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.69"
-#define DRV_MODULE_RELDATE     "November 15, 2006"
+#define DRV_MODULE_VERSION     "3.70"
+#define DRV_MODULE_RELDATE     "December 1, 2006"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
@@ -192,6 +192,7 @@ static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
@@ -1061,7 +1062,7 @@ static void tg3_frob_aux_power(struct tg3 *tp)
 {
        struct tg3 *tp_peer = tp;
 
-       if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
+       if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;
 
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
@@ -1212,8 +1213,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
                                      power_control);
                udelay(100);    /* Delay after power state change */
 
-               /* Switch out of Vaux if it is not a LOM */
-               if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
+               /* Switch out of Vaux if it is a NIC */
+               if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 
                return 0;
@@ -1401,8 +1402,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
 static void tg3_link_report(struct tg3 *tp)
 {
        if (!netif_carrier_ok(tp->dev)) {
-               printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
-       } else {
+               if (netif_msg_link(tp))
+                       printk(KERN_INFO PFX "%s: Link is down.\n",
+                              tp->dev->name);
+       } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
@@ -1557,12 +1560,6 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
 
                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
-               tp->link_config.advertising =
-                       (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
-                        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
-                        ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
-                        ADVERTISED_Autoneg | ADVERTISED_MII);
-
                if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
@@ -1706,25 +1703,36 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
        return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp)
+static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
 {
-       u32 adv_reg, all_mask;
+       u32 adv_reg, all_mask = 0;
+
+       if (mask & ADVERTISED_10baseT_Half)
+               all_mask |= ADVERTISE_10HALF;
+       if (mask & ADVERTISED_10baseT_Full)
+               all_mask |= ADVERTISE_10FULL;
+       if (mask & ADVERTISED_100baseT_Half)
+               all_mask |= ADVERTISE_100HALF;
+       if (mask & ADVERTISED_100baseT_Full)
+               all_mask |= ADVERTISE_100FULL;
 
        if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
                return 0;
 
-       all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
-                   ADVERTISE_100HALF | ADVERTISE_100FULL);
        if ((adv_reg & all_mask) != all_mask)
                return 0;
        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                u32 tg3_ctrl;
 
+               all_mask = 0;
+               if (mask & ADVERTISED_1000baseT_Half)
+                       all_mask |= ADVERTISE_1000HALF;
+               if (mask & ADVERTISED_1000baseT_Full)
+                       all_mask |= ADVERTISE_1000FULL;
+
                if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
                        return 0;
 
-               all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
-                           MII_TG3_CTRL_ADV_1000_FULL);
                if ((tg3_ctrl & all_mask) != all_mask)
                        return 0;
        }
@@ -1884,7 +1892,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
                                /* Force autoneg restart if we are exiting
                                 * low power mode.
                                 */
-                               if (!tg3_copper_is_advertising_all(tp))
+                               if (!tg3_copper_is_advertising_all(tp,
+                                               tp->link_config.advertising))
                                        current_link_up = 0;
                        } else {
                                current_link_up = 0;
@@ -3654,9 +3663,9 @@ static void tg3_poll_controller(struct net_device *dev)
 }
 #endif
 
-static void tg3_reset_task(void *_data)
+static void tg3_reset_task(struct work_struct *work)
 {
-       struct tg3 *tp = _data;
+       struct tg3 *tp = container_of(work, struct tg3, reset_task);
        unsigned int restart_timer;
 
        tg3_full_lock(tp, 0);
@@ -3703,8 +3712,9 @@ static void tg3_tx_timeout(struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
 
-       printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
-              dev->name);
+       if (netif_msg_tx_err(tp))
+               printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
+                      dev->name);
 
        schedule_work(&tp->reset_task);
 }
@@ -6396,16 +6406,17 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        udelay(40);
 
        /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
-        * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
+        * If TG3_FLG2_IS_NIC is zero, we should read the
         * register to preserve the GPIO settings for LOMs. The GPIOs,
         * whether used as inputs or outputs, are set by boot code after
         * reset.
         */
-       if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+       if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
                u32 gpio_mask;
 
-               gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
-                           GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
+               gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+                           GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+                           GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
 
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                        gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
@@ -6417,8 +6428,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
 
                /* GPIO1 must be driven high for eeprom write protect */
-               tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
-                                      GRC_LCLCTRL_GPIO_OUTPUT1);
+               if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
+                       tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+                                              GRC_LCLCTRL_GPIO_OUTPUT1);
        }
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
@@ -8656,7 +8668,9 @@ static int tg3_test_registers(struct tg3 *tp)
        return 0;
 
 out:
-       printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+       if (netif_msg_hw(tp))
+               printk(KERN_ERR PFX "Register test failed at offset %x\n",
+                      offset);
        tw32(offset, save_val);
        return -EIO;
 }
@@ -8781,17 +8795,20 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                                        tg3_writephy(tp, 0x10, phy & ~0x4000);
                                tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
                        }
-               }
-               val = BMCR_LOOPBACK | BMCR_FULLDPLX;
-               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
-                       val |= BMCR_SPEED100;
-               else
-                       val |= BMCR_SPEED1000;
+                       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
+               } else
+                       val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
 
                tg3_writephy(tp, MII_BMCR, val);
                udelay(40);
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+
+               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+                          MAC_MODE_LINK_POLARITY;
+               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
+                       mac_mode |= MAC_MODE_PORT_MODE_MII;
+               } else
+                       mac_mode |= MAC_MODE_PORT_MODE_GMII;
 
                /* reset to prevent losing 1st rx packet intermittently */
                if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
@@ -8799,12 +8816,6 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
                        udelay(10);
                        tw32_f(MAC_RX_MODE, tp->rx_mode);
                }
-               mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-                          MAC_MODE_LINK_POLARITY;
-               if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
-                       mac_mode |= MAC_MODE_PORT_MODE_MII;
-               else
-                       mac_mode |= MAC_MODE_PORT_MODE_GMII;
                if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
@@ -9456,16 +9467,12 @@ static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
 static void __devinit tg3_nvram_init(struct tg3 *tp)
 {
-       int j;
-
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));
 
-       /* XXX schedule_timeout() ... */
-       for (j = 0; j < 100; j++)
-               udelay(10);
+       msleep(1);
 
        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
@@ -9526,12 +9533,12 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);
 
-       for (i = 0; i < 10000; i++) {
+       for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);
 
                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
-               udelay(100);
+               msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;
@@ -9656,12 +9663,12 @@ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);
 
-               for (j = 0; j < 10000; j++) {
+               for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);
 
                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
-                       udelay(100);
+                       msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
@@ -9965,8 +9972,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
-               if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM))
+               if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+                       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+               }
                return;
        }
 
@@ -10066,10 +10075,17 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
 
-               if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
+               if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
-               else
+                       if ((tp->pdev->subsystem_vendor ==
+                            PCI_VENDOR_ID_ARIMA) &&
+                           (tp->pdev->subsystem_device == 0x205a ||
+                            tp->pdev->subsystem_device == 0x2063))
+                               tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+               } else {
                        tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+                       tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+               }
 
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -10147,7 +10163,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
 
        if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
-               u32 bmsr, adv_reg, tg3_ctrl;
+               u32 bmsr, adv_reg, tg3_ctrl, mask;
 
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -10171,7 +10187,10 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
                }
 
-               if (!tg3_copper_is_advertising_all(tp)) {
+               mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
+               if (!tg3_copper_is_advertising_all(tp, mask)) {
                        tg3_writephy(tp, MII_ADVERTISE, adv_reg);
 
                        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
@@ -10695,7 +10714,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
                tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
 
        /* Get eeprom hw config before calling tg3_set_power_state().
-        * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
+        * In particular, the TG3_FLG2_IS_NIC flag must be
         * determined before calling tg3_set_power_state() so that
         * we know whether or not to switch out of Vaux power.
         * When the flag is set, it means that GPIO1 is used for eeprom
@@ -10862,7 +10881,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
              tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
            (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
             (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
-             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)) ||
+             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
+             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
 
@@ -11734,7 +11754,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 #endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
-       INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
+       INIT_WORK(&tp->reset_task, tg3_reset_task);
 
        tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
        if (tp->regs == 0UL) {
@@ -11912,13 +11932,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 
        pci_set_drvdata(pdev, dev);
 
-       printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
+       printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
               dev->name,
               tp->board_part_number,
               tp->pci_chip_rev_id,
               tg3_phy_string(tp),
               tg3_bus_string(tp, str),
-              (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
+              ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+               ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+                "10/100/1000Base-T")));
 
        for (i = 0; i < 6; i++)
                printk("%2.2x%c", dev->dev_addr[i],
index 92f53000bce69f2895dad2183ac37013de7e58cb..dfaf4ed127bd712378168eb1b64b82b4bd5f39ef 100644 (file)
@@ -2233,6 +2233,7 @@ struct tg3 {
 #define TG3_FLG2_PCI_EXPRESS           0x00000200
 #define TG3_FLG2_ASF_NEW_HANDSHAKE     0x00000400
 #define TG3_FLG2_HW_AUTONEG            0x00000800
+#define TG3_FLG2_IS_NIC                        0x00001000
 #define TG3_FLG2_PHY_SERDES            0x00002000
 #define TG3_FLG2_CAPACITIVE_COUPLING   0x00004000
 #define TG3_FLG2_FLASH                 0x00008000
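
Besides the EEPROM_WRITE_PROT/IS_NIC flag rework, the tg3.c hunks above also replace open-coded udelay() busy-wait loops in the EEPROM paths with msleep(); these paths run in process context, so sleeping in 1 ms steps is preferable to spinning, and the overall timeout stays roughly the same (1000 passes of 1 ms instead of 10000 passes of 100 us). A generic sketch of that polling shape, not taken verbatim from tg3:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int demo_poll_complete(void __iomem *reg, u32 done_bit)
{
        int i;

        for (i = 0; i < 1000; i++) {
                if (readl(reg) & done_bit)
                        return 0;
                msleep(1);      /* was: udelay(100) in a 10000-pass loop */
        }
        return -EBUSY;          /* timed out, as in the tg3 loops above */
}
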
index e14f5a00f65af221fd8a55d86412b8767ca3bac5..f85f0025112381ce98aa93bb250a17a20b2d17e2 100644 (file)
@@ -296,6 +296,7 @@ static void TLan_SetMulticastList( struct net_device *);
 static int     TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
 static int      TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent);
 static void    TLan_tx_timeout( struct net_device *dev);
+static void    TLan_tx_timeout_work(struct work_struct *work);
 static int     tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
 
 static u32     TLan_HandleInvalid( struct net_device *, u16 );
@@ -562,6 +563,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
        priv = netdev_priv(dev);
 
        priv->pciDev = pdev;
+       priv->dev = dev;
 
        /* Is this a PCI device? */
        if (pdev) {
@@ -634,7 +636,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
 
        /* This will be used when we get an adapter error from
         * within our irq handler */
-       INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev);
+       INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
 
        spin_lock_init(&priv->lock);
 
@@ -1040,6 +1042,25 @@ static void TLan_tx_timeout(struct net_device *dev)
 }
 
 
+       /***************************************************************
+        *      TLan_tx_timeout_work
+        *
+        *      Returns: nothing
+        *
+        *      Params:
+        *              work    work item of device which timed out
+        *
+        **************************************************************/
+
+static void TLan_tx_timeout_work(struct work_struct *work)
+{
+       TLanPrivateInfo *priv =
+               container_of(work, TLanPrivateInfo, tlan_tqueue);
+
+       TLan_tx_timeout(priv->dev);
+}
+
+
 
        /***************************************************************
         *      TLan_StartTx
index a44e2f2ef62a4cb5963422e926cff3df8249ea12..41ce0b6659378833863f3d176c7ec55183c7814a 100644 (file)
@@ -170,6 +170,7 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
 typedef struct tlan_private_tag {
        struct net_device       *nextDevice;
        struct pci_dev          *pciDev;
+       struct net_device       *dev;
        void                    *dmaStorage;
        dma_addr_t              dmaStorageDMA;
        unsigned int            dmaSize;
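
In TLAN (and in tulip below) the old work callback received the net_device rather than the private structure, so container_of() alone cannot recover it; the conversion therefore adds a back-pointer from the private data to its net_device, filled in at probe time. A sketch of that arrangement with placeholder names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_priv {
        struct net_device *dev;         /* back-pointer set at probe time */
        struct work_struct timeout_work;
};

static void demo_timeout_work(struct work_struct *work)
{
        struct demo_priv *priv =
                container_of(work, struct demo_priv, timeout_work);

        netif_wake_queue(priv->dev);    /* anything that needs the netdev */
}

/* probe:  priv = netdev_priv(dev);
 *         priv->dev = dev;
 *         INIT_WORK(&priv->timeout_work, demo_timeout_work);
 */
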
index bfe59865b1dd50e5c4dbd4cefe506a31e1495a1a..0d97e10ccac580e16d3dffbe4a9a88144360e64a 100644 (file)
@@ -1826,7 +1826,7 @@ static void tr_rx(struct net_device *dev)
        skb->protocol = tr_type_trans(skb, dev);
        if (IPv4_p) {
                skb->csum = chksum;
-               skb->ip_summed = 1;
+               skb->ip_summed = CHECKSUM_COMPLETE;
        }
        netif_rx(skb);
        dev->last_rx = jiffies;
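
The token-ring receive hunk above stops hard-coding skb->ip_summed = 1: ip_summed takes named values, and in this kernel generation the old CHECKSUM_HW constant was split into CHECKSUM_COMPLETE (receive, full checksum supplied by hardware) and CHECKSUM_PARTIAL (transmit). A minimal sketch of the receive-side usage, with illustrative names:

#include <linux/skbuff.h>

static void demo_rx_csum(struct sk_buff *skb, u32 hw_csum)
{
        skb->csum = hw_csum;                    /* checksum computed by hardware */
        skb->ip_summed = CHECKSUM_COMPLETE;     /* named constant, not 1 */
}
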
index 46dabdb120716cf2bc2a0422b58e168b555edb99..cec282a6f62d40b7ded07994bff8e3e729503227 100644 (file)
@@ -5706,7 +5706,7 @@ int __init init_module(void)
         return found ? 0 : -ENODEV;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
         int i;
 
index fa3a2bb105ad3e92690a594a8f02ecfe68b71287..942b839ccc5bab541fdd7113320f59925fddd1e8 100644 (file)
@@ -26,10 +26,11 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
 
 /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
    of available transceivers.  */
-void t21142_media_task(void *data)
+void t21142_media_task(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct tulip_private *tp = netdev_priv(dev);
+       struct tulip_private *tp =
+               container_of(work, struct tulip_private, media_work);
+       struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->base_addr;
        int csr12 = ioread32(ioaddr + CSR12);
        int next_tick = 60*HZ;
index 3f4b6408b755ef6a3d2379329c0bffd4e2dbd36b..4b3cd3d8b62abba1fa54d93fc4afb7a6666a0e10 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC_PMAC */
 
 #include "de4x5.h"
 
@@ -4151,7 +4151,7 @@ get_hw_addr(struct net_device *dev)
     /* If possible, try to fix a broken card - SMC only so far */
     srom_repair(dev, broken);
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#ifdef CONFIG_PPC_PMAC
     /*
     ** If the address starts with 00 a0, we have to bit-reverse
     ** each byte of the address.
@@ -4168,7 +4168,7 @@ get_hw_addr(struct net_device *dev)
                    dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
            }
     }
-#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC_PMAC */
 
     /* Test for a bad enet address */
     status = test_bad_enet(dev, status);
index 066e5d6bcbd8313c101fe5a8dac78d9e41de85e6..df326fe1cc8f4f3d9b35b43e2196c4fe9dbcbacf 100644 (file)
 #include "tulip.h"
 
 
-void tulip_media_task(void *data)
+void tulip_media_task(struct work_struct *work)
 {
-       struct net_device *dev = data;
-       struct tulip_private *tp = netdev_priv(dev);
+       struct tulip_private *tp =
+               container_of(work, struct tulip_private, media_work);
+       struct net_device *dev = tp->dev;
        void __iomem *ioaddr = tp->base_addr;
        u32 csr12 = ioread32(ioaddr + CSR12);
        int next_tick = 2*HZ;
index ad107f45c7b1d3f6a5442a259cfda2b6fc49d976..25f25da7691714f881073404dc71e8500b8fc758 100644 (file)
@@ -44,7 +44,7 @@ struct tulip_chip_table {
        int valid_intrs;        /* CSR7 interrupt enable settings */
        int flags;
        void (*media_timer) (unsigned long);
-       void (*media_task) (void *);
+       work_func_t media_task;
 };
 
 
@@ -392,6 +392,7 @@ struct tulip_private {
        int csr12_shadow;
        int pad0;               /* Used for 8-byte alignment */
        struct work_struct media_work;
+       struct net_device *dev;
 };
 
 
@@ -406,7 +407,7 @@ struct eeprom_fixup {
 
 /* 21142.c */
 extern u16 t21142_csr14[];
-void t21142_media_task(void *data);
+void t21142_media_task(struct work_struct *work);
 void t21142_start_nway(struct net_device *dev);
 void t21142_lnk_change(struct net_device *dev, int csr5);
 
@@ -444,7 +445,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5);
 void pnic_timer(unsigned long data);
 
 /* timer.c */
-void tulip_media_task(void *data);
+void tulip_media_task(struct work_struct *work);
 void mxic_timer(unsigned long data);
 void comet_timer(unsigned long data);
 
index 0aee618f883c34a3550a51b1c231ab75c08162e6..5a35354aa523fb6dee18a02ea80b0ef558334f9b 100644 (file)
@@ -1367,6 +1367,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
         * it is zeroed and aligned in alloc_etherdev
         */
        tp = netdev_priv(dev);
+       tp->dev = dev;
 
        tp->rx_ring = pci_alloc_consistent(pdev,
                                           sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
@@ -1389,7 +1390,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
        tp->timer.data = (unsigned long)dev;
        tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
 
-       INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
+       INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
 
        dev->base_addr = (unsigned long)ioaddr;
 
index 931cbdf6d79165648e0a383235af4221c71581c6..b2a23aed4428576b5dc55adfc60db4588f6e001f 100644 (file)
@@ -125,8 +125,8 @@ static int cpc_tty_write_room(struct tty_struct *tty);
 static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
 static void cpc_tty_flush_buffer(struct tty_struct *tty);
 static void cpc_tty_hangup(struct tty_struct *tty);
-static void cpc_tty_rx_work(void *data);
-static void cpc_tty_tx_work(void *data);
+static void cpc_tty_rx_work(struct work_struct *work);
+static void cpc_tty_tx_work(struct work_struct *work);
 static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
 static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
 static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
@@ -261,8 +261,8 @@ void cpc_tty_init(pc300dev_t *pc300dev)
        cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
        cpc_tty->pc300dev = pc300dev; 
 
-       INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
-       INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
+       INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work);
+       INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work);
        
        cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
 
@@ -659,21 +659,23 @@ static void cpc_tty_hangup(struct tty_struct *tty)
  * o call the line disc. read
  * o free memory
  */
-static void cpc_tty_rx_work(void * data)
+static void cpc_tty_rx_work(struct work_struct *work)
 {
+       st_cpc_tty_area *cpc_tty;
        unsigned long port;
        int i, j;
-       st_cpc_tty_area *cpc_tty; 
        volatile st_cpc_rx_buf *buf;
        char flags=0,flg_rx=1; 
        struct tty_ldisc *ld;
 
        if (cpc_tty_cnt == 0) return;
-
        
        for (i=0; (i < 4) && flg_rx ; i++) {
                flg_rx = 0;
-               port = (unsigned long)data;
+
+               cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work);
+               port = cpc_tty - cpc_tty_area;
+
                for (j=0; j < CPC_TTY_NPORTS; j++) {
                        cpc_tty = &cpc_tty_area[port];
                
@@ -882,9 +884,10 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
  * o if need call line discipline wakeup
  * o call wake_up_interruptible
  */
-static void cpc_tty_tx_work(void *data)
+static void cpc_tty_tx_work(struct work_struct *work)
 {
-       st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; 
+       st_cpc_tty_area *cpc_tty =
+               container_of(work, st_cpc_tty_area, tty_tx_work);
        struct tty_struct *tty; 
 
        CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
index 41f1d6778849b99f520f8f36da76770c6d08437a..7f38012b9c92e2ddb094f3c53240c90850744563 100644 (file)
@@ -538,7 +538,7 @@ static void cleanup_card(struct net_device *dev)
        iounmap(ei_status.mem);
 }
 
-void
+void __exit
 cleanup_module(void)
 {
        int this_dev;
index efcdaf1c5f735fd7b5619a2c39ae221b6e0708f4..44a22701da9734df0155a7323cc574ee6752c071 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/uaccess.h>
 #include <net/ieee80211.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include "airo.h"
 
index ac9437d497f08e9bf530a7a3eb1e1aa721af9599..f12355398fe7ca19186d976374c216e07a89c6bf 100644 (file)
@@ -219,21 +219,6 @@ static int airo_config(struct pcmcia_device *link)
        dev = link->priv;
 
        DEBUG(0, "airo_config(0x%p)\n", link);
-       
-       /*
-         This reads the card's CONFIG tuple to find its configuration
-         registers.
-       */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       tuple.Attributes = 0;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = sizeof(buf);
-       tuple.TupleOffset = 0;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
 
        /*
          In this loop, we scan the CIS for configuration table entries,
@@ -247,6 +232,10 @@ static int airo_config(struct pcmcia_device *link)
          these things without consulting the CIS, and most client drivers
          will only use the CIS to fill in implementation-defined details.
        */
+       tuple.Attributes = 0;
+       tuple.TupleData = buf;
+       tuple.TupleDataMax = sizeof(buf);
+       tuple.TupleOffset = 0;
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
        while (1) {
index 5c410989c4d7f7bc17822eac9d9ba52f4891bb66..12617cd0b78eb0be40247cfec84e874935ef5e64 100644 (file)
@@ -243,17 +243,6 @@ static int atmel_config(struct pcmcia_device *link)
        tuple.TupleDataMax = sizeof(buf);
        tuple.TupleOffset = 0;
 
-       /*
-         This reads the card's CONFIG tuple to find its configuration
-         registers.
-       */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        /*
          In this loop, we scan the CIS for configuration table entries,
          each of which describes a valid card configuration, including
index 94dfb92fab5c2f501624e9dbda8c582b29167754..8286678513b937db84bbfebff057191a62a0bdab 100644 (file)
@@ -819,7 +819,7 @@ struct bcm43xx_private {
        struct tasklet_struct isr_tasklet;
 
        /* Periodic tasks */
-       struct work_struct periodic_work;
+       struct delayed_work periodic_work;
        unsigned int periodic_state;
 
        struct work_struct restart_work;
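
The bcm43xx header change above, together with the bcm43xx_main.c and ipw2100 hunks that follow, shows the second half of the workqueue rework: work that is rescheduled with a timeout becomes struct delayed_work, is initialised with INIT_DELAYED_WORK(), is queued with schedule_delayed_work() (a delay of 0 stands in for a plain schedule_work()), and the handler's container_of() must name the embedded .work member. A compact sketch with made-up names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_dev {
        unsigned int ticks;
        struct delayed_work periodic_work;      /* work_struct plus a timer */
};

static void demo_periodic(struct work_struct *work)
{
        struct demo_dev *dev =
                container_of(work, struct demo_dev, periodic_work.work);

        dev->ticks++;
        schedule_delayed_work(&dev->periodic_work, 15 * HZ);    /* re-arm */
}

static void demo_start(struct demo_dev *dev)
{
        INIT_DELAYED_WORK(&dev->periodic_work, demo_periodic);
        schedule_delayed_work(&dev->periodic_work, 0);  /* run immediately */
}
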
index 5b3c27359a1840ba2c2bf59bd1264da401fd720e..2ec2e5afce67dd67d6e62e235e4ac68db37fb76c 100644 (file)
@@ -3215,9 +3215,10 @@ static void do_periodic_work(struct bcm43xx_private *bcm)
        schedule_delayed_work(&bcm->periodic_work, HZ * 15);
 }
 
-static void bcm43xx_periodic_work_handler(void *d)
+static void bcm43xx_periodic_work_handler(struct work_struct *work)
 {
-       struct bcm43xx_private *bcm = d;
+       struct bcm43xx_private *bcm =
+               container_of(work, struct bcm43xx_private, periodic_work.work);
        struct net_device *net_dev = bcm->net_dev;
        unsigned long flags;
        u32 savedirqs = 0;
@@ -3279,11 +3280,11 @@ void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
 
 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
 {
-       struct work_struct *work = &(bcm->periodic_work);
+       struct delayed_work *work = &bcm->periodic_work;
 
        assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
-       INIT_WORK(work, bcm43xx_periodic_work_handler, bcm);
-       schedule_work(work);
+       INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
+       schedule_delayed_work(work, 0);
 }
 
 static void bcm43xx_security_init(struct bcm43xx_private *bcm)
@@ -3635,7 +3636,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
        bcm43xx_periodic_tasks_setup(bcm);
 
        /*FIXME: This should be handled by softmac instead. */
-       schedule_work(&bcm->softmac->associnfo.work);
+       schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
 
 out:
        mutex_unlock(&(bcm)->mutex);
@@ -4182,9 +4183,10 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
 /* Hard-reset the chip. Do not call this directly.
  * Use bcm43xx_controller_restart()
  */
-static void bcm43xx_chip_reset(void *_bcm)
+static void bcm43xx_chip_reset(struct work_struct *work)
 {
-       struct bcm43xx_private *bcm = _bcm;
+       struct bcm43xx_private *bcm =
+               container_of(work, struct bcm43xx_private, restart_work);
        struct bcm43xx_phyinfo *phy;
        int err = -ENODEV;
 
@@ -4211,7 +4213,7 @@ void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
        if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
                return;
        printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
-       INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
+       INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
        schedule_work(&bcm->restart_work);
 }
 
index e663518bd5704c7273dd4197a08d2b91d1bc5042..e89c890d16fd7c0864ab964b675270f5c55b07ea 100644 (file)
@@ -35,7 +35,7 @@ int hostap_80211_get_hdrlen(u16 fc);
 struct net_device_stats *hostap_get_stats(struct net_device *dev);
 void hostap_setup_dev(struct net_device *dev, local_info_t *local,
                      int main_dev);
-void hostap_set_multicast_list_queue(void *data);
+void hostap_set_multicast_list_queue(struct work_struct *work);
 int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
 int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
 void hostap_cleanup(local_info_t *local);
index ba13125024cb35d54fa882a518d640a5f665c2c7..974a8e5bec8b493b6061a6433fa02fabdf5d1094 100644 (file)
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
 static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
 static void hostap_event_expired_sta(struct net_device *dev,
                                     struct sta_info *sta);
-static void handle_add_proc_queue(void *data);
+static void handle_add_proc_queue(struct work_struct *work);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void handle_wds_oper_queue(void *data);
+static void handle_wds_oper_queue(struct work_struct *work);
 static void prism2_send_mgmt(struct net_device *dev,
                             u16 type_subtype, char *body,
                             int body_len, u8 *addr, u16 tx_cb_idx);
@@ -807,7 +807,7 @@ void hostap_init_data(local_info_t *local)
        INIT_LIST_HEAD(&ap->sta_list);
 
        /* Initialize task queue structure for AP management */
-       INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+       INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue);
 
        ap->tx_callback_idx =
                hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
@@ -815,7 +815,7 @@ void hostap_init_data(local_info_t *local)
                printk(KERN_WARNING "%s: failed to register TX callback for "
                       "AP\n", local->dev->name);
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-       INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+       INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue);
 
        ap->tx_callback_auth =
                hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
@@ -1062,9 +1062,10 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off,
 }
 
 
-static void handle_add_proc_queue(void *data)
+static void handle_add_proc_queue(struct work_struct *work)
 {
-       struct ap_data *ap = (struct ap_data *) data;
+       struct ap_data *ap = container_of(work, struct ap_data,
+                                         add_sta_proc_queue);
        struct sta_info *sta;
        char name[20];
        struct add_sta_proc_data *entry, *prev;
@@ -1099,15 +1100,13 @@ static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
 {
        struct sta_info *sta;
 
-       sta = (struct sta_info *)
-               kmalloc(sizeof(struct sta_info), GFP_ATOMIC);
+       sta = kzalloc(sizeof(struct sta_info), GFP_ATOMIC);
        if (sta == NULL) {
                PDEBUG(DEBUG_AP, "AP: kmalloc failed\n");
                return NULL;
        }
 
        /* initialize STA info data */
-       memset(sta, 0, sizeof(struct sta_info));
        sta->local = ap->local;
        skb_queue_head_init(&sta->tx_buf);
        memcpy(sta->addr, addr, ETH_ALEN);
@@ -1952,9 +1951,11 @@ static void handle_pspoll(local_info_t *local,
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
 
-static void handle_wds_oper_queue(void *data)
+static void handle_wds_oper_queue(struct work_struct *work)
 {
-       local_info_t *local = data;
+       struct ap_data *ap = container_of(work, struct ap_data,
+                                         wds_oper_queue);
+       local_info_t *local = ap->local;
        struct wds_oper_data *entry, *prev;
 
        spin_lock_bh(&local->lock);
index f63909e4bc329b5904ca6067140644261d61de0e..8d8f4b9b8b07a032ef3c04ac1b928ca4187c289e 100644 (file)
@@ -293,15 +293,12 @@ static int sandisk_enable_wireless(struct net_device *dev)
                goto done;
        }
 
-       tuple.DesiredTuple = CISTPL_MANFID;
        tuple.Attributes = TUPLE_RETURN_COMMON;
        tuple.TupleData = buf;
        tuple.TupleDataMax = sizeof(buf);
        tuple.TupleOffset = 0;
-       if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
-           pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
-           pcmcia_parse_tuple(hw_priv->link, &tuple, parse) ||
-           parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
+
+       if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) {
                /* No SanDisk manfid found */
                ret = -ENODEV;
                goto done;
@@ -566,23 +563,16 @@ static int prism2_config(struct pcmcia_device *link)
        PDEBUG(DEBUG_FLOW, "prism2_config()\n");
 
        parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
-       hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+       hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
        if (parse == NULL || hw_priv == NULL) {
                ret = -ENOMEM;
                goto failed;
        }
-       memset(hw_priv, 0, sizeof(*hw_priv));
 
-       tuple.DesiredTuple = CISTPL_CONFIG;
        tuple.Attributes = 0;
        tuple.TupleData = buf;
        tuple.TupleDataMax = sizeof(buf);
        tuple.TupleOffset = 0;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-       link->conf.ConfigBase = parse->config.base;
-       link->conf.Present = parse->config.rmask[0];
 
        CS_CHECK(GetConfigurationInfo,
                 pcmcia_get_configuration_info(link, &conf));
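
The airo_cs, atmel_cs and hostap_cs hunks above all delete the same boilerplate: the PCMCIA core now parses the card's CISTPL_CONFIG tuple itself and exposes the manufacturer and card IDs on struct pcmcia_device, so drivers no longer walk the CIS for them. hostap_cs's SanDisk detection shrinks to a direct field comparison, roughly as below (IDs taken from the hunk above, helper name invented):

#include <pcmcia/ds.h>  /* plus the usual pcmcia headers in a real driver */

static int demo_is_sandisk(struct pcmcia_device *link)
{
        return link->manf_id == 0xd601 && link->card_id == 0x0101;
}
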
index ab26b52b3e768ef1f270735c0d9782fbed8babe6..24fc387bba6753255caee927c78352a6bcc2a2fc 100644 (file)
@@ -685,14 +685,12 @@ static int prism2_download(local_info_t *local,
                goto out;
        }
 
-       dl = kmalloc(sizeof(*dl) + param->num_areas *
+       dl = kzalloc(sizeof(*dl) + param->num_areas *
                     sizeof(struct prism2_download_data_area), GFP_KERNEL);
        if (dl == NULL) {
                ret = -ENOMEM;
                goto out;
        }
-       memset(dl, 0, sizeof(*dl) + param->num_areas *
-              sizeof(struct prism2_download_data_area));
        dl->dl_cmd = param->dl_cmd;
        dl->start_addr = param->start_addr;
        dl->num_areas = param->num_areas;
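
Most of the remaining hostap hunks are allocation cleanups: kmalloc() followed by memset(..., 0, ...) becomes kzalloc() (or kcalloc() for arrays of identical elements), and the redundant cast of the void * return value goes away. A sketch of the before/after with a placeholder type:

#include <linux/slab.h>

struct demo_entry { int id; };

static struct demo_entry *demo_alloc(size_t extra)
{
        struct demo_entry *e;

        /* before: e = (struct demo_entry *)kmalloc(sizeof(*e) + extra, GFP_ATOMIC);
         *         memset(e, 0, sizeof(*e) + extra);
         */
        e = kzalloc(sizeof(*e) + extra, GFP_ATOMIC);
        return e;               /* may be NULL; callers must check */
}

/* arrays: results = kcalloc(n, sizeof(*results), GFP_ATOMIC); */
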
index ed00ebb6e7f4bd255d1a1fe8bf05a6f7a5574d6e..a394a23b9a20090ae54922a774725d2848cdd332 100644 (file)
@@ -347,14 +347,12 @@ static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
        if (signal_pending(current))
                return -EINTR;
 
-       entry = (struct hostap_cmd_queue *)
-               kmalloc(sizeof(*entry), GFP_ATOMIC);
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
                       dev->name);
                return -ENOMEM;
        }
-       memset(entry, 0, sizeof(*entry));
        atomic_set(&entry->usecnt, 1);
        entry->type = CMD_SLEEP;
        entry->cmd = cmd;
@@ -517,14 +515,12 @@ static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
                return -1;
        }
 
-       entry = (struct hostap_cmd_queue *)
-               kmalloc(sizeof(*entry), GFP_ATOMIC);
+       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
                       "failed\n", dev->name);
                return -ENOMEM;
        }
-       memset(entry, 0, sizeof(*entry));
        atomic_set(&entry->usecnt, 1);
        entry->type = CMD_CALLBACK;
        entry->cmd = cmd;
@@ -1645,9 +1641,9 @@ static void prism2_schedule_reset(local_info_t *local)
 
 /* Called only as scheduled task after noticing card timeout in interrupt
  * context */
-static void handle_reset_queue(void *data)
+static void handle_reset_queue(struct work_struct *work)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = container_of(work, local_info_t, reset_queue);
 
        printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
        prism2_hw_reset(local->dev);
@@ -2896,9 +2892,10 @@ static void hostap_passive_scan(unsigned long data)
 
 /* Called only as a scheduled task when communications quality values should
  * be updated. */
-static void handle_comms_qual_update(void *data)
+static void handle_comms_qual_update(struct work_struct *work)
 {
-       local_info_t *local = data;
+       local_info_t *local =
+               container_of(work, local_info_t, comms_qual_update);
        prism2_update_comms_qual(local->dev);
 }
 
@@ -3015,14 +3012,12 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set)
        iface = netdev_priv(dev);
        local = iface->local;
 
-       new_entry = (struct set_tim_data *)
-               kmalloc(sizeof(*new_entry), GFP_ATOMIC);
+       new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
        if (new_entry == NULL) {
                printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
                       local->dev->name);
                return -ENOMEM;
        }
-       memset(new_entry, 0, sizeof(*new_entry));
        new_entry->aid = aid;
        new_entry->set = set;
 
@@ -3050,9 +3045,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set)
 }
 
 
-static void handle_set_tim_queue(void *data)
+static void handle_set_tim_queue(struct work_struct *work)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = container_of(work, local_info_t, set_tim_queue);
        struct set_tim_data *entry;
        u16 val;
 
@@ -3209,15 +3204,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
        local->scan_channel_mask = 0xffff;
 
        /* Initialize task queue structures */
-       INIT_WORK(&local->reset_queue, handle_reset_queue, local);
+       INIT_WORK(&local->reset_queue, handle_reset_queue);
        INIT_WORK(&local->set_multicast_list_queue,
-                 hostap_set_multicast_list_queue, local->dev);
+                 hostap_set_multicast_list_queue);
 
-       INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
+       INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
        INIT_LIST_HEAD(&local->set_tim_list);
        spin_lock_init(&local->set_tim_lock);
 
-       INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
+       INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
 
        /* Initialize tasklets for handling hardware IRQ related operations
         * outside hw IRQ handler */
index 50f72d831cf40a8e89ffc37bbf96066825aa3484..b6a02a02da74e645287aa943ea3cfc57d21b53a9 100644 (file)
@@ -327,11 +327,10 @@ static void prism2_info_hostscanresults(local_info_t *local,
        ptr = (u8 *) pos;
 
        new_count = left / result_size;
-       results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
+       results = kcalloc(new_count, sizeof(struct hfa384x_hostscan_result),
                          GFP_ATOMIC);
        if (results == NULL)
                return;
-       memset(results, 0, new_count * sizeof(struct hfa384x_hostscan_result));
 
        for (i = 0; i < new_count; i++) {
                memcpy(&results[i], ptr, copy_len);
@@ -474,9 +473,9 @@ static void handle_info_queue_scanresults(local_info_t *local)
 
 /* Called only as scheduled task after receiving info frames (used to avoid
  * pending too much time in HW IRQ handler). */
-static void handle_info_queue(void *data)
+static void handle_info_queue(struct work_struct *work)
 {
-       local_info_t *local = (local_info_t *) data;
+       local_info_t *local = container_of(work, local_info_t, info_queue);
 
        if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
                               &local->pending_info))
@@ -493,7 +492,7 @@ void hostap_info_init(local_info_t *local)
 {
        skb_queue_head_init(&local->info_list);
 #ifndef PRISM2_NO_STATION_MODES
-       INIT_WORK(&local->info_queue, handle_info_queue, local);
+       INIT_WORK(&local->info_queue, handle_info_queue);
 #endif /* PRISM2_NO_STATION_MODES */
 }
 
index d061fb3443ff9ea614af960986adcc44930f2794..3b7b8063ff1c7803eb18fb3ba48039ee5c65077f 100644 (file)
@@ -181,12 +181,10 @@ static int prism2_ioctl_siwencode(struct net_device *dev,
                struct ieee80211_crypt_data *new_crypt;
 
                /* take WEP into use */
-               new_crypt = (struct ieee80211_crypt_data *)
-                       kmalloc(sizeof(struct ieee80211_crypt_data),
+               new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
                                GFP_KERNEL);
                if (new_crypt == NULL)
                        return -ENOMEM;
-               memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
                new_crypt->ops = ieee80211_get_crypto_ops("WEP");
                if (!new_crypt->ops) {
                        request_module("ieee80211_crypt_wep");
@@ -3320,14 +3318,12 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
 
                prism2_crypt_delayed_deinit(local, crypt);
 
-               new_crypt = (struct ieee80211_crypt_data *)
-                       kmalloc(sizeof(struct ieee80211_crypt_data),
+               new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
                                GFP_KERNEL);
                if (new_crypt == NULL) {
                        ret = -ENOMEM;
                        goto done;
                }
-               memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
                new_crypt->ops = ops;
                new_crypt->priv = new_crypt->ops->init(i);
                if (new_crypt->priv == NULL) {
@@ -3538,14 +3534,12 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
 
                prism2_crypt_delayed_deinit(local, crypt);
 
-               new_crypt = (struct ieee80211_crypt_data *)
-                       kmalloc(sizeof(struct ieee80211_crypt_data),
+               new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
                                GFP_KERNEL);
                if (new_crypt == NULL) {
                        ret = -ENOMEM;
                        goto done;
                }
-               memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
                new_crypt->ops = ops;
                new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
                if (new_crypt->priv == NULL) {
index 53374fcba77e5fc220c04ab26802ae43dd17fb73..0796be9d9e77b2644901285a75db9250dc6e732b 100644 (file)
@@ -767,14 +767,14 @@ static int prism2_set_mac_address(struct net_device *dev, void *p)
 
 /* TODO: to be further implemented as soon as Prism2 fully supports
  *       GroupAddresses and correct documentation is available */
-void hostap_set_multicast_list_queue(void *data)
+void hostap_set_multicast_list_queue(struct work_struct *work)
 {
-       struct net_device *dev = (struct net_device *) data;
+       local_info_t *local =
+               container_of(work, local_info_t, set_multicast_list_queue);
+       struct net_device *dev = local->dev;
        struct hostap_interface *iface;
-       local_info_t *local;
 
        iface = netdev_priv(dev);
-       local = iface->local;
        if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
                            local->is_promisc)) {
                printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
index d1de9766c831f6059ff8aafa95bd541b25af5fe7..c4f6020baa9ee7d12bde7e27ab3aebac4bc9f842 100644 (file)
@@ -300,10 +300,9 @@ static int prism2_pci_probe(struct pci_dev *pdev,
        struct hostap_interface *iface;
        struct hostap_pci_priv *hw_priv;
 
-       hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+       hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
        if (hw_priv == NULL)
                return -ENOMEM;
-       memset(hw_priv, 0, sizeof(*hw_priv));
 
        if (pci_enable_device(pdev))
                goto err_out_free;
index bc81b13a5a2a95a5bce76816c5ca31f0e8373092..e235e06478970d2e240986be13e96930981942db 100644 (file)
@@ -447,10 +447,9 @@ static int prism2_plx_probe(struct pci_dev *pdev,
        int tmd7160;
        struct hostap_plx_priv *hw_priv;
 
-       hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+       hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
        if (hw_priv == NULL)
                return -ENOMEM;
-       memset(hw_priv, 0, sizeof(*hw_priv));
 
        if (pci_enable_device(pdev))
                goto err_out_free;
index 79607b8b877ce74e849e87a22c1a6290ce3c8376..dd9ba4aad7bb402b080518351f87db32da015f83 100644 (file)
@@ -316,7 +316,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv,
                                     struct ipw2100_fw *fw);
 static int ipw2100_ucode_download(struct ipw2100_priv *priv,
                                  struct ipw2100_fw *fw);
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+static void ipw2100_wx_event_work(struct work_struct *work);
 static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
 static struct iw_handler_def ipw2100_wx_handler_def;
 
@@ -679,7 +679,8 @@ static void schedule_reset(struct ipw2100_priv *priv)
                        queue_delayed_work(priv->workqueue, &priv->reset_work,
                                           priv->reset_backoff * HZ);
                else
-                       queue_work(priv->workqueue, &priv->reset_work);
+                       queue_delayed_work(priv->workqueue, &priv->reset_work,
+                                          0);
 
                if (priv->reset_backoff < MAX_RESET_BACKOFF)
                        priv->reset_backoff++;
@@ -1873,8 +1874,10 @@ static void ipw2100_down(struct ipw2100_priv *priv)
        netif_stop_queue(priv->net_dev);
 }
 
-static void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+static void ipw2100_reset_adapter(struct work_struct *work)
 {
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, reset_work.work);
        unsigned long flags;
        union iwreq_data wrqu = {
                .ap_addr = {
@@ -2071,9 +2074,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
                return;
 
        if (priv->status & STATUS_SECURITY_UPDATED)
-               queue_work(priv->workqueue, &priv->security_work);
+               queue_delayed_work(priv->workqueue, &priv->security_work, 0);
 
-       queue_work(priv->workqueue, &priv->wx_event_work);
+       queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
 }
 
 static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -5524,8 +5527,11 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode)
        return err;
 }
 
-static void ipw2100_security_work(struct ipw2100_priv *priv)
+static void ipw2100_security_work(struct work_struct *work)
 {
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, security_work.work);
+
        /* If we happen to have reconnected before we get a chance to
         * process this, then update the security settings--which causes
         * a disassociation to occur */
@@ -5748,7 +5754,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p)
 
        priv->reset_backoff = 0;
        mutex_unlock(&priv->action_mutex);
-       ipw2100_reset_adapter(priv);
+       ipw2100_reset_adapter(&priv->reset_work.work);
        return 0;
 
       done:
@@ -5910,9 +5916,10 @@ static const struct ethtool_ops ipw2100_ethtool_ops = {
        .get_drvinfo = ipw_ethtool_get_drvinfo,
 };
 
-static void ipw2100_hang_check(void *adapter)
+static void ipw2100_hang_check(struct work_struct *work)
 {
-       struct ipw2100_priv *priv = adapter;
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, hang_check.work);
        unsigned long flags;
        u32 rtc = 0xa5a5a5a5;
        u32 len = sizeof(rtc);
@@ -5952,9 +5959,10 @@ static void ipw2100_hang_check(void *adapter)
        spin_unlock_irqrestore(&priv->low_lock, flags);
 }
 
-static void ipw2100_rf_kill(void *adapter)
+static void ipw2100_rf_kill(struct work_struct *work)
 {
-       struct ipw2100_priv *priv = adapter;
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, rf_kill.work);
        unsigned long flags;
 
        spin_lock_irqsave(&priv->low_lock, flags);
@@ -6103,14 +6111,11 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
 
        priv->workqueue = create_workqueue(DRV_NAME);
 
-       INIT_WORK(&priv->reset_work,
-                 (void (*)(void *))ipw2100_reset_adapter, priv);
-       INIT_WORK(&priv->security_work,
-                 (void (*)(void *))ipw2100_security_work, priv);
-       INIT_WORK(&priv->wx_event_work,
-                 (void (*)(void *))ipw2100_wx_event_work, priv);
-       INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
-       INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+       INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+       INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+       INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+       INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+       INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
 
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     ipw2100_irq_tasklet, (unsigned long)priv);
@@ -6215,7 +6220,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        /* Allocate and initialize the Tx/Rx queues and lists */
        if (ipw2100_queues_allocate(priv)) {
                printk(KERN_WARNING DRV_NAME
-                      "Error calilng ipw2100_queues_allocate.\n");
+                      "Error calling ipw2100_queues_allocate.\n");
                err = -ENOMEM;
                goto fail;
        }
@@ -8281,8 +8286,10 @@ static struct iw_handler_def ipw2100_wx_handler_def = {
        .get_wireless_stats = ipw2100_wx_wireless_stats,
 };
 
-static void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+static void ipw2100_wx_event_work(struct work_struct *work)
 {
+       struct ipw2100_priv *priv =
+               container_of(work, struct ipw2100_priv, wx_event_work.work);
        union iwreq_data wrqu;
        int len = ETH_ALEN;
 
index 55b7227198df4dae1e620b74d70903ab1fea9d22..de7d384d38af353d26082855b656bdaeb06554e9 100644 (file)
@@ -583,11 +583,11 @@ struct ipw2100_priv {
        struct tasklet_struct irq_tasklet;
 
        struct workqueue_struct *workqueue;
-       struct work_struct reset_work;
-       struct work_struct security_work;
-       struct work_struct wx_event_work;
-       struct work_struct hang_check;
-       struct work_struct rf_kill;
+       struct delayed_work reset_work;
+       struct delayed_work security_work;
+       struct delayed_work wx_event_work;
+       struct delayed_work hang_check;
+       struct delayed_work rf_kill;
 
        u32 interrupts;
        int tx_interrupts;
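
The ".work" suffix in the container_of() calls above reflects the layout of struct delayed_work: it bundles a plain work_struct with the timer that implements the delay, and handlers registered with INIT_DELAYED_WORK() receive a pointer to that embedded work_struct. A simplified sketch, with field names as assumed here (the real definition lives in <linux/workqueue.h>; do not redefine it in driver code):

    struct delayed_work {                   /* simplified for illustration */
            struct work_struct work;
            struct timer_list timer;
    };

    struct example_priv {
            struct delayed_work hang_check;
    };

    static void hang_check_handler(struct work_struct *work)
    {
            struct example_priv *priv =
                    container_of(work, struct example_priv, hang_check.work);
            /* ... */
    }
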
index c692d01a76ca528c8830b9ba30c392769b76eb0b..22cb3fb7502e280a16a4d5e73f95bb42482ead3a 100644 (file)
@@ -70,7 +70,7 @@
 #define VQ
 #endif
 
-#define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
+#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
 #define DRV_DESCRIPTION        "Intel(R) PRO/Wireless 2200/2915 Network Driver"
 #define DRV_COPYRIGHT  "Copyright(c) 2003-2006 Intel Corporation"
 #define DRV_VERSION     IPW2200_VERSION
@@ -187,9 +187,9 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
 static void ipw_rx_queue_replenish(void *);
 static int ipw_up(struct ipw_priv *);
-static void ipw_bg_up(void *);
+static void ipw_bg_up(struct work_struct *work);
 static void ipw_down(struct ipw_priv *);
-static void ipw_bg_down(void *);
+static void ipw_bg_down(struct work_struct *work);
 static int ipw_config(struct ipw_priv *);
 static int init_supported_rates(struct ipw_priv *priv,
                                struct ipw_supported_rates *prates);
@@ -862,11 +862,12 @@ static void ipw_led_link_on(struct ipw_priv *priv)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_on(void *data)
+static void ipw_bg_led_link_on(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, led_link_on.work);
        mutex_lock(&priv->mutex);
-       ipw_led_link_on(data);
+       ipw_led_link_on(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -906,11 +907,12 @@ static void ipw_led_link_off(struct ipw_priv *priv)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_link_off(void *data)
+static void ipw_bg_led_link_off(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, led_link_off.work);
        mutex_lock(&priv->mutex);
-       ipw_led_link_off(data);
+       ipw_led_link_off(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -985,11 +987,12 @@ static void ipw_led_activity_off(struct ipw_priv *priv)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_led_activity_off(void *data)
+static void ipw_bg_led_activity_off(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, led_act_off.work);
        mutex_lock(&priv->mutex);
-       ipw_led_activity_off(data);
+       ipw_led_activity_off(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -2228,11 +2231,12 @@ static void ipw_adapter_restart(void *adapter)
        }
 }
 
-static void ipw_bg_adapter_restart(void *data)
+static void ipw_bg_adapter_restart(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, adapter_restart);
        mutex_lock(&priv->mutex);
-       ipw_adapter_restart(data);
+       ipw_adapter_restart(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -2249,11 +2253,12 @@ static void ipw_scan_check(void *data)
        }
 }
 
-static void ipw_bg_scan_check(void *data)
+static void ipw_bg_scan_check(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, scan_check.work);
        mutex_lock(&priv->mutex);
-       ipw_scan_check(data);
+       ipw_scan_check(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -3831,17 +3836,19 @@ static int ipw_disassociate(void *data)
        return 1;
 }
 
-static void ipw_bg_disassociate(void *data)
+static void ipw_bg_disassociate(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, disassociate);
        mutex_lock(&priv->mutex);
-       ipw_disassociate(data);
+       ipw_disassociate(priv);
        mutex_unlock(&priv->mutex);
 }
 
-static void ipw_system_config(void *data)
+static void ipw_system_config(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, system_config);
 
 #ifdef CONFIG_IPW2200_PROMISCUOUS
        if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
@@ -4208,11 +4215,12 @@ static void ipw_gather_stats(struct ipw_priv *priv)
                           IPW_STATS_INTERVAL);
 }
 
-static void ipw_bg_gather_stats(void *data)
+static void ipw_bg_gather_stats(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, gather_stats.work);
        mutex_lock(&priv->mutex);
-       ipw_gather_stats(data);
+       ipw_gather_stats(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -4268,8 +4276,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
                if (!(priv->status & STATUS_ROAMING)) {
                        priv->status |= STATUS_ROAMING;
                        if (!(priv->status & STATUS_SCANNING))
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                }
                return;
        }
@@ -4607,8 +4615,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
 #ifdef CONFIG_IPW2200_MONITOR
                        if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
                                priv->status |= STATUS_SCAN_FORCED;
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                                break;
                        }
                        priv->status &= ~STATUS_SCAN_FORCED;
@@ -4631,8 +4639,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
                                        /* Don't schedule if we aborted the scan */
                                        priv->status &= ~STATUS_ROAMING;
                        } else if (priv->status & STATUS_SCAN_PENDING)
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                        else if (priv->config & CFG_BACKGROUND_SCAN
                                 && priv->status & STATUS_ASSOCIATED)
                                queue_delayed_work(priv->workqueue,
@@ -5055,11 +5063,12 @@ static void ipw_rx_queue_replenish(void *data)
        ipw_rx_queue_restock(priv);
 }
 
-static void ipw_bg_rx_queue_replenish(void *data)
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, rx_replenish);
        mutex_lock(&priv->mutex);
-       ipw_rx_queue_replenish(data);
+       ipw_rx_queue_replenish(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -5489,9 +5498,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
        return 1;
 }
 
-static void ipw_merge_adhoc_network(void *data)
+static void ipw_merge_adhoc_network(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, merge_networks);
        struct ieee80211_network *network = NULL;
        struct ipw_network_match match = {
                .network = priv->assoc_network
@@ -5948,11 +5958,12 @@ static void ipw_adhoc_check(void *data)
                           priv->assoc_request.beacon_interval);
 }
 
-static void ipw_bg_adhoc_check(void *data)
+static void ipw_bg_adhoc_check(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, adhoc_check.work);
        mutex_lock(&priv->mutex);
-       ipw_adhoc_check(data);
+       ipw_adhoc_check(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -6299,19 +6310,26 @@ done:
        return err;
 }
 
-static int ipw_request_passive_scan(struct ipw_priv *priv) {
-       return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
+static void ipw_request_passive_scan(struct work_struct *work)
+{
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, request_passive_scan);
+       ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
 }
 
-static int ipw_request_scan(struct ipw_priv *priv) {
-       return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
+static void ipw_request_scan(struct work_struct *work)
+{
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, request_scan.work);
+       ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
 }
 
-static void ipw_bg_abort_scan(void *data)
+static void ipw_bg_abort_scan(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, abort_scan);
        mutex_lock(&priv->mutex);
-       ipw_abort_scan(data);
+       ipw_abort_scan(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -7084,9 +7102,10 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
 /*
 * background support to run QoS activate functionality
 */
-static void ipw_bg_qos_activate(void *data)
+static void ipw_bg_qos_activate(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, qos_activate);
 
        if (priv == NULL)
                return;
@@ -7394,11 +7413,12 @@ static void ipw_roam(void *data)
        priv->status &= ~STATUS_ROAMING;
 }
 
-static void ipw_bg_roam(void *data)
+static void ipw_bg_roam(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, roam);
        mutex_lock(&priv->mutex);
-       ipw_roam(data);
+       ipw_roam(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -7479,8 +7499,8 @@ static int ipw_associate(void *data)
                                                   &priv->request_scan,
                                                   SCAN_INTERVAL);
                        else
-                               queue_work(priv->workqueue,
-                                          &priv->request_scan);
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->request_scan, 0);
                }
 
                return 0;
@@ -7491,11 +7511,12 @@ static int ipw_associate(void *data)
        return 1;
 }
 
-static void ipw_bg_associate(void *data)
+static void ipw_bg_associate(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, associate);
        mutex_lock(&priv->mutex);
-       ipw_associate(data);
+       ipw_associate(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -7656,7 +7677,8 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
 
        /* Big bitfield of all the fields we provide in radiotap */
        ipw_rt->rt_hdr.it_present =
-           ((1 << IEEE80211_RADIOTAP_FLAGS) |
+           ((1 << IEEE80211_RADIOTAP_TSFT) |
+            (1 << IEEE80211_RADIOTAP_FLAGS) |
             (1 << IEEE80211_RADIOTAP_RATE) |
             (1 << IEEE80211_RADIOTAP_CHANNEL) |
             (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7665,10 +7687,14 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
 
        /* Zero the flags, we'll add to them as we go */
        ipw_rt->rt_flags = 0;
-       ipw_rt->rt_tsf = 0ULL;
+       ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
+                              frame->parent_tsf[2] << 16 |
+                              frame->parent_tsf[1] << 8  |
+                              frame->parent_tsf[0]);
 
        /* Convert signal to DBM */
        ipw_rt->rt_dbmsignal = antsignal;
+       ipw_rt->rt_dbmnoise = frame->noise;
 
        /* Convert the channel data and set the flags */
        ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
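
The radiotap TSFT field, previously left at zero, is now filled from the firmware-reported parent_tsf bytes; as the expression above shows, the four bytes are combined least-significant-byte-first, so only the low 32 bits of the 64-bit TSF are populated by this hunk. The same assembly written standalone with explicit widening, as an illustration rather than driver code:

    #include <linux/types.h>

    static u64 tsf_from_bytes(const u8 parent_tsf[4])
    {
            return ((u32)parent_tsf[3] << 24) |
                   ((u32)parent_tsf[2] << 16) |
                   ((u32)parent_tsf[1] << 8)  |
                    (u32)parent_tsf[0];        /* byte 0 is least significant */
    }
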
@@ -7868,7 +7894,8 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
 
        /* Big bitfield of all the fields we provide in radiotap */
        ipw_rt->rt_hdr.it_present =
-           ((1 << IEEE80211_RADIOTAP_FLAGS) |
+           ((1 << IEEE80211_RADIOTAP_TSFT) |
+            (1 << IEEE80211_RADIOTAP_FLAGS) |
             (1 << IEEE80211_RADIOTAP_RATE) |
             (1 << IEEE80211_RADIOTAP_CHANNEL) |
             (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7877,7 +7904,10 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
 
        /* Zero the flags, we'll add to them as we go */
        ipw_rt->rt_flags = 0;
-       ipw_rt->rt_tsf = 0ULL;
+       ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
+                              frame->parent_tsf[2] << 16 |
+                              frame->parent_tsf[1] << 8  |
+                              frame->parent_tsf[0]);
 
        /* Convert to DBM */
        ipw_rt->rt_dbmsignal = signal;
@@ -8276,7 +8306,7 @@ static void ipw_rx(struct ipw_priv *priv)
                                    ("Notification: subtype=%02X flags=%02X size=%d\n",
                                     pkt->u.notification.subtype,
                                     pkt->u.notification.flags,
-                                    pkt->u.notification.size);
+                                    le16_to_cpu(pkt->u.notification.size));
                                ipw_rx_notification(priv, &pkt->u.notification);
                                break;
                        }
@@ -9410,7 +9440,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
 
        IPW_DEBUG_WX("Start scan\n");
 
-       queue_work(priv->workqueue, &priv->request_scan);
+       queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
 
        return 0;
 }
@@ -10547,11 +10577,12 @@ static void ipw_rf_kill(void *adapter)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static void ipw_bg_rf_kill(void *data)
+static void ipw_bg_rf_kill(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, rf_kill.work);
        mutex_lock(&priv->mutex);
-       ipw_rf_kill(data);
+       ipw_rf_kill(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -10582,11 +10613,12 @@ static void ipw_link_up(struct ipw_priv *priv)
                queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
 }
 
-static void ipw_bg_link_up(void *data)
+static void ipw_bg_link_up(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, link_up);
        mutex_lock(&priv->mutex);
-       ipw_link_up(data);
+       ipw_link_up(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -10606,15 +10638,16 @@ static void ipw_link_down(struct ipw_priv *priv)
 
        if (!(priv->status & STATUS_EXIT_PENDING)) {
                /* Queue up another scan... */
-               queue_work(priv->workqueue, &priv->request_scan);
+               queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
        }
 }
 
-static void ipw_bg_link_down(void *data)
+static void ipw_bg_link_down(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, link_down);
        mutex_lock(&priv->mutex);
-       ipw_link_down(data);
+       ipw_link_down(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -10626,38 +10659,30 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
        init_waitqueue_head(&priv->wait_command_queue);
        init_waitqueue_head(&priv->wait_state);
 
-       INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv);
-       INIT_WORK(&priv->associate, ipw_bg_associate, priv);
-       INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv);
-       INIT_WORK(&priv->system_config, ipw_system_config, priv);
-       INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv);
-       INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv);
-       INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv);
-       INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv);
-       INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
-       INIT_WORK(&priv->request_scan,
-                 (void (*)(void *))ipw_request_scan, priv);
-       INIT_WORK(&priv->request_passive_scan,
-                 (void (*)(void *))ipw_request_passive_scan, priv);
-       INIT_WORK(&priv->gather_stats,
-                 (void (*)(void *))ipw_bg_gather_stats, priv);
-       INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
-       INIT_WORK(&priv->roam, ipw_bg_roam, priv);
-       INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv);
-       INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv);
-       INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv);
-       INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on,
-                 priv);
-       INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off,
-                 priv);
-       INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off,
-                 priv);
-       INIT_WORK(&priv->merge_networks,
-                 (void (*)(void *))ipw_merge_adhoc_network, priv);
+       INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+       INIT_WORK(&priv->associate, ipw_bg_associate);
+       INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+       INIT_WORK(&priv->system_config, ipw_system_config);
+       INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+       INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+       INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+       INIT_WORK(&priv->up, ipw_bg_up);
+       INIT_WORK(&priv->down, ipw_bg_down);
+       INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+       INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+       INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+       INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+       INIT_WORK(&priv->roam, ipw_bg_roam);
+       INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+       INIT_WORK(&priv->link_up, ipw_bg_link_up);
+       INIT_WORK(&priv->link_down, ipw_bg_link_down);
+       INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+       INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+       INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+       INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
 
 #ifdef CONFIG_IPW2200_QOS
-       INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate,
-                 priv);
+       INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
 #endif                         /* CONFIG_IPW2200_QOS */
 
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -11129,14 +11154,13 @@ static int ipw_up(struct ipw_priv *priv)
                return -EIO;
 
        if (cmdlog && !priv->cmdlog) {
-               priv->cmdlog = kmalloc(sizeof(*priv->cmdlog) * cmdlog,
+               priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
                                       GFP_KERNEL);
                if (priv->cmdlog == NULL) {
                        IPW_ERROR("Error allocating %d command log entries.\n",
                                  cmdlog);
                        return -ENOMEM;
                } else {
-                       memset(priv->cmdlog, 0, sizeof(*priv->cmdlog) * cmdlog);
                        priv->cmdlog_len = cmdlog;
                }
        }
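
kcalloc(n, size, flags) replaces the kmalloc()-plus-memset() pair here: it returns zeroed memory for an array of n elements and fails with NULL if n*size would overflow, which is why the explicit memset() can be dropped. A sketch of the equivalence, with illustrative names:

    /* before: manual zeroing, no overflow check */
    log = kmalloc(sizeof(*log) * count, GFP_KERNEL);
    if (log)
            memset(log, 0, sizeof(*log) * count);

    /* after: zeroed and overflow-checked in one call */
    log = kcalloc(count, sizeof(*log), GFP_KERNEL);
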
@@ -11190,7 +11214,8 @@ static int ipw_up(struct ipw_priv *priv)
 
                        /* If configure to try and auto-associate, kick
                         * off a scan. */
-                       queue_work(priv->workqueue, &priv->request_scan);
+                       queue_delayed_work(priv->workqueue,
+                                          &priv->request_scan, 0);
 
                        return 0;
                }
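
With request_scan now a delayed_work, every former queue_work() on it becomes queue_delayed_work() with a delay of 0, which queues the item for immediate execution, so behaviour is unchanged; the genuinely deferred submissions keep their interval. Both forms as they appear in this driver:

    /* immediate (was queue_work) */
    queue_delayed_work(priv->workqueue, &priv->request_scan, 0);

    /* deferred, e.g. the background rescan after association */
    queue_delayed_work(priv->workqueue, &priv->request_scan, SCAN_INTERVAL);
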
@@ -11211,11 +11236,12 @@ static int ipw_up(struct ipw_priv *priv)
        return -EIO;
 }
 
-static void ipw_bg_up(void *data)
+static void ipw_bg_up(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, up);
        mutex_lock(&priv->mutex);
-       ipw_up(data);
+       ipw_up(priv);
        mutex_unlock(&priv->mutex);
 }
 
@@ -11282,11 +11308,12 @@ static void ipw_down(struct ipw_priv *priv)
        ipw_led_radio_off(priv);
 }
 
-static void ipw_bg_down(void *data)
+static void ipw_bg_down(struct work_struct *work)
 {
-       struct ipw_priv *priv = data;
+       struct ipw_priv *priv =
+               container_of(work, struct ipw_priv, down);
        mutex_lock(&priv->mutex);
-       ipw_down(data);
+       ipw_down(priv);
        mutex_unlock(&priv->mutex);
 }
 
index dad5eedefbf1c828c20b1c43a755c2c370044ca0..626a240a87d869f111e73d9dc43300ac3276b689 100644 (file)
@@ -1290,21 +1290,21 @@ struct ipw_priv {
 
        struct workqueue_struct *workqueue;
 
-       struct work_struct adhoc_check;
+       struct delayed_work adhoc_check;
        struct work_struct associate;
        struct work_struct disassociate;
        struct work_struct system_config;
        struct work_struct rx_replenish;
-       struct work_struct request_scan;
+       struct delayed_work request_scan;
        struct work_struct request_passive_scan;
        struct work_struct adapter_restart;
-       struct work_struct rf_kill;
+       struct delayed_work rf_kill;
        struct work_struct up;
        struct work_struct down;
-       struct work_struct gather_stats;
+       struct delayed_work gather_stats;
        struct work_struct abort_scan;
        struct work_struct roam;
-       struct work_struct scan_check;
+       struct delayed_work scan_check;
        struct work_struct link_up;
        struct work_struct link_down;
 
@@ -1319,9 +1319,9 @@ struct ipw_priv {
        u32 led_ofdm_on;
        u32 led_ofdm_off;
 
-       struct work_struct led_link_on;
-       struct work_struct led_link_off;
-       struct work_struct led_act_off;
+       struct delayed_work led_link_on;
+       struct delayed_work led_link_off;
+       struct delayed_work led_act_off;
        struct work_struct merge_networks;
 
        struct ipw_cmd_log *cmdlog;
index 6714e0dfa8d6ebadd95490477ddb259999a73b2e..644b4741ef746594242197d7362817b8f66841a4 100644 (file)
@@ -735,31 +735,13 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 static int netwave_pcmcia_config(struct pcmcia_device *link) {
     struct net_device *dev = link->priv;
     netwave_private *priv = netdev_priv(dev);
-    tuple_t tuple;
-    cisparse_t parse;
     int i, j, last_ret, last_fn;
-    u_char buf[64];
     win_req_t req;
     memreq_t mem;
     u_char __iomem *ramBase = NULL;
 
     DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link);
 
-    /*
-      This reads the card's CONFIG tuple to find its configuration
-      registers.
-    */
-    tuple.Attributes = 0;
-    tuple.TupleData = (cisdata_t *) buf;
-    tuple.TupleDataMax = 64;
-    tuple.TupleOffset = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     /*
      *  Try allocating IO ports.  This tries a few fixed addresses.
      *  If you want, you can also read the card's config table to
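
Each PCMCIA driver in this series loses the same block: the hand-rolled CISTPL_CONFIG lookup that filled link->conf.ConfigBase and link->conf.Present. The premise of these hunks is that the PCMCIA core now performs that parsing before the driver's config() routine runs (the core-side change is not among the lines shown here), so the drivers keep only the tuple walks they still need, typically the config-table scan:

    /* what typically remains in config() after the cleanup (sketch) */
    tuple.Attributes   = 0;
    tuple.TupleData    = buf;
    tuple.TupleDataMax = sizeof(buf);
    tuple.TupleOffset  = 0;
    tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
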
index 336cabac13b392a584259f2a045bc7e673275be1..936c888e03e15aa44352285ea3c952a15ea587e0 100644 (file)
@@ -980,9 +980,11 @@ static void print_linkstatus(struct net_device *dev, u16 status)
 }
 
 /* Search scan results for requested BSSID, join it if found */
-static void orinoco_join_ap(struct net_device *dev)
+static void orinoco_join_ap(struct work_struct *work)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
+       struct orinoco_private *priv =
+               container_of(work, struct orinoco_private, join_work);
+       struct net_device *dev = priv->ndev;
        struct hermes *hw = &priv->hw;
        int err;
        unsigned long flags;
@@ -1055,9 +1057,11 @@ static void orinoco_join_ap(struct net_device *dev)
 }
 
 /* Send new BSSID to userspace */
-static void orinoco_send_wevents(struct net_device *dev)
+static void orinoco_send_wevents(struct work_struct *work)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
+       struct orinoco_private *priv =
+               container_of(work, struct orinoco_private, wevent_work);
+       struct net_device *dev = priv->ndev;
        struct hermes *hw = &priv->hw;
        union iwreq_data wrqu;
        int err;
@@ -1864,9 +1868,11 @@ __orinoco_set_multicast_list(struct net_device *dev)
 
 /* This must be called from user context, without locks held - use
  * schedule_work() */
-static void orinoco_reset(struct net_device *dev)
+static void orinoco_reset(struct work_struct *work)
 {
-       struct orinoco_private *priv = netdev_priv(dev);
+       struct orinoco_private *priv =
+               container_of(work, struct orinoco_private, reset_work);
+       struct net_device *dev = priv->ndev;
        struct hermes *hw = &priv->hw;
        int err;
        unsigned long flags;
@@ -2434,9 +2440,9 @@ struct net_device *alloc_orinocodev(int sizeof_card,
        priv->hw_unavailable = 1; /* orinoco_init() must clear this
                                   * before anything else touches the
                                   * hardware */
-       INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev);
-       INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev);
-       INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev);
+       INIT_WORK(&priv->reset_work, orinoco_reset);
+       INIT_WORK(&priv->join_work, orinoco_join_ap);
+       INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
 
        netif_carrier_off(dev);
        priv->last_linkstatus = 0xffff;
@@ -3608,7 +3614,7 @@ static int orinoco_ioctl_reset(struct net_device *dev,
                printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
 
                /* Firmware reset */
-               orinoco_reset(dev);
+               orinoco_reset(&priv->reset_work);
        } else {
                printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
 
@@ -4154,7 +4160,7 @@ static int orinoco_ioctl_commit(struct net_device *dev,
                return 0;
 
        if (priv->broken_disableport) {
-               orinoco_reset(dev);
+               orinoco_reset(&priv->reset_work);
                return 0;
        }
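
Call sites that used to invoke a handler directly with the private structure now hand it the address of the embedded work item instead (orinoco_reset(&priv->reset_work) above, and ipw2100_reset_adapter(&priv->reset_work.work) earlier in this patch); container_of() inside the handler maps that pointer back to priv, so a synchronous call and a workqueue invocation share one entry point. The shape of it, with illustrative names:

    struct my_priv { struct work_struct reset_work; /* ... */ };

    static void my_reset(struct work_struct *work)
    {
            struct my_priv *priv = container_of(work, struct my_priv, reset_work);
            /* perform the reset using priv ... */
    }

    /* from an ioctl or error path: */
    my_reset(&priv->reset_work);       /* run it right now            */
    schedule_work(&priv->reset_work);  /* or defer it to a workqueue  */
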
 
index bc14689cbf24fb209b1fc38cdd875f3f7aba9012..d08ae8d2726c6ac42e51b47ac5534af37d4329da 100644 (file)
@@ -178,21 +178,6 @@ orinoco_cs_config(struct pcmcia_device *link)
        cisparse_t parse;
        void __iomem *mem;
 
-       /*
-        * This reads the card's CONFIG tuple to find its
-        * configuration registers.
-        */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       tuple.Attributes = 0;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = sizeof(buf);
-       tuple.TupleOffset = 0;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        /* Look up the current Vcc */
        CS_CHECK(GetConfigurationInfo,
                 pcmcia_get_configuration_info(link, &conf));
@@ -211,6 +196,10 @@ orinoco_cs_config(struct pcmcia_device *link)
         * and most client drivers will only use the CIS to fill in
         * implementation-defined details.
         */
+       tuple.Attributes = 0;
+       tuple.TupleData = buf;
+       tuple.TupleDataMax = sizeof(buf);
+       tuple.TupleOffset = 0;
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
        while (1) {
index 4a20e45de3cab20caced617449070d994fa5272e..96606ed100761bca80fc5a04524f2b5fc0868959 100644 (file)
@@ -157,8 +157,9 @@ prism54_mib_init(islpci_private *priv)
  * schedule_work(), thus we can as well use sleeping semaphore
  * locking */
 void
-prism54_update_stats(islpci_private *priv)
+prism54_update_stats(struct work_struct *work)
 {
+       islpci_private *priv = container_of(work, islpci_private, stats_work);
        char *data;
        int j;
        struct obj_bss bss, *bss2;
@@ -2140,11 +2141,9 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
                                         struct islpci_bss_wpa_ie, list);
                        list_del(&bss->list);
                } else {
-                       bss = kmalloc(sizeof (*bss), GFP_ATOMIC);
-                       if (bss != NULL) {
+                       bss = kzalloc(sizeof (*bss), GFP_ATOMIC);
+                       if (bss != NULL)
                                priv->num_bss_wpa++;
-                               memset(bss, 0, sizeof (*bss));
-                       }
                }
                if (bss != NULL) {
                        memcpy(bss->bssid, bssid, ETH_ALEN);
@@ -2493,9 +2492,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
  * interrupt context, no locks held.
  */
 void
-prism54_process_trap(void *data)
+prism54_process_trap(struct work_struct *work)
 {
-       struct islpci_mgmtframe *frame = data;
+       struct islpci_mgmtframe *frame =
+               container_of(work, struct islpci_mgmtframe, ws);
        struct net_device *ndev = frame->ndev;
        enum oid_num_t n = mgt_oidtonum(frame->header->oid);
 
@@ -2684,11 +2684,10 @@ prism2_ioctl_set_generic_element(struct net_device *ndev,
                return -EINVAL;
 
        alen = sizeof(*attach) + len;
-       attach = kmalloc(alen, GFP_KERNEL);
+       attach = kzalloc(alen, GFP_KERNEL);
        if (attach == NULL)
                return -ENOMEM;
 
-       memset(attach, 0, alen);
 #define WLAN_FC_TYPE_MGMT 0
 #define WLAN_FC_STYPE_ASSOC_REQ 0
 #define WLAN_FC_STYPE_REASSOC_REQ 2
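
The two allocation hunks above are the companion cleanup to kcalloc(): kzalloc(size, flags) is kmalloc() plus a memset() to zero, so the separate memset() calls disappear. Taken directly from the code above:

    attach = kzalloc(alen, GFP_KERNEL);  /* was kmalloc(alen, ...) + memset(attach, 0, alen) */
    if (attach == NULL)
            return -ENOMEM;
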
index e8183d30c52eca1e2c9e1a6f21caa29d5feae2ed..bcfbfb9281d27c3e23dfc3de3619923c07304bd0 100644 (file)
 void prism54_mib_init(islpci_private *);
 
 struct iw_statistics *prism54_get_wireless_stats(struct net_device *);
-void prism54_update_stats(islpci_private *);
+void prism54_update_stats(struct work_struct *);
 
 void prism54_acl_init(struct islpci_acl *);
 void prism54_acl_clean(struct islpci_acl *);
 
-void prism54_process_trap(void *);
+void prism54_process_trap(struct work_struct *);
 
 void prism54_wpa_bss_ie_init(islpci_private *priv);
 void prism54_wpa_bss_ie_clean(islpci_private *priv);
index 1e0603ca436c01b1c2ac49b42537307b251677b7..f057fd9fcd79edcf2288451ec21c51aa09ad3f6b 100644 (file)
@@ -860,11 +860,10 @@ islpci_setup(struct pci_dev *pdev)
        priv->state_off = 1;
 
        /* initialize workqueues */
-       INIT_WORK(&priv->stats_work,
-                 (void (*)(void *)) prism54_update_stats, priv);
+       INIT_WORK(&priv->stats_work, prism54_update_stats);
        priv->stats_timestamp = 0;
 
-       INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv);
+       INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
        priv->reset_task_pending = 0;
 
        /* allocate various memory areas */
index 676d83813dc8526699c732d3eccc36d583ad536f..b1122912ee2d25dcaffe7024bd78c00b5bcc6b75 100644 (file)
@@ -480,9 +480,9 @@ islpci_eth_receive(islpci_private *priv)
 }
 
 void
-islpci_do_reset_and_wake(void *data)
+islpci_do_reset_and_wake(struct work_struct *work)
 {
-       islpci_private *priv = data;
+       islpci_private *priv = container_of(work, islpci_private, reset_task);
 
        islpci_reset(priv, 1);
        priv->reset_task_pending = 0;
index 26789454067ccbc40193e3bf27f5dbae1d171487..5bf820defbd01b73b906d295bfe3ce40f1e02e80 100644 (file)
@@ -67,6 +67,6 @@ void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *);
 int islpci_eth_transmit(struct sk_buff *, struct net_device *);
 int islpci_eth_receive(islpci_private *);
 void islpci_eth_tx_timeout(struct net_device *);
-void islpci_do_reset_and_wake(void *data);
+void islpci_do_reset_and_wake(struct work_struct *);
 
 #endif                         /* _ISL_GEN_H */
index 036a875054c99fa5041c0f23181962ec0e67558c..2246f7930b4edfd45aeac66a5bdf4c194ce91a96 100644 (file)
@@ -386,7 +386,7 @@ islpci_mgt_receive(struct net_device *ndev)
 
                        /* Create work to handle trap out of interrupt
                         * context. */
-                       INIT_WORK(&frame->ws, prism54_process_trap, frame);
+                       INIT_WORK(&frame->ws, prism54_process_trap);
                        schedule_work(&frame->ws);
 
                } else {
index fbc52b6a30247f100d500daed5add811248de452..e6cf9df2c20655699618bd3f18595a99862772e3 100644 (file)
@@ -235,12 +235,10 @@ mgt_init(islpci_private *priv)
 {
        int i;
 
-       priv->mib = kmalloc(OID_NUM_LAST * sizeof (void *), GFP_KERNEL);
+       priv->mib = kcalloc(OID_NUM_LAST, sizeof (void *), GFP_KERNEL);
        if (!priv->mib)
                return -ENOMEM;
 
-       memset(priv->mib, 0, OID_NUM_LAST * sizeof (void *));
-
        /* Alloc the cache */
        for (i = 0; i < OID_NUM_LAST; i++) {
                if (isl_oid[i].flags & OID_FLAG_CACHED) {
index 7fbfc9e41d07b804dea4c7704d95da10f459f69c..88e10c9bc4ac2ee4985fa3f9e5f63d7d1fcd1846 100644 (file)
@@ -408,11 +408,8 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 #define MAX_TUPLE_SIZE 128
 static int ray_config(struct pcmcia_device *link)
 {
-    tuple_t tuple;
-    cisparse_t parse;
     int last_fn = 0, last_ret = 0;
     int i;
-    u_char buf[MAX_TUPLE_SIZE];
     win_req_t req;
     memreq_t mem;
     struct net_device *dev = (struct net_device *)link->priv;
@@ -420,29 +417,12 @@ static int ray_config(struct pcmcia_device *link)
 
     DEBUG(1, "ray_config(0x%p)\n", link);
 
-    /* This reads the card's CONFIG tuple to find its configuration regs */
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = MAX_TUPLE_SIZE;
-    tuple.TupleOffset = 0;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     /* Determine card type and firmware version */
-    buf[0] = buf[MAX_TUPLE_SIZE - 1] = 0;
-    tuple.DesiredTuple = CISTPL_VERS_1;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    tuple.TupleData = buf;
-    tuple.TupleDataMax = MAX_TUPLE_SIZE;
-    tuple.TupleOffset = 2;
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-
-    for (i=0; i<tuple.TupleDataLen - 4; i++) 
-        if (buf[i] == 0) buf[i] = ' ';
-    printk(KERN_INFO "ray_cs Detected: %s\n",buf);
+    printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
+          link->prod_id[0] ? link->prod_id[0] : " ",
+          link->prod_id[1] ? link->prod_id[1] : " ",
+          link->prod_id[2] ? link->prod_id[2] : " ",
+          link->prod_id[3] ? link->prod_id[3] : " ");
 
     /* Now allocate an interrupt line.  Note that this does not
        actually assign a handler to the interrupt.
index bcc7038130f69a1355871e8abc5884ff2d386863..cf2d1486b01d811ca021c18f539a3de0533a5bc8 100644 (file)
@@ -647,21 +647,6 @@ spectrum_cs_config(struct pcmcia_device *link)
        cisparse_t parse;
        void __iomem *mem;
 
-       /*
-        * This reads the card's CONFIG tuple to find its
-        * configuration registers.
-        */
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       tuple.Attributes = 0;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = sizeof(buf);
-       tuple.TupleOffset = 0;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        /* Look up the current Vcc */
        CS_CHECK(GetConfigurationInfo,
                 pcmcia_get_configuration_info(link, &conf));
@@ -681,6 +666,10 @@ spectrum_cs_config(struct pcmcia_device *link)
         * implementation-defined details.
         */
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+       tuple.Attributes = 0;
+       tuple.TupleData = buf;
+       tuple.TupleDataMax = sizeof(buf);
+       tuple.TupleOffset = 0;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
        while (1) {
                cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
index aafb301041b124f03d672cedf2c30f4fb3472a47..233d906c08f0eed014e14a3395fe31ff562cd7b4 100644 (file)
@@ -3939,11 +3939,8 @@ wv_hw_reset(struct net_device *  dev)
 static inline int
 wv_pcmcia_config(struct pcmcia_device *        link)
 {
-  tuple_t              tuple;
-  cisparse_t           parse;
   struct net_device *  dev = (struct net_device *) link->priv;
   int                  i;
-  u_char               buf[64];
   win_req_t            req;
   memreq_t             mem;
   net_local *          lp = netdev_priv(dev);
@@ -3953,36 +3950,6 @@ wv_pcmcia_config(struct pcmcia_device *  link)
   printk(KERN_DEBUG "->wv_pcmcia_config(0x%p)\n", link);
 #endif
 
-  /*
-   * This reads the card's CONFIG tuple to find its configuration
-   * registers.
-   */
-  do
-    {
-      tuple.Attributes = 0;
-      tuple.DesiredTuple = CISTPL_CONFIG;
-      i = pcmcia_get_first_tuple(link, &tuple);
-      if(i != CS_SUCCESS)
-       break;
-      tuple.TupleData = (cisdata_t *)buf;
-      tuple.TupleDataMax = 64;
-      tuple.TupleOffset = 0;
-      i = pcmcia_get_tuple_data(link, &tuple);
-      if(i != CS_SUCCESS)
-       break;
-      i = pcmcia_parse_tuple(link, &tuple, &parse);
-      if(i != CS_SUCCESS)
-       break;
-      link->conf.ConfigBase = parse.config.base;
-      link->conf.Present = parse.config.rmask[0];
-    }
-  while(0);
-  if(i != CS_SUCCESS)
-    {
-      cs_error(link, ParseTuple, i);
-      return FALSE;
-    }
-
   do
     {
       i = pcmcia_request_io(link, &link->io);
index 5b98a7876982a528e56f819016457bc4359a6d54..583e0d655a986897df28e5319ec238089681bb6c 100644 (file)
@@ -1966,25 +1966,10 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
  */
 static int wl3501_config(struct pcmcia_device *link)
 {
-       tuple_t tuple;
-       cisparse_t parse;
        struct net_device *dev = link->priv;
        int i = 0, j, last_fn, last_ret;
-       unsigned char bf[64];
        struct wl3501_card *this;
 
-       /* This reads the card's CONFIG tuple to find its config registers. */
-       tuple.Attributes        = 0;
-       tuple.DesiredTuple      = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       tuple.TupleData         = bf;
-       tuple.TupleDataMax      = sizeof(bf);
-       tuple.TupleOffset       = 0;
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase   = parse.config.base;
-       link->conf.Present      = parse.config.rmask[0];
-
        /* Try allocating IO ports.  This tries a few fixed addresses.  If you
         * want, you can also read the card's config table to pick addresses --
         * see the serial driver for an example. */
index 8be99ebbe1cd9bfbc87a0e38cbafb1c078951318..77e11ddad836d135f444d53f567262686d408c96 100644 (file)
@@ -1673,3 +1673,16 @@ int zd_rfwritev_cr_locked(struct zd_chip *chip,
 
        return 0;
 }
+
+int zd_chip_set_multicast_hash(struct zd_chip *chip,
+                              struct zd_mc_hash *hash)
+{
+       struct zd_ioreq32 ioreqs[] = {
+               { CR_GROUP_HASH_P1, hash->low },
+               { CR_GROUP_HASH_P2, hash->high },
+       };
+
+       dev_dbg_f(zd_chip_dev(chip), "hash l 0x%08x h 0x%08x\n",
+               ioreqs[0].value, ioreqs[1].value);
+       return zd_iowrite32a(chip, ioreqs, ARRAY_SIZE(ioreqs));
+}
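
zd_chip_set_multicast_hash() writes the two 32-bit halves of the hash into CR_GROUP_HASH_P1/P2 with one vectored register write. Together with the inline helpers added to zd_chip.h below, the intended call sequence looks like this (chip and mc_addr stand in for the driver's own variables):

    struct zd_mc_hash hash;

    zd_mc_clear(&hash);                       /* broadcast bit (63) stays set    */
    zd_mc_add_addr(&hash, mc_addr);           /* one hash bit per multicast MAC  */
    zd_chip_set_multicast_hash(chip, &hash);  /* push both halves to the device  */
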
index ca892b9a6448b9abe16c9228c2dc2ee6a787d2ba..a4e3cee9b59d2f94b604f358535cc54dcb8ebe15 100644 (file)
 #define CR_BSSID_P1                    CTL_REG(0x0618)
 #define CR_BSSID_P2                    CTL_REG(0x061C)
 #define CR_BCN_PLCP_CFG                        CTL_REG(0x0620)
+
+/* Group hash table for filtering incoming packets.
+ *
+ * The group hash table is 64 bits wide and split across two registers; the
+ * first register holds the lower 32 bits. The upper 6 bits of the last byte
+ * of the target address are used as the index, and a packet is received if
+ * the hash table bit for that index is set. This is used for multicast
+ * handling, but for broadcasts (address ff:ff:ff:ff:ff:ff) the highest bit
+ * in the second register must also be set.
+ */
 #define CR_GROUP_HASH_P1               CTL_REG(0x0624)
 #define CR_GROUP_HASH_P2               CTL_REG(0x0628)
-#define CR_RX_TIMEOUT                  CTL_REG(0x062C)
 
+#define CR_RX_TIMEOUT                  CTL_REG(0x062C)
 /* Basic rates supported by the BSS. When producing ACK or CTS messages, the
  * device will use a rate in this table that is less than or equal to the rate
  * of the incoming frame which prompted the response */
@@ -864,4 +873,36 @@ u8 zd_rx_strength_percent(u8 rssi);
 
 u16 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
 
+struct zd_mc_hash {
+       u32 low;
+       u32 high;
+};
+
+static inline void zd_mc_clear(struct zd_mc_hash *hash)
+{
+       hash->low = 0;
+       /* The interfaces must always receive broadcasts.
+        * The hash of the broadcast address ff:ff:ff:ff:ff:ff is 63.
+        */
+       hash->high = 0x80000000;
+}
+
+static inline void zd_mc_add_all(struct zd_mc_hash *hash)
+{
+       hash->low = hash->high = 0xffffffff;
+}
+
+static inline void zd_mc_add_addr(struct zd_mc_hash *hash, u8 *addr)
+{
+       unsigned int i = addr[5] >> 2;
+       if (i < 32) {
+               hash->low |= 1 << i;
+       } else {
+               hash->high |= 1 << (i-32);
+       }
+}
+
+int zd_chip_set_multicast_hash(struct zd_chip *chip,
+                              struct zd_mc_hash *hash);
+
 #endif /* _ZD_CHIP_H */
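
The hash index is simply the top six bits of the last address byte. For the broadcast address ff:ff:ff:ff:ff:ff that is 0xff >> 2 = 63, i.e. bit 31 of the high word, which is exactly the bit zd_mc_clear() pre-sets (0x80000000) so broadcast frames are never filtered out. A worked example for an ordinary multicast address; the address shown is the standard mDNS group, used purely as an illustration:

    u8 mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };  /* 224.0.0.251 */
    unsigned int i = mc[5] >> 2;    /* 0xfb >> 2 = 62 */
    /* i >= 32, so zd_mc_add_addr() sets bit 62 - 32 = 30 of hash.high */
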
index 2696f95b92781d94c67b0258b91699afe7aee685..00ca704ece35b65f0951404d5d1f037869d9921b 100644 (file)
 
 static void ieee_init(struct ieee80211_device *ieee);
 static void softmac_init(struct ieee80211softmac_device *sm);
-static void set_rts_cts_work(void *d);
-static void set_basic_rates_work(void *d);
+static void set_rts_cts_work(struct work_struct *work);
+static void set_basic_rates_work(struct work_struct *work);
 
 static void housekeeping_init(struct zd_mac *mac);
 static void housekeeping_enable(struct zd_mac *mac);
 static void housekeeping_disable(struct zd_mac *mac);
 
+static void set_multicast_hash_handler(struct work_struct *work);
+
 int zd_mac_init(struct zd_mac *mac,
                struct net_device *netdev,
                struct usb_interface *intf)
@@ -48,13 +50,14 @@ int zd_mac_init(struct zd_mac *mac,
        memset(mac, 0, sizeof(*mac));
        spin_lock_init(&mac->lock);
        mac->netdev = netdev;
-       INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac);
-       INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac);
+       INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
+       INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work);
 
        ieee_init(ieee);
        softmac_init(ieee80211_priv(netdev));
        zd_chip_init(&mac->chip, netdev, intf);
        housekeeping_init(mac);
+       INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
        return 0;
 }
 
@@ -136,6 +139,7 @@ out:
 
 void zd_mac_clear(struct zd_mac *mac)
 {
+       flush_workqueue(zd_workqueue);
        zd_chip_clear(&mac->chip);
        ZD_ASSERT(!spin_is_locked(&mac->lock));
        ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
@@ -256,6 +260,43 @@ int zd_mac_set_mac_address(struct net_device *netdev, void *p)
        return 0;
 }
 
+static void set_multicast_hash_handler(struct work_struct *work)
+{
+       struct zd_mac *mac = container_of(work, struct zd_mac,
+                                         set_multicast_hash_work);
+       struct zd_mc_hash hash;
+
+       spin_lock_irq(&mac->lock);
+       hash = mac->multicast_hash;
+       spin_unlock_irq(&mac->lock);
+
+       zd_chip_set_multicast_hash(&mac->chip, &hash);
+}
+
+void zd_mac_set_multicast_list(struct net_device *dev)
+{
+       struct zd_mc_hash hash;
+       struct zd_mac *mac = zd_netdev_mac(dev);
+       struct dev_mc_list *mc;
+       unsigned long flags;
+
+       if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
+               zd_mc_add_all(&hash);
+       } else {
+               zd_mc_clear(&hash);
+               for (mc = dev->mc_list; mc; mc = mc->next) {
+                       dev_dbg_f(zd_mac_dev(mac), "mc addr " MAC_FMT "\n",
+                                 MAC_ARG(mc->dmi_addr));
+                       zd_mc_add_addr(&hash, mc->dmi_addr);
+               }
+       }
+
+       spin_lock_irqsave(&mac->lock, flags);
+       mac->multicast_hash = hash;
+       spin_unlock_irqrestore(&mac->lock, flags);
+       queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+}
+
 int zd_mac_set_regdomain(struct zd_mac *mac, u8 regdomain)
 {
        int r;
@@ -366,9 +407,10 @@ static void try_enable_tx(struct zd_mac *mac)
        spin_unlock_irqrestore(&mac->lock, flags);
 }
 
-static void set_rts_cts_work(void *d)
+static void set_rts_cts_work(struct work_struct *work)
 {
-       struct zd_mac *mac = d;
+       struct zd_mac *mac =
+               container_of(work, struct zd_mac, set_rts_cts_work.work);
        unsigned long flags;
        u8 rts_rate;
        unsigned int short_preamble;
@@ -387,9 +429,10 @@ static void set_rts_cts_work(void *d)
        try_enable_tx(mac);
 }
 
-static void set_basic_rates_work(void *d)
+static void set_basic_rates_work(struct work_struct *work)
 {
-       struct zd_mac *mac = d;
+       struct zd_mac *mac =
+               container_of(work, struct zd_mac, set_basic_rates_work.work);
        unsigned long flags;
        u16 basic_rates;
 
@@ -467,12 +510,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes)
        if (need_set_rts_cts && !mac->updating_rts_rate) {
                mac->updating_rts_rate = 1;
                netif_stop_queue(mac->netdev);
-               queue_work(zd_workqueue, &mac->set_rts_cts_work);
+               queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0);
        }
        if (need_set_rates && !mac->updating_basic_rates) {
                mac->updating_basic_rates = 1;
                netif_stop_queue(mac->netdev);
-               queue_work(zd_workqueue, &mac->set_basic_rates_work);
+               queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work,
+                                  0);
        }
        spin_unlock_irqrestore(&mac->lock, flags);
 }
@@ -615,6 +659,9 @@ int zd_mac_get_range(struct zd_mac *mac, struct iw_range *range)
        range->we_version_compiled = WIRELESS_EXT;
        range->we_version_source = 20;
 
+       range->enc_capa = IW_ENC_CAPA_WPA |  IW_ENC_CAPA_WPA2 |
+                         IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
        ZD_ASSERT(!irqs_disabled());
        spin_lock_irq(&mac->lock);
        regdomain = mac->regdomain;
@@ -927,7 +974,8 @@ static int is_data_packet_for_us(struct ieee80211_device *ieee,
        }
 
        return memcmp(hdr->addr1, netdev->dev_addr, ETH_ALEN) == 0 ||
-              is_multicast_ether_addr(hdr->addr1) ||
+              (is_multicast_ether_addr(hdr->addr1) &&
+               memcmp(hdr->addr3, netdev->dev_addr, ETH_ALEN) != 0) ||
               (netdev->flags & IFF_PROMISC);
 }
 
@@ -1059,10 +1107,8 @@ int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length)
        memcpy(skb_put(skb, length), buffer, length);
 
        r = ieee80211_rx(ieee, skb, &stats);
-       if (!r) {
-               ZD_ASSERT(in_irq());
-               dev_kfree_skb_irq(skb);
-       }
+       if (!r)
+               dev_kfree_skb_any(skb);
        return 0;
 }
 
@@ -1182,9 +1228,10 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
 
 #define LINK_LED_WORK_DELAY HZ
 
-static void link_led_handler(void *p)
+static void link_led_handler(struct work_struct *work)
 {
-       struct zd_mac *mac = p;
+       struct zd_mac *mac =
+               container_of(work, struct zd_mac, housekeeping.link_led_work.work);
        struct zd_chip *chip = &mac->chip;
        struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev);
        int is_associated;
@@ -1205,7 +1252,7 @@ static void link_led_handler(void *p)
 
 static void housekeeping_init(struct zd_mac *mac)
 {
-       INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac);
+       INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler);
 }
 
 static void housekeeping_enable(struct zd_mac *mac)
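
dev->set_multicast_list can be called in atomic context, while zd1211 register access goes over USB and may sleep, so zd_mac_set_multicast_list() only snapshots dev->mc_list into mac->multicast_hash under the spinlock and leaves the actual CR_GROUP_HASH writes to zd_workqueue; the flush_workqueue() added to zd_mac_clear() then ensures a pending update has finished before the structure is wiped. The two halves of that hand-off, condensed from the code above:

    /* atomic side: publish the new hash and kick the worker */
    spin_lock_irqsave(&mac->lock, flags);
    mac->multicast_hash = hash;                 /* struct copy */
    spin_unlock_irqrestore(&mac->lock, flags);
    queue_work(zd_workqueue, &mac->set_multicast_hash_work);

    /* process context: take a consistent snapshot, then talk to the chip */
    spin_lock_irq(&mac->lock);
    hash = mac->multicast_hash;
    spin_unlock_irq(&mac->lock);
    zd_chip_set_multicast_hash(&mac->chip, &hash);
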
index 5dcfb251f02ec6b2d2eb9c5cc5641300ac5b2648..f0cf05dc7d3e0c9e5d36b32c348b2bac38c76c04 100644 (file)
@@ -119,7 +119,7 @@ struct rx_status {
 #define ZD_RX_ERROR                    0x80
 
 struct housekeeping {
-       struct work_struct link_led_work;
+       struct delayed_work link_led_work;
 };
 
 #define ZD_MAC_STATS_BUFFER_SIZE 16
@@ -133,8 +133,10 @@ struct zd_mac {
        struct iw_statistics iw_stats;
 
        struct housekeeping housekeeping;
-       struct work_struct set_rts_cts_work;
-       struct work_struct set_basic_rates_work;
+       struct work_struct set_multicast_hash_work;
+       struct zd_mc_hash multicast_hash;
+       struct delayed_work set_rts_cts_work;
+       struct delayed_work set_basic_rates_work;
 
        unsigned int stats_count;
        u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
@@ -189,6 +191,7 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type);
 int zd_mac_open(struct net_device *netdev);
 int zd_mac_stop(struct net_device *netdev);
 int zd_mac_set_mac_address(struct net_device *dev, void *p);
+void zd_mac_set_multicast_list(struct net_device *netdev);
 
 int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length);
 
index 60f1b0f6d45b81d50c4fab4cb3a5ec6d9913c6fd..8bda48de31ef7f58da1a329d4a017a23b716b688 100644 (file)
@@ -242,7 +242,7 @@ struct net_device *zd_netdev_alloc(struct usb_interface *intf)
        netdev->open = zd_mac_open;
        netdev->stop = zd_mac_stop;
        /* netdev->get_stats = */
-       /* netdev->set_multicast_list = */
+       netdev->set_multicast_list = zd_mac_set_multicast_list;
        netdev->set_mac_address = zd_mac_set_mac_address;
        netdev->wireless_handlers = &iw_handler_def;
        /* netdev->ethtool_ops = */
index fc4bc9b94c748d2e1736913f8d4978bc07cfcb49..a83c3db7d18fb81a1cd58347ea22ce12a8f9b29c 100644 (file)
@@ -29,7 +29,7 @@
 
 struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 
-static void wq_sync_buffer(void *);
+static void wq_sync_buffer(struct work_struct *work);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
@@ -65,7 +65,7 @@ int alloc_cpu_buffers(void)
                b->sample_received = 0;
                b->sample_lost_overflow = 0;
                b->cpu = i;
-               INIT_WORK(&b->work, wq_sync_buffer, b);
+               INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
        }
        return 0;
 
@@ -282,9 +282,10 @@ void oprofile_add_trace(unsigned long pc)
  * By using schedule_delayed_work_on and then schedule_delayed_work
  * we guarantee this will stay on the correct cpu
  */
-static void wq_sync_buffer(void * data)
+static void wq_sync_buffer(struct work_struct *work)
 {
-       struct oprofile_cpu_buffer * b = data;
+       struct oprofile_cpu_buffer * b =
+               container_of(work, struct oprofile_cpu_buffer, work.work);
        if (b->cpu != smp_processor_id()) {
                printk("WQ on CPU%d, prefer CPU%d\n",
                       smp_processor_id(), b->cpu);
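
cpu_buffer.work becomes a delayed_work because the sync is periodic: it is first scheduled on the owning CPU and then re-armed from wq_sync_buffer() itself so it stays there, as the comment above describes. The call sites are outside this hunk; they presumably look along these lines:

    /* start-up: pin the first run to the buffer's CPU */
    schedule_delayed_work_on(b->cpu, &b->work, DEFAULT_TIMER_EXPIRE);

    /* at the end of wq_sync_buffer(), running on that CPU: re-arm in place */
    if (work_enabled)
            schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
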
index 09abb80e05701afaa3c15ede0996ab8bda316915..49900d9e3235e613cde4e7387a3ce0acd9a10f45 100644 (file)
@@ -43,7 +43,7 @@ struct oprofile_cpu_buffer {
        unsigned long sample_lost_overflow;
        unsigned long backtrace_aborted;
        int cpu;
-       struct work_struct work;
+       struct delayed_work work;
 } ____cacheline_aligned;
 
 extern struct oprofile_cpu_buffer cpu_buffer[];
index b953d5907c05aac311151b49cc1ecc5ff78ad1a4..e60b4bf6bae8f754e48daa268846407b0404096d 100644 (file)
@@ -166,14 +166,6 @@ static int parport_config(struct pcmcia_device *link)
     
     tuple.TupleData = (cisdata_t *)buf;
     tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
-    tuple.Attributes = 0;
-    tuple.DesiredTuple = CISTPL_CONFIG;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-    link->conf.Present = parse.config.rmask[0];
-
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -263,6 +255,7 @@ void parport_cs_release(struct pcmcia_device *link)
 
 static struct pcmcia_device_id parport_ids[] = {
        PCMCIA_DEVICE_FUNC_ID(3),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
        PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
        PCMCIA_DEVICE_NULL
 };
index 39c96641bc72cdf0bdc6a9d71a192a99a4f30e8b..b61c17b3e298a8f3f81121a1d46e207cc7ace60a 100644 (file)
@@ -1975,7 +1975,7 @@ static int __devinit parport_ECPPS2_supported(struct parport *pb){return 0;}
 /* --- IRQ detection -------------------------------------- */
 
 /* Only if supports ECP mode */
-static int __devinit programmable_irq_support(struct parport *pb)
+static int programmable_irq_support(struct parport *pb)
 {
        int irq, intrLine;
        unsigned char oecr = inb (ECONTROL (pb));
@@ -1992,7 +1992,7 @@ static int __devinit programmable_irq_support(struct parport *pb)
        return irq;
 }
 
-static int __devinit irq_probe_ECP(struct parport *pb)
+static int irq_probe_ECP(struct parport *pb)
 {
        int i;
        unsigned long irqs;
@@ -2020,7 +2020,7 @@ static int __devinit irq_probe_ECP(struct parport *pb)
  * This detection seems to work only on National Semiconductor chips;
  * it does not work on SMC, LGS, and Winbond chips.
  */
-static int __devinit irq_probe_EPP(struct parport *pb)
+static int irq_probe_EPP(struct parport *pb)
 {
 #ifndef ADVANCED_DETECT
        return PARPORT_IRQ_NONE;
@@ -2059,7 +2059,7 @@ static int __devinit irq_probe_EPP(struct parport *pb)
 #endif /* Advanced detection */
 }
 
-static int __devinit irq_probe_SPP(struct parport *pb)
+static int irq_probe_SPP(struct parport *pb)
 {
        /* Don't even try to do this. */
        return PARPORT_IRQ_NONE;
@@ -2747,6 +2747,7 @@ enum parport_pc_pci_cards {
        titan_1284p2,
        avlab_1p,
        avlab_2p,
+       oxsemi_952,
        oxsemi_954,
        oxsemi_840,
        aks_0100,
@@ -2822,6 +2823,7 @@ static struct parport_pc_pci {
        /* avlab_2p             */      { 2, { { 0, 1}, { 2, 3 },} },
        /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
         * and 840 locks up if you write 1 to bit 2! */
+       /* oxsemi_952 */                { 1, { { 0, 1 }, } },
        /* oxsemi_954 */                { 1, { { 0, -1 }, } },
        /* oxsemi_840 */                { 1, { { 0, -1 }, } },
        /* aks_0100 */                  { 1, { { 0, -1 }, } },
@@ -2895,6 +2897,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
        /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
        { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
        { 0x14db, 0x2121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_2p},
+       { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952PP,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_952 },
        { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954PP,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
        { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
index ea2087c34149f23244401e480b9cf491693b8a7c..50757695844fa1de842b28a65651c2f646a388c0 100644 (file)
@@ -70,7 +70,7 @@ struct slot {
        struct hotplug_slot *hotplug_slot;
        struct list_head        slot_list;
        char name[SLOT_NAME_SIZE];
-       struct work_struct work;        /* work for button event */
+       struct delayed_work work;       /* work for button event */
        struct mutex lock;
 };
 
@@ -187,7 +187,7 @@ extern int  shpchp_configure_device(struct slot *p_slot);
 extern int     shpchp_unconfigure_device(struct slot *p_slot);
 extern void    shpchp_remove_ctrl_files(struct controller *ctrl);
 extern void    cleanup_slots(struct controller *ctrl);
-extern void    queue_pushbutton_work(void *data);
+extern void    queue_pushbutton_work(struct work_struct *work);
 
 
 #ifdef CONFIG_ACPI
index 235c18a22393d04e0ba86ec5f511d086149f6d75..4eac85b3d90e0a91f449101cf6aed4f1338238be 100644 (file)
@@ -159,7 +159,7 @@ static int init_slots(struct controller *ctrl)
                        goto error_info;
 
                slot->number = sun;
-               INIT_WORK(&slot->work, queue_pushbutton_work, slot);
+               INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work);
 
                /* register this slot with the hotplug pci core */
                hotplug_slot->private = slot;
index c39901dbff202ee46cb04096e639060088e017f9..158ac78360961c80d02c03e027f03cb12c9d556f 100644 (file)
@@ -36,7 +36,7 @@
 #include "../pci.h"
 #include "shpchp.h"
 
-static void interrupt_event_handler(void *data);
+static void interrupt_event_handler(struct work_struct *work);
 static int shpchp_enable_slot(struct slot *p_slot);
 static int shpchp_disable_slot(struct slot *p_slot);
 
@@ -50,7 +50,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
 
        info->event_type = event_type;
        info->p_slot = p_slot;
-       INIT_WORK(&info->work, interrupt_event_handler, info);
+       INIT_WORK(&info->work, interrupt_event_handler);
 
        schedule_work(&info->work);
 
@@ -408,9 +408,10 @@ struct pushbutton_work_info {
  * Handles all pending events and exits.
  *
  */
-static void shpchp_pushbutton_thread(void *data)
+static void shpchp_pushbutton_thread(struct work_struct *work)
 {
-       struct pushbutton_work_info *info = data;
+       struct pushbutton_work_info *info =
+               container_of(work, struct pushbutton_work_info, work);
        struct slot *p_slot = info->p_slot;
 
        mutex_lock(&p_slot->lock);
@@ -436,9 +437,9 @@ static void shpchp_pushbutton_thread(void *data)
        kfree(info);
 }
 
-void queue_pushbutton_work(void *data)
+void queue_pushbutton_work(struct work_struct *work)
 {
-       struct slot *p_slot = data;
+       struct slot *p_slot = container_of(work, struct slot, work.work);
        struct pushbutton_work_info *info;
 
        info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -447,7 +448,7 @@ void queue_pushbutton_work(void *data)
                return;
        }
        info->p_slot = p_slot;
-       INIT_WORK(&info->work, shpchp_pushbutton_thread, info);
+       INIT_WORK(&info->work, shpchp_pushbutton_thread);
 
        mutex_lock(&p_slot->lock);
        switch (p_slot->state) {
@@ -541,9 +542,9 @@ static void handle_button_press_event(struct slot *p_slot)
        }
 }
 
-static void interrupt_event_handler(void *data)
+static void interrupt_event_handler(struct work_struct *work)
 {
-       struct event_info *info = data;
+       struct event_info *info = container_of(work, struct event_info, work);
        struct slot *p_slot = info->p_slot;
 
        mutex_lock(&p_slot->lock);
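
The shpchp changes apply the same conversion to one-shot work items whose context is allocated per event: the work_struct is embedded in the context structure, so the handler can recover it with container_of() and free it when done. A small sketch of that idiom under the same post-conversion API; my_event_info and my_event_handler are illustrative names:

        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct my_event_info {
                u32 event_type;
                struct work_struct work;
        };

        static void my_event_handler(struct work_struct *work)
        {
                struct my_event_info *info =
                        container_of(work, struct my_event_info, work);

                /* ... act on info->event_type ... */
                kfree(info);            /* handler owns the allocation */
        }

        static int my_queue_event(u32 event_type)
        {
                struct my_event_info *info = kmalloc(sizeof(*info), GFP_KERNEL);

                if (!info)
                        return -ENOMEM;
                info->event_type = event_type;
                INIT_WORK(&info->work, my_event_handler);
                schedule_work(&info->work);
                return 0;
        }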
index c2828a37c2f79fbc07f51107a285330514e6be39..ed3f7e1a563c79c06d9808bfc59c795665fc11c5 100644 (file)
@@ -26,7 +26,7 @@
 
 static DEFINE_SPINLOCK(msi_lock);
 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
-static kmem_cache_t* msi_cachep;
+static struct kmem_cache* msi_cachep;
 
 static int pci_msi_enable = 1;
 
index 04c43ef529ac279dcded06e2f9aad4fda716ff46..55866b6b26fac755d6ba194fedf875965d71b060 100644 (file)
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
        rpc->e_lock = SPIN_LOCK_UNLOCKED;
 
        rpc->rpd = dev;
-       INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev);
+       INIT_WORK(&rpc->dpc_handler, aer_isr);
        rpc->prod_idx = rpc->cons_idx = 0;
        mutex_init(&rpc->rpc_mutex);
        init_waitqueue_head(&rpc->wait_release);
index daf0cad88fc82fa1295ff5d5c4418d4d12553c92..3c0a58f64dd811d455051901ac1da239daa99b3a 100644 (file)
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type;
 extern void aer_enable_rootport(struct aer_rpc *rpc);
 extern void aer_delete_rootport(struct aer_rpc *rpc);
 extern int aer_init(struct pcie_device *dev);
-extern void aer_isr(void *context);
+extern void aer_isr(struct work_struct *work);
 extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
 extern int aer_osc_setup(struct pci_dev *dev);
 
index 1c7e660d6535152272782ab8ce9f876de76aed17..08e13033ced8fc642df5565c1e9cf07867563862 100644 (file)
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device,
 
 /**
  * aer_isr - consume errors detected by root port
- * @context: pointer to a private data of pcie device
+ * @work: definition of this work item
  *
  * Invoked, as DPC, when root port records new detected error
  **/
-void aer_isr(void *context)
+void aer_isr(struct work_struct *work)
 {
-       struct pcie_device *p_device = (struct pcie_device *) context;
-       struct aer_rpc *rpc = get_service_data(p_device);
+       struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+       struct pcie_device *p_device = rpc->rpd;
        struct aer_err_source *e_src;
 
        mutex_lock(&rpc->rpc_mutex);
index 0eeac60042b347052a0dc9d380d9d74de256735c..6a3c1e7289001651b981663878bde47f78678fcb 100644 (file)
@@ -873,6 +873,7 @@ void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
        dev->dev.release = pci_release_dev;
        pci_dev_get(dev);
 
+       set_dev_node(&dev->dev, pcibus_to_node(bus));
        dev->dev.dma_mask = &dev->dma_mask;
        dev->dev.coherent_dma_mask = 0xffffffffull;
 
index 3bcb7dc32995ce01391998d1f1e6d2a4e8162eea..b6746301d9a9cd2f56a3e85472eb4cc21312ad5b 100644 (file)
  * A0..A10 work in each range; A23 indicates I/O space;  A25 is CFRNW;
  * some other bit in {A24,A22..A11} is nREG to flag memory access
  * (vs attributes).  So more than 2KB/region would just be waste.
+ * Note: These are offsets from the physical base address.
  */
-#define        CF_ATTR_PHYS    (AT91_CF_BASE)
-#define        CF_IO_PHYS      (AT91_CF_BASE  + (1 << 23))
-#define        CF_MEM_PHYS     (AT91_CF_BASE  + 0x017ff800)
+#define        CF_ATTR_PHYS    (0)
+#define        CF_IO_PHYS      (1 << 23)
+#define        CF_MEM_PHYS     (0x017ff800)
 
 /*--------------------------------------------------------------------------*/
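(For reference on the new offsets: CF_IO_PHYS = 1 << 23 = 0x00800000, i.e. the window with A23 set, and CF_MEM_PHYS = 0x017ff800 = 0x01800000 - SZ_2K, so the 2 KiB memory window ends just below the 24 MiB boundary. Both are now added at runtime to io->start, the chip-select's physical base recorded in cf->phys_baseaddr, instead of being hard-wired to AT91_CF_BASE.)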
 
@@ -48,6 +49,8 @@ struct at91_cf_socket {
 
        struct platform_device  *pdev;
        struct at91_cf_data     *board;
+
+       unsigned long           phys_baseaddr;
 };
 
 #define        SZ_2K                   (2 * SZ_1K)
@@ -154,9 +157,8 @@ static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
 
        /*
         * Use 16 bit accesses unless/until we need 8-bit i/o space.
-        * Always set CSR4 ... PCMCIA won't always unmap things.
         */
-       csr = at91_sys_read(AT91_SMC_CSR(4)) & ~AT91_SMC_DBW;
+       csr = at91_sys_read(AT91_SMC_CSR(cf->board->chipselect)) & ~AT91_SMC_DBW;
 
        /*
         * NOTE: this CF controller ignores IOIS16, so we can't really do
@@ -168,14 +170,14 @@ static int at91_cf_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
         * some cards only like that way to get at the odd byte, despite
         * CF 3.0 spec table 35 also giving the D8-D15 option.
         */
-       if (!(io->flags & (MAP_16BIT|MAP_AUTOSZ))) {
+       if (!(io->flags & (MAP_16BIT | MAP_AUTOSZ))) {
                csr |= AT91_SMC_DBW_8;
                pr_debug("%s: 8bit i/o bus\n", driver_name);
        } else {
                csr |= AT91_SMC_DBW_16;
                pr_debug("%s: 16bit i/o bus\n", driver_name);
        }
-       at91_sys_write(AT91_SMC_CSR(4), csr);
+       at91_sys_write(AT91_SMC_CSR(cf->board->chipselect), csr);
 
        io->start = cf->socket.io_offset;
        io->stop = io->start + SZ_2K - 1;
@@ -194,11 +196,11 @@ at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map)
 
        cf = container_of(s, struct at91_cf_socket, socket);
 
-       map->flags &= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT;
+       map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT);
        if (map->flags & MAP_ATTRIB)
-               map->static_start = CF_ATTR_PHYS;
+               map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS;
        else
-               map->static_start = CF_MEM_PHYS;
+               map->static_start = cf->phys_baseaddr + CF_MEM_PHYS;
 
        return 0;
 }
@@ -219,7 +221,6 @@ static int __init at91_cf_probe(struct platform_device *pdev)
        struct at91_cf_socket   *cf;
        struct at91_cf_data     *board = pdev->dev.platform_data;
        struct resource         *io;
-       unsigned int            csa;
        int                     status;
 
        if (!board || !board->det_pin || !board->rst_pin)
@@ -235,33 +236,11 @@ static int __init at91_cf_probe(struct platform_device *pdev)
 
        cf->board = board;
        cf->pdev = pdev;
+       cf->phys_baseaddr = io->start;
        platform_set_drvdata(pdev, cf);
 
-       /* CF takes over CS4, CS5, CS6 */
-       csa = at91_sys_read(AT91_EBI_CSA);
-       at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);
-
-       /* nWAIT is _not_ a default setting */
-       (void) at91_set_A_periph(AT91_PIN_PC6, 1);      /*  nWAIT */
-
-       /*
-        * Static memory controller timing adjustments.
-        * REVISIT:  these timings are in terms of MCK cycles, so
-        * when MCK changes (cpufreq etc) so must these values...
-        */
-       at91_sys_write(AT91_SMC_CSR(4),
-                                 AT91_SMC_ACSS_STD
-                               | AT91_SMC_DBW_16
-                               | AT91_SMC_BAT
-                               | AT91_SMC_WSEN
-                               | AT91_SMC_NWS_(32)     /* wait states */
-                               | AT91_SMC_RWSETUP_(6)  /* setup time */
-                               | AT91_SMC_RWHOLD_(4)   /* hold time */
-       );
-
        /* must be a GPIO; ergo must trigger on both edges */
-       status = request_irq(board->det_pin, at91_cf_irq,
-                       IRQF_SAMPLE_RANDOM, driver_name, cf);
+       status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
        if (status < 0)
                goto fail0;
        device_init_wakeup(&pdev->dev, 1);
@@ -282,14 +261,18 @@ static int __init at91_cf_probe(struct platform_device *pdev)
                cf->socket.pci_irq = NR_IRQS + 1;
 
        /* pcmcia layer only remaps "real" memory not iospace */
-       cf->socket.io_offset = (unsigned long) ioremap(CF_IO_PHYS, SZ_2K);
-       if (!cf->socket.io_offset)
+       cf->socket.io_offset = (unsigned long) ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
+       if (!cf->socket.io_offset) {
+               status = -ENXIO;
                goto fail1;
+       }
 
-       /* reserve CS4, CS5, and CS6 regions; but use just CS4 */
+       /* reserve chip-select regions */
        if (!request_mem_region(io->start, io->end + 1 - io->start,
-                               driver_name))
+                               driver_name)) {
+               status = -ENXIO;
                goto fail1;
+       }
 
        pr_info("%s: irqs det #%d, io #%d\n", driver_name,
                board->det_pin, board->irq_pin);
@@ -319,9 +302,7 @@ fail1:
 fail0a:
        device_init_wakeup(&pdev->dev, 0);
        free_irq(board->det_pin, cf);
-       device_init_wakeup(&pdev->dev, 0);
 fail0:
-       at91_sys_write(AT91_EBI_CSA, csa);
        kfree(cf);
        return status;
 }
@@ -331,19 +312,15 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
        struct at91_cf_socket   *cf = platform_get_drvdata(pdev);
        struct at91_cf_data     *board = cf->board;
        struct resource         *io = cf->socket.io[0].res;
-       unsigned int            csa;
 
        pcmcia_unregister_socket(&cf->socket);
        if (board->irq_pin)
                free_irq(board->irq_pin, cf);
-       free_irq(board->det_pin, cf);
        device_init_wakeup(&pdev->dev, 0);
+       free_irq(board->det_pin, cf);
        iounmap((void __iomem *) cf->socket.io_offset);
        release_mem_region(io->start, io->end + 1 - io->start);
 
-       csa = at91_sys_read(AT91_EBI_CSA);
-       at91_sys_write(AT91_EBI_CSA, csa & ~AT91_EBI_CS4A);
-
        kfree(cf);
        return 0;
 }
index f9cd831a3f3159ab18085dae94476976ade6c4fd..606a467403389718a03f21bab041f61fde4caee4 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/device.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include <asm/system.h>
 #include <asm/irq.h>
 
index d6164cd583fd1a29d9acf400a2ee6c39cafdf7d4..f573ea04db6f577ca78f90f34ce5638842c57bdc 100644 (file)
@@ -135,7 +135,7 @@ int pccard_get_status(struct pcmcia_socket *s, struct pcmcia_device *p_dev, cs_s
 struct pcmcia_callback{
        struct module   *owner;
        int             (*event) (struct pcmcia_socket *s, event_t event, int priority);
-       void            (*requery) (struct pcmcia_socket *s);
+       void            (*requery) (struct pcmcia_socket *s, int new_cis);
        int             (*suspend) (struct pcmcia_socket *s);
        int             (*resume) (struct pcmcia_socket *s);
 };
index 21d83a895b21c2ed9d97c7bcaea5750c7671fb79..7355eb455a881324f68a34dfd4cbe9f91acf619d 100644 (file)
@@ -231,65 +231,6 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
 }
 
 
-#ifdef CONFIG_PCMCIA_LOAD_CIS
-
-/**
- * pcmcia_load_firmware - load CIS from userspace if device-provided is broken
- * @dev - the pcmcia device which needs a CIS override
- * @filename - requested filename in /lib/firmware/
- *
- * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if
- * the one provided by the card is broken. The firmware files reside in
- * /lib/firmware/ in userspace.
- */
-static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
-{
-       struct pcmcia_socket *s = dev->socket;
-       const struct firmware *fw;
-       char path[20];
-       int ret=-ENOMEM;
-       cisdump_t *cis;
-
-       if (!filename)
-               return -EINVAL;
-
-       ds_dbg(1, "trying to load firmware %s\n", filename);
-
-       if (strlen(filename) > 14)
-               return -EINVAL;
-
-       snprintf(path, 20, "%s", filename);
-
-       if (request_firmware(&fw, path, &dev->dev) == 0) {
-               if (fw->size >= CISTPL_MAX_CIS_SIZE)
-                       goto release;
-
-               cis = kzalloc(sizeof(cisdump_t), GFP_KERNEL);
-               if (!cis)
-                       goto release;
-
-               cis->Length = fw->size + 1;
-               memcpy(cis->Data, fw->data, fw->size);
-
-               if (!pcmcia_replace_cis(s, cis))
-                       ret = 0;
-       }
- release:
-       release_firmware(fw);
-
-       return (ret);
-}
-
-#else /* !CONFIG_PCMCIA_LOAD_CIS */
-
-static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
-{
-       return -ENODEV;
-}
-
-#endif
-
-
 /*======================================================================*/
 
 
@@ -309,6 +250,8 @@ int pcmcia_register_driver(struct pcmcia_driver *driver)
        driver->drv.bus = &pcmcia_bus_type;
        driver->drv.owner = driver->owner;
 
+       ds_dbg(3, "registering driver %s\n", driver->drv.name);
+
        return driver_register(&driver->drv);
 }
 EXPORT_SYMBOL(pcmcia_register_driver);
@@ -318,6 +261,7 @@ EXPORT_SYMBOL(pcmcia_register_driver);
  */
 void pcmcia_unregister_driver(struct pcmcia_driver *driver)
 {
+       ds_dbg(3, "unregistering driver %s\n", driver->drv.name);
        driver_unregister(&driver->drv);
 }
 EXPORT_SYMBOL(pcmcia_unregister_driver);
@@ -343,23 +287,27 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev)
 static void pcmcia_release_function(struct kref *ref)
 {
        struct config_t *c = container_of(ref, struct config_t, ref);
+       ds_dbg(1, "releasing config_t\n");
        kfree(c);
 }
 
 static void pcmcia_release_dev(struct device *dev)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
-       ds_dbg(1, "releasing dev %p\n", p_dev);
+       ds_dbg(1, "releasing device %s\n", p_dev->dev.bus_id);
        pcmcia_put_socket(p_dev->socket);
        kfree(p_dev->devname);
        kref_put(&p_dev->function_config->ref, pcmcia_release_function);
        kfree(p_dev);
 }
 
-static void pcmcia_add_pseudo_device(struct pcmcia_socket *s)
+static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
 {
        if (!s->pcmcia_state.device_add_pending) {
+               ds_dbg(1, "scheduling to add %s secondary"
+                      " device to %d\n", mfc ? "mfc" : "pfc", s->sock);
                s->pcmcia_state.device_add_pending = 1;
+               s->pcmcia_state.mfc_pfc = mfc;
                schedule_work(&s->device_add);
        }
        return;
@@ -371,6 +319,7 @@ static int pcmcia_device_probe(struct device * dev)
        struct pcmcia_driver *p_drv;
        struct pcmcia_device_id *did;
        struct pcmcia_socket *s;
+       cistpl_config_t cis_config;
        int ret = 0;
 
        dev = get_device(dev);
@@ -381,15 +330,33 @@ static int pcmcia_device_probe(struct device * dev)
        p_drv = to_pcmcia_drv(dev->driver);
        s = p_dev->socket;
 
+       ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id,
+              p_drv->drv.name);
+
        if ((!p_drv->probe) || (!p_dev->function_config) ||
            (!try_module_get(p_drv->owner))) {
                ret = -EINVAL;
                goto put_dev;
        }
 
+       /* set up some more device information */
+       ret = pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_CONFIG,
+                               &cis_config);
+       if (!ret) {
+               p_dev->conf.ConfigBase = cis_config.base;
+               p_dev->conf.Present = cis_config.rmask[0];
+       } else {
+               printk(KERN_INFO "pcmcia: could not parse base and rmask0 of CIS\n");
+               p_dev->conf.ConfigBase = 0;
+               p_dev->conf.Present = 0;
+       }
+
        ret = p_drv->probe(p_dev);
-       if (ret)
+       if (ret) {
+               ds_dbg(1, "binding %s to %s failed with %d\n",
+                      p_dev->dev.bus_id, p_drv->drv.name, ret);
                goto put_module;
+       }
 
        /* handle pseudo multifunction devices:
         * there are at most two pseudo multifunction devices.
@@ -400,7 +367,7 @@ static int pcmcia_device_probe(struct device * dev)
        did = p_dev->dev.driver_data;
        if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) &&
            (p_dev->socket->device_count == 1) && (p_dev->device_no == 0))
-               pcmcia_add_pseudo_device(p_dev->socket);
+               pcmcia_add_device_later(p_dev->socket, 0);
 
  put_module:
        if (ret)
@@ -421,8 +388,8 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
        struct pcmcia_device    *tmp;
        unsigned long           flags;
 
-       ds_dbg(2, "unbind_request(%d)\n", s->sock);
-
+       ds_dbg(2, "pcmcia_card_remove(%d) %s\n", s->sock,
+              leftover ? leftover->devname : "");
 
        if (!leftover)
                s->device_count = 0;
@@ -439,6 +406,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
                p_dev->_removed=1;
                spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
 
+               ds_dbg(2, "unregistering device %s\n", p_dev->dev.bus_id);
                device_unregister(&p_dev->dev);
        }
 
@@ -455,6 +423,8 @@ static int pcmcia_device_remove(struct device * dev)
        p_dev = to_pcmcia_dev(dev);
        p_drv = to_pcmcia_drv(dev->driver);
 
+       ds_dbg(1, "removing device %s\n", p_dev->dev.bus_id);
+
        /* If we're removing the primary module driving a
         * pseudo multi-function card, we need to unbind
         * all devices
@@ -587,8 +557,10 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
 
        mutex_lock(&device_add_lock);
 
-       /* max of 2 devices per card */
-       if (s->device_count == 2)
+       ds_dbg(3, "adding device to %d, function %d\n", s->sock, function);
+
+       /* max of 4 devices per card */
+       if (s->device_count == 4)
                goto err_put;
 
        p_dev = kzalloc(sizeof(struct pcmcia_device), GFP_KERNEL);
@@ -598,8 +570,6 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
        p_dev->socket = s;
        p_dev->device_no = (s->device_count++);
        p_dev->func   = function;
-       if (s->functions <= function)
-               s->functions = function + 1;
 
        p_dev->dev.bus = &pcmcia_bus_type;
        p_dev->dev.parent = s->dev.dev;
@@ -610,8 +580,8 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
        if (!p_dev->devname)
                goto err_free;
        sprintf (p_dev->devname, "pcmcia%s", p_dev->dev.bus_id);
+       ds_dbg(3, "devname is %s\n", p_dev->devname);
 
-       /* compat */
        spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
 
        /*
@@ -631,6 +601,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
        spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
 
        if (!p_dev->function_config) {
+               ds_dbg(3, "creating config_t for %s\n", p_dev->dev.bus_id);
                p_dev->function_config = kzalloc(sizeof(struct config_t),
                                                 GFP_KERNEL);
                if (!p_dev->function_config)
@@ -674,11 +645,16 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
        unsigned int no_funcs, i;
        int ret = 0;
 
-       if (!(s->resource_setup_done))
+       if (!(s->resource_setup_done)) {
+               ds_dbg(3, "no resources available, delaying card_add\n");
                return -EAGAIN; /* try again, but later... */
+       }
 
-       if (pcmcia_validate_mem(s))
+       if (pcmcia_validate_mem(s)) {
+               ds_dbg(3, "validating mem resources failed, "
+                      "delaying card_add\n");
                return -EAGAIN; /* try again, but later... */
+       }
 
        ret = pccard_validate_cis(s, BIND_FN_ALL, &cisinfo);
        if (ret || !cisinfo.Chains) {
@@ -690,6 +666,7 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
                no_funcs = mfc.nfn;
        else
                no_funcs = 1;
+       s->functions = no_funcs;
 
        for (i=0; i < no_funcs; i++)
                pcmcia_device_add(s, i);
@@ -698,38 +675,50 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
 }
 
 
-static void pcmcia_delayed_add_pseudo_device(void *data)
+static void pcmcia_delayed_add_device(struct work_struct *work)
 {
-       struct pcmcia_socket *s = data;
-       pcmcia_device_add(s, 0);
+       struct pcmcia_socket *s =
+               container_of(work, struct pcmcia_socket, device_add);
+       ds_dbg(1, "adding additional device to %d\n", s->sock);
+       pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
        s->pcmcia_state.device_add_pending = 0;
+       s->pcmcia_state.mfc_pfc = 0;
 }
 
 static int pcmcia_requery(struct device *dev, void * _data)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
-       if (!p_dev->dev.driver)
+       if (!p_dev->dev.driver) {
+               ds_dbg(1, "update device information for %s\n",
+                      p_dev->dev.bus_id);
                pcmcia_device_query(p_dev);
+       }
 
        return 0;
 }
 
-static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
+static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis)
 {
-       int no_devices=0;
+       int no_devices = 0;
        int ret = 0;
        unsigned long flags;
 
        /* must be called with skt_mutex held */
+       ds_dbg(0, "re-scanning socket %d\n", skt->sock);
+
        spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
        if (list_empty(&skt->devices_list))
-               no_devices=1;
+               no_devices = 1;
        spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
 
+       /* If this is because of a CIS override, start over */
+       if (new_cis && !no_devices)
+               pcmcia_card_remove(skt, NULL);
+
        /* if no devices were added for this socket yet because of
         * missing resource information or other trouble, we need to
         * do this now. */
-       if (no_devices) {
+       if (no_devices || new_cis) {
                ret = pcmcia_card_add(skt);
                if (ret)
                        return;
@@ -747,6 +736,97 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt)
                printk(KERN_INFO "pcmcia: bus_rescan_devices failed\n");
 }
 
+#ifdef CONFIG_PCMCIA_LOAD_CIS
+
+/**
+ * pcmcia_load_firmware - load CIS from userspace if device-provided is broken
+ * @dev - the pcmcia device which needs a CIS override
+ * @filename - requested filename in /lib/firmware/
+ *
+ * This uses the in-kernel firmware loading mechanism to use a "fake CIS" if
+ * the one provided by the card is broken. The firmware files reside in
+ * /lib/firmware/ in userspace.
+ */
+static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
+{
+       struct pcmcia_socket *s = dev->socket;
+       const struct firmware *fw;
+       char path[20];
+       int ret = -ENOMEM;
+       int no_funcs;
+       int old_funcs;
+       cisdump_t *cis;
+       cistpl_longlink_mfc_t mfc;
+
+       if (!filename)
+               return -EINVAL;
+
+       ds_dbg(1, "trying to load CIS file %s\n", filename);
+
+       if (strlen(filename) > 14) {
+               printk(KERN_WARNING "pcmcia: CIS filename is too long\n");
+               return -EINVAL;
+       }
+
+       snprintf(path, 20, "%s", filename);
+
+       if (request_firmware(&fw, path, &dev->dev) == 0) {
+               if (fw->size >= CISTPL_MAX_CIS_SIZE) {
+                       ret = -EINVAL;
+                       printk(KERN_ERR "pcmcia: CIS override is too big\n");
+                       goto release;
+               }
+
+               cis = kzalloc(sizeof(cisdump_t), GFP_KERNEL);
+               if (!cis) {
+                       ret = -ENOMEM;
+                       goto release;
+               }
+
+               cis->Length = fw->size + 1;
+               memcpy(cis->Data, fw->data, fw->size);
+
+               if (!pcmcia_replace_cis(s, cis))
+                       ret = 0;
+               else {
+                       printk(KERN_ERR "pcmcia: CIS override failed\n");
+                       goto release;
+               }
+
+
+               /* update information */
+               pcmcia_device_query(dev);
+
+               /* does this cis override add or remove functions? */
+               old_funcs = s->functions;
+
+               if (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_LONGLINK_MFC, &mfc))
+                       no_funcs = mfc.nfn;
+               else
+                       no_funcs = 1;
+               s->functions = no_funcs;
+
+               if (old_funcs > no_funcs)
+                       pcmcia_card_remove(s, dev);
+               else if (no_funcs > old_funcs)
+                       pcmcia_add_device_later(s, 1);
+       }
+ release:
+       release_firmware(fw);
+
+       return (ret);
+}
+
+#else /* !CONFIG_PCMCIA_LOAD_CIS */
+
+static inline int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
+{
+       return -ENODEV;
+}
+
+#endif
+
+
 static inline int pcmcia_devmatch(struct pcmcia_device *dev,
                                  struct pcmcia_device_id *did)
 {
@@ -813,11 +893,14 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
                 * after it has re-checked that there is no possible module
                 * with a prod_id/manf_id/card_id match.
                 */
+               ds_dbg(0, "skipping FUNC_ID match for %s until userspace "
+                      "interaction\n", dev->dev.bus_id);
                if (!dev->allow_func_id_match)
                        return 0;
        }
 
        if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
+               ds_dbg(0, "device %s needs a fake CIS\n", dev->dev.bus_id);
                if (!dev->socket->fake_cis)
                        pcmcia_load_firmware(dev, did->cisfile);
 
@@ -847,13 +930,21 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
 
 #ifdef CONFIG_PCMCIA_IOCTL
        /* matching by cardmgr */
-       if (p_dev->cardmgr == p_drv)
+       if (p_dev->cardmgr == p_drv) {
+               ds_dbg(0, "cardmgr matched %s to %s\n", dev->bus_id,
+                      drv->name);
                return 1;
+       }
 #endif
 
        while (did && did->match_flags) {
-               if (pcmcia_devmatch(p_dev, did))
+               ds_dbg(3, "trying to match %s to %s\n", dev->bus_id,
+                      drv->name);
+               if (pcmcia_devmatch(p_dev, did)) {
+                       ds_dbg(0, "matched %s to %s\n", dev->bus_id,
+                              drv->name);
                        return 1;
+               }
                did++;
        }
 
@@ -1044,6 +1135,8 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
        struct pcmcia_driver *p_drv = NULL;
        int ret = 0;
 
+       ds_dbg(2, "suspending %s\n", dev->bus_id);
+
        if (dev->driver)
                p_drv = to_pcmcia_drv(dev->driver);
 
@@ -1052,12 +1145,18 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
 
        if (p_drv->suspend) {
                ret = p_drv->suspend(p_dev);
-               if (ret)
+               if (ret) {
+                       printk(KERN_ERR "pcmcia: device %s (driver %s) did "
+                              "not want to go to sleep (%d)\n",
+                              p_dev->devname, p_drv->drv.name, ret);
                        goto out;
+               }
        }
 
-       if (p_dev->device_no == p_dev->func)
+       if (p_dev->device_no == p_dev->func) {
+               ds_dbg(2, "releasing configuration for %s\n", dev->bus_id);
                pcmcia_release_configuration(p_dev);
+       }
 
  out:
        if (!ret)
@@ -1072,6 +1171,8 @@ static int pcmcia_dev_resume(struct device * dev)
         struct pcmcia_driver *p_drv = NULL;
        int ret = 0;
 
+       ds_dbg(2, "resuming %s\n", dev->bus_id);
+
        if (dev->driver)
                p_drv = to_pcmcia_drv(dev->driver);
 
@@ -1079,6 +1180,7 @@ static int pcmcia_dev_resume(struct device * dev)
                goto out;
 
        if (p_dev->device_no == p_dev->func) {
+               ds_dbg(2, "requesting configuration for %s\n", dev->bus_id);
                ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
                if (ret)
                        goto out;
@@ -1120,12 +1222,14 @@ static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
 
 static int pcmcia_bus_resume(struct pcmcia_socket *skt)
 {
+       ds_dbg(2, "resuming socket %d\n", skt->sock);
        bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback);
        return 0;
 }
 
 static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
 {
+       ds_dbg(2, "suspending socket %d\n", skt->sock);
        if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt,
                             pcmcia_bus_suspend_callback)) {
                pcmcia_bus_resume(skt);
@@ -1246,7 +1350,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev,
        init_waitqueue_head(&socket->queue);
 #endif
        INIT_LIST_HEAD(&socket->devices_list);
-       INIT_WORK(&socket->device_add, pcmcia_delayed_add_pseudo_device, socket);
+       INIT_WORK(&socket->device_add, pcmcia_delayed_add_device);
        memset(&socket->pcmcia_state, 0, sizeof(u8));
        socket->device_count = 0;
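
The relocated pcmcia_load_firmware() above keeps using the in-kernel firmware loader to fetch a replacement CIS from /lib/firmware. A minimal sketch of that request/release pattern, independent of PCMCIA; the device pointer and the blob name my_cis.bin are illustrative assumptions:

        #include <linux/firmware.h>

        static int my_load_blob(struct device *dev)
        {
                const struct firmware *fw;
                int ret;

                /* looks up /lib/firmware/my_cis.bin on behalf of this device */
                ret = request_firmware(&fw, "my_cis.bin", dev);
                if (ret)
                        return ret;

                /* fw->data and fw->size stay valid until release_firmware() */
                /* ... copy or parse the blob here ... */

                release_firmware(fw);
                return 0;
        }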
 
index 36fdaa58458ca43969bf019d923dc7592256d5d6..3c22ac4625c21ef1bec7c386cf2f2ca0c00beb16 100644 (file)
@@ -398,7 +398,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
 static void pcc_interrupt_wrapper(u_long data)
 {
        debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n");
-       pcc_interrupt(0, NULL, NULL);
+       pcc_interrupt(0, NULL);
        init_timer(&poll_timer);
        poll_timer.expires = jiffies + poll_interval;
        add_timer(&poll_timer);
index 310ede575caacd14dc38f122a4971d9221543bac..d077870c6731ae9ed63161829ee4bcb3fa2ba218 100644 (file)
@@ -594,7 +594,12 @@ static int ds_ioctl(struct inode * inode, struct file * file,
 
     err = ret = 0;
 
-    if (cmd & IOC_IN) __copy_from_user((char *)buf, uarg, size);
+    if (cmd & IOC_IN) {
+       if (__copy_from_user((char *)buf, uarg, size)) {
+           err = -EFAULT;
+           goto free_out;
+       }
+    }
 
     switch (cmd) {
     case DS_ADJUST_RESOURCE_INFO:
index a70f97fdbbdd3085401d90402ce5602f4622ec03..360c24896548610accfb4a5c3a462abb54f1febf 100644 (file)
@@ -581,10 +581,10 @@ static irqreturn_t pd6729_test(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static int pd6729_check_irq(int irq, int flags)
+static int pd6729_check_irq(int irq)
 {
-       if (request_irq(irq, pd6729_test, flags, "x", pd6729_test) != 0)
-               return -1;
+       if (request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x", pd6729_test)
+               != 0) return -1;
        free_irq(irq, pd6729_test);
        return 0;
 }
@@ -610,7 +610,7 @@ static u_int __devinit pd6729_isa_scan(void)
 
        /* just find interrupts that aren't in use */
        for (i = 0; i < 16; i++)
-               if ((mask0 & (1 << i)) && (pd6729_check_irq(i, 0) == 0))
+               if ((mask0 & (1 << i)) && (pd6729_check_irq(i) == 0))
                        mask |= (1 << i);
 
        printk(KERN_INFO "pd6729: ISA irqs = ");
index 933cd864a5c95c296844493b65d868b7cf7548aa..b005602d6b5376e92f3b62f6e4a9d8fb1acccfe3 100644 (file)
@@ -188,7 +188,7 @@ static ssize_t pccard_store_resource(struct class_device *dev, const char *buf,
            (s->state & SOCKET_PRESENT) &&
            !(s->state & SOCKET_CARDBUS)) {
                if (try_module_get(s->callback->owner)) {
-                       s->callback->requery(s);
+                       s->callback->requery(s, 0);
                        module_put(s->callback->owner);
                }
        }
@@ -325,7 +325,7 @@ static ssize_t pccard_store_cis(struct kobject *kobj, char *buf, loff_t off, siz
        if ((s->callback) && (s->state & SOCKET_PRESENT) &&
            !(s->state & SOCKET_CARDBUS)) {
                if (try_module_get(s->callback->owner)) {
-                       s->callback->requery(s);
+                       s->callback->requery(s, 1);
                        module_put(s->callback->owner);
                }
        }
index 227600cd636048478126adbb20bc7be1157577b1..91c047a7e635d832d7090bf62522a99e6dca8e34 100644 (file)
@@ -164,9 +164,17 @@ static DEVICE_ATTR(card_id,S_IRUGO,pnp_show_card_ids,NULL);
 
 static int pnp_interface_attach_card(struct pnp_card *card)
 {
-       device_create_file(&card->dev,&dev_attr_name);
-       device_create_file(&card->dev,&dev_attr_card_id);
+       int rc = device_create_file(&card->dev,&dev_attr_name);
+       if (rc) return rc;
+
+       rc = device_create_file(&card->dev,&dev_attr_card_id);
+       if (rc) goto err_name;
+
        return 0;
+
+err_name:
+       device_remove_file(&card->dev,&dev_attr_name);
+       return rc;
 }
 
 /**
@@ -306,16 +314,20 @@ found:
        down_write(&dev->dev.bus->subsys.rwsem);
        dev->card_link = clink;
        dev->dev.driver = &drv->link.driver;
-       if (pnp_bus_type.probe(&dev->dev)) {
-               dev->dev.driver = NULL;
-               dev->card_link = NULL;
-               up_write(&dev->dev.bus->subsys.rwsem);
-               return NULL;
-       }
-       device_bind_driver(&dev->dev);
+       if (pnp_bus_type.probe(&dev->dev))
+               goto err_out;
+       if (device_bind_driver(&dev->dev))
+               goto err_out;
+
        up_write(&dev->dev.bus->subsys.rwsem);
 
        return dev;
+
+err_out:
+       dev->dev.driver = NULL;
+       dev->card_link = NULL;
+       up_write(&dev->dev.bus->subsys.rwsem);
+       return NULL;
 }
 
 /**
index 9d8b415eca79ba729e0104d1a4af1e7f185123ef..ac9fcd499f3f0cb9912dee2e1400e6f1f519aad3 100644 (file)
@@ -461,8 +461,19 @@ static DEVICE_ATTR(id,S_IRUGO,pnp_show_current_ids,NULL);
 
 int pnp_interface_attach_device(struct pnp_dev *dev)
 {
-       device_create_file(&dev->dev,&dev_attr_options);
-       device_create_file(&dev->dev,&dev_attr_resources);
-       device_create_file(&dev->dev,&dev_attr_id);
+       int rc = device_create_file(&dev->dev,&dev_attr_options);
+       if (rc) goto err;
+       rc = device_create_file(&dev->dev,&dev_attr_resources);
+       if (rc) goto err_opt;
+       rc = device_create_file(&dev->dev,&dev_attr_id);
+       if (rc) goto err_res;
+
        return 0;
+
+err_res:
+       device_remove_file(&dev->dev,&dev_attr_resources);
+err_opt:
+       device_remove_file(&dev->dev,&dev_attr_options);
+err:
+       return rc;
 }
index 81a6c83d89a644bb9400aef1ee2470f0a536784d..33adeba1a31f1c87930e1266989a44b36b9f775e 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/dmi.h>
 #include <linux/delay.h>
 #include <linux/acpi.h>
+#include <linux/freezer.h>
 
 #include <asm/page.h>
 #include <asm/desc.h>
@@ -530,7 +531,8 @@ static int __init pnpbios_init(void)
        if (check_legacy_ioport(PNPBIOS_BASE))
                return -ENODEV;
 #endif
-       if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table)) {
+       if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
+           paravirt_enabled()) {
                printk(KERN_INFO "PnPBIOS: Disabled\n");
                return -ENODEV;
        }
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
new file mode 100644 (file)
index 0000000..b52d547
--- /dev/null
@@ -0,0 +1 @@
+obj-y += system-bus.o
diff --git a/drivers/ps3/system-bus.c b/drivers/ps3/system-bus.c
new file mode 100644 (file)
index 0000000..d79f949
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ *  PS3 system bus driver.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+
+#include <asm/udbg.h>
+#include <asm/ps3.h>
+#include <asm/lv1call.h>
+#include <asm/firmware.h>
+
+#define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
+static void _dump_mmio_region(const struct ps3_mmio_region* r,
+       const char* func, int line)
+{
+       pr_debug("%s:%d: dev       %u:%u\n", func, line, r->did.bus_id,
+               r->did.dev_id);
+       pr_debug("%s:%d: bus_addr  %lxh\n", func, line, r->bus_addr);
+       pr_debug("%s:%d: len       %lxh\n", func, line, r->len);
+       pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
+}
+
+int ps3_mmio_region_create(struct ps3_mmio_region *r)
+{
+       int result;
+
+       result = lv1_map_device_mmio_region(r->did.bus_id, r->did.dev_id,
+               r->bus_addr, r->len, r->page_size, &r->lpar_addr);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+               r->lpar_addr = r->len = r->bus_addr = 0;
+       }
+
+       dump_mmio_region(r);
+       return result;
+}
+
+int ps3_free_mmio_region(struct ps3_mmio_region *r)
+{
+       int result;
+
+       result = lv1_unmap_device_mmio_region(r->did.bus_id, r->did.dev_id,
+               r->bus_addr);
+
+       if (result)
+               pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
+                       __func__, __LINE__, ps3_result(result));
+
+       r->lpar_addr = r->len = r->bus_addr = 0;
+       return result;
+}
+
+static int ps3_system_bus_match(struct device *_dev,
+       struct device_driver *_drv)
+{
+       int result;
+       struct ps3_system_bus_driver *drv = to_ps3_system_bus_driver(_drv);
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+
+       result = dev->match_id == drv->match_id;
+
+       pr_info("%s:%d: dev=%u(%s), drv=%u(%s): %s\n", __func__, __LINE__,
+               dev->match_id, dev->core.bus_id, drv->match_id, drv->core.name,
+               (result ? "match" : "miss"));
+       return result;
+}
+
+static int ps3_system_bus_probe(struct device *_dev)
+{
+       int result;
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+       struct ps3_system_bus_driver *drv =
+               to_ps3_system_bus_driver(_dev->driver);
+
+       result = lv1_open_device(dev->did.bus_id, dev->did.dev_id, 0);
+
+       if (result) {
+               pr_debug("%s:%d: lv1_open_device failed (%d)\n",
+                       __func__, __LINE__, result);
+               result = -EACCES;
+               goto clean_none;
+       }
+
+       if (dev->d_region->did.bus_id) {
+               result = ps3_dma_region_create(dev->d_region);
+
+               if (result) {
+                       pr_debug("%s:%d: ps3_dma_region_create failed (%d)\n",
+                               __func__, __LINE__, result);
+                       BUG_ON("check region type");
+                       result = -EINVAL;
+                       goto clean_device;
+               }
+       }
+
+       BUG_ON(!drv);
+
+       if (drv->probe)
+               result = drv->probe(dev);
+       else
+               pr_info("%s:%d: %s no probe method\n", __func__, __LINE__,
+                       dev->core.bus_id);
+
+       if (result) {
+               pr_debug("%s:%d: drv->probe failed\n", __func__, __LINE__);
+               goto clean_dma;
+       }
+
+       return result;
+
+clean_dma:
+       ps3_dma_region_free(dev->d_region);
+clean_device:
+       lv1_close_device(dev->did.bus_id, dev->did.dev_id);
+clean_none:
+       return result;
+}
+
+static int ps3_system_bus_remove(struct device *_dev)
+{
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+       struct ps3_system_bus_driver *drv =
+               to_ps3_system_bus_driver(_dev->driver);
+
+       if (drv->remove)
+               drv->remove(dev);
+       else
+               pr_info("%s:%d: %s no remove method\n", __func__, __LINE__,
+                       dev->core.bus_id);
+
+       ps3_dma_region_free(dev->d_region);
+       ps3_free_mmio_region(dev->m_region);
+       lv1_close_device(dev->did.bus_id, dev->did.dev_id);
+
+       return 0;
+}
+
+struct bus_type ps3_system_bus_type = {
+        .name = "ps3_system_bus",
+       .match = ps3_system_bus_match,
+       .probe = ps3_system_bus_probe,
+       .remove = ps3_system_bus_remove,
+};
+
+int __init ps3_system_bus_init(void)
+{
+       int result;
+
+       if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+               return 0;
+
+       result = bus_register(&ps3_system_bus_type);
+       BUG_ON(result);
+       return result;
+}
+
+core_initcall(ps3_system_bus_init);
+
+/* Allocates a contiguous real buffer and creates mappings over it.
+ * Returns the virtual address of the buffer and sets dma_handle
+ * to the dma address (mapping) of the first page.
+ */
+
+static void * ps3_alloc_coherent(struct device *_dev, size_t size,
+       dma_addr_t *dma_handle, gfp_t flag)
+{
+       int result;
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+       unsigned long virt_addr;
+
+       BUG_ON(!dev->d_region->bus_addr);
+
+       flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       flag |= __GFP_ZERO;
+
+       virt_addr = __get_free_pages(flag, get_order(size));
+
+       if (!virt_addr) {
+               pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
+               goto clean_none;
+       }
+
+       result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle);
+
+       if (result) {
+               pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
+                       __func__, __LINE__, result);
+               BUG_ON("check region type");
+               goto clean_alloc;
+       }
+
+       return (void*)virt_addr;
+
+clean_alloc:
+       free_pages(virt_addr, get_order(size));
+clean_none:
+       dma_handle = NULL;
+       return NULL;
+}
+
+static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
+       dma_addr_t dma_handle)
+{
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+
+       ps3_dma_unmap(dev->d_region, dma_handle, size);
+       free_pages((unsigned long)vaddr, get_order(size));
+}
+
+/* Creates TCEs for a user provided buffer.  The user buffer must be
+ * contiguous real kernel storage (not vmalloc).  The address of the buffer
+ * passed here is the kernel (virtual) address of the buffer.  The buffer
+ * need not be page aligned, the dma_addr_t returned will point to the same
+ * byte within the page as vaddr.
+ */
+
+static dma_addr_t ps3_map_single(struct device *_dev, void *ptr, size_t size,
+       enum dma_data_direction direction)
+{
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+       int result;
+       unsigned long bus_addr;
+
+       result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
+               &bus_addr);
+
+       if (result) {
+               pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
+                       __func__, __LINE__, result);
+       }
+
+       return bus_addr;
+}
+
+static void ps3_unmap_single(struct device *_dev, dma_addr_t dma_addr,
+       size_t size, enum dma_data_direction direction)
+{
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+       int result;
+
+       result = ps3_dma_unmap(dev->d_region, dma_addr, size);
+
+       if (result) {
+               pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
+                       __func__, __LINE__, result);
+       }
+}
+
+static int ps3_map_sg(struct device *_dev, struct scatterlist *sg, int nents,
+       enum dma_data_direction direction)
+{
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+       BUG_ON("do");
+#endif
+       return 0;
+}
+
+static void ps3_unmap_sg(struct device *_dev, struct scatterlist *sg,
+       int nents, enum dma_data_direction direction)
+{
+#if defined(CONFIG_PS3_DYNAMIC_DMA)
+       BUG_ON("do");
+#endif
+}
+
+static int ps3_dma_supported(struct device *_dev, u64 mask)
+{
+       return 1;
+}
+
+static struct dma_mapping_ops ps3_dma_ops = {
+       .alloc_coherent = ps3_alloc_coherent,
+       .free_coherent = ps3_free_coherent,
+       .map_single = ps3_map_single,
+       .unmap_single = ps3_unmap_single,
+       .map_sg = ps3_map_sg,
+       .unmap_sg = ps3_unmap_sg,
+       .dma_supported = ps3_dma_supported
+};
+
+/**
+ * ps3_system_bus_release_device - remove a device from the system bus
+ */
+
+static void ps3_system_bus_release_device(struct device *_dev)
+{
+       struct ps3_system_bus_device *dev = to_ps3_system_bus_device(_dev);
+       kfree(dev);
+}
+
+/**
+ * ps3_system_bus_device_register - add a device to the system bus
+ *
+ * ps3_system_bus_device_register() expects the dev object to be allocated
+ * dynamically by the caller.  The system bus takes ownership of the dev
+ * object and frees the object in ps3_system_bus_release_device().
+ */
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
+{
+       int result;
+       static unsigned int dev_count = 1;
+
+       dev->core.parent = NULL;
+       dev->core.bus = &ps3_system_bus_type;
+       dev->core.release = ps3_system_bus_release_device;
+
+       dev->core.archdata.of_node = NULL;
+       dev->core.archdata.dma_ops = &ps3_dma_ops;
+       dev->core.archdata.numa_node = 0;
+
+       snprintf(dev->core.bus_id, sizeof(dev->core.bus_id), "sb_%02x",
+               dev_count++);
+
+       pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
+
+       result = device_register(&dev->core);
+       return result;
+}
+
+EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
+
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
+{
+       int result;
+
+       drv->core.bus = &ps3_system_bus_type;
+
+       result = driver_register(&drv->core);
+       return result;
+}
+
+EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
+
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
+{
+       driver_unregister(&drv->core);
+}
+
+EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
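
Judging only from the bus code above, a client driver binds by match_id, supplies probe/remove methods through struct ps3_system_bus_driver, and registers with ps3_system_bus_driver_register(). A hedged sketch of such a client; the structure layout is inferred from this patch alone, and the match value and names are illustrative:

        static int my_probe(struct ps3_system_bus_device *dev)
        {
                pr_info("bound to %s\n", dev->core.bus_id);
                return 0;
        }

        static int my_remove(struct ps3_system_bus_device *dev)
        {
                return 0;
        }

        static struct ps3_system_bus_driver my_driver = {
                .match_id = 1,                  /* illustrative value */
                .core = {
                        .name = "my_ps3_driver",
                },
                .probe  = my_probe,
                .remove = my_remove,
        };

        static int __init my_driver_init(void)
        {
                return ps3_system_bus_driver_register(&my_driver);
        }

        static void __exit my_driver_exit(void)
        {
                ps3_system_bus_driver_unregister(&my_driver);
        }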
index fc766a7a611ee0a88327561f93f2996431c9bdb4..2a63ab2b47f4a11b4f07681f7111b96500267de9 100644 (file)
@@ -154,15 +154,23 @@ config RTC_DRV_DS1672
          will be called rtc-ds1672.
 
 config RTC_DRV_DS1742
-       tristate "Dallas DS1742"
+       tristate "Dallas DS1742/1743"
        depends on RTC_CLASS
        help
          If you say yes here you get support for the
-         Dallas DS1742 timekeeping chip.
+         Dallas DS1742/1743 timekeeping chip.
 
          This driver can also be built as a module. If so, the module
          will be called rtc-ds1742.
 
+config RTC_DRV_OMAP
+       tristate "TI OMAP1"
+       depends on RTC_CLASS && ( \
+               ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 )
+       help
+         Say "yes" here to support the real time clock on TI OMAP1 chips.
+         This driver can also be built as a module called rtc-omap.
+
 config RTC_DRV_PCF8563
        tristate "Philips PCF8563/Epson RTC8564"
        depends on RTC_CLASS && I2C
index 3ba5ff6e68004700162f2e05c75fe02e438ff0b9..bd4c45d333f07aff1d0a8bb79ba2750520994362 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_RTC_DRV_TEST)    += rtc-test.o
 obj-$(CONFIG_RTC_DRV_DS1307)   += rtc-ds1307.o
 obj-$(CONFIG_RTC_DRV_DS1672)   += rtc-ds1672.o
 obj-$(CONFIG_RTC_DRV_DS1742)   += rtc-ds1742.o
+obj-$(CONFIG_RTC_DRV_OMAP)     += rtc-omap.o
 obj-$(CONFIG_RTC_DRV_PCF8563)  += rtc-pcf8563.o
 obj-$(CONFIG_RTC_DRV_PCF8583)  += rtc-pcf8583.o
 obj-$(CONFIG_RTC_DRV_RS5C372)  += rtc-rs5c372.o
index 814b9e1873f55ef8061b4abfc66ab5776da38dc1..828b329e08e0ca62a23617863825a325ca3511d7 100644 (file)
@@ -53,9 +53,10 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
  * Routine to poll RTC seconds field for change as often as possible,
  * after first RTC_UIE use timer to reduce polling
  */
-static void rtc_uie_task(void *data)
+static void rtc_uie_task(struct work_struct *work)
 {
-       struct rtc_device *rtc = data;
+       struct rtc_device *rtc =
+               container_of(work, struct rtc_device, uie_task);
        struct rtc_time tm;
        int num = 0;
        int err;
@@ -411,7 +412,7 @@ static int rtc_dev_add_device(struct class_device *class_dev,
        spin_lock_init(&rtc->irq_lock);
        init_waitqueue_head(&rtc->irq_queue);
 #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-       INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc);
+       INIT_WORK(&rtc->uie_task, rtc_uie_task);
        setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
 #endif
 
index 67e816a9a39fdc5c0f64e929f95cc6dbc4fcf58b..dfef1637bfb8a1a69dc1848d75e2b490a458ddaf 100644 (file)
@@ -237,17 +237,22 @@ static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind)
        /* read control register */
        err = ds1672_get_control(client, &control);
        if (err)
-               goto exit_detach;
+               goto exit_devreg;
 
        if (control & DS1672_REG_CONTROL_EOSC)
                dev_warn(&client->dev, "Oscillator not enabled. "
                                        "Set time to enable.\n");
 
        /* Register sysfs hooks */
-       device_create_file(&client->dev, &dev_attr_control);
+       err = device_create_file(&client->dev, &dev_attr_control);
+       if (err)
+               goto exit_devreg;
 
        return 0;
 
+exit_devreg:
+       rtc_device_unregister(rtc);
+
 exit_detach:
        i2c_detach_client(client);
 
index 6273a3d240a2afd2f4a57179d38219f6259797a1..17633bfa848028ba04970d22d05e59a236ed9081 100644 (file)
@@ -6,6 +6,10 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2006 Torsten Ertbjerg Rasmussen <tr@newtec.dk>
+ *  - nvram size determined from resource
+ *  - this ds1742 driver now supports ds1743.
  */
 
 #include <linux/bcd.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
-#define RTC_REG_SIZE           0x800
-#define RTC_OFFSET             0x7f8
+#define RTC_SIZE               8
 
-#define RTC_CONTROL            (RTC_OFFSET + 0)
-#define RTC_CENTURY            (RTC_OFFSET + 0)
-#define RTC_SECONDS            (RTC_OFFSET + 1)
-#define RTC_MINUTES            (RTC_OFFSET + 2)
-#define RTC_HOURS              (RTC_OFFSET + 3)
-#define RTC_DAY                        (RTC_OFFSET + 4)
-#define RTC_DATE               (RTC_OFFSET + 5)
-#define RTC_MONTH              (RTC_OFFSET + 6)
-#define RTC_YEAR               (RTC_OFFSET + 7)
+#define RTC_CONTROL            0
+#define RTC_CENTURY            0
+#define RTC_SECONDS            1
+#define RTC_MINUTES            2
+#define RTC_HOURS              3
+#define RTC_DAY                        4
+#define RTC_DATE               5
+#define RTC_MONTH              6
+#define RTC_YEAR               7
 
 #define RTC_CENTURY_MASK       0x3f
 #define RTC_SECONDS_MASK       0x7f
 
 struct rtc_plat_data {
        struct rtc_device *rtc;
-       void __iomem *ioaddr;
+       void __iomem *ioaddr_nvram;
+       void __iomem *ioaddr_rtc;
+       size_t size_nvram;
+       size_t size;
        unsigned long baseaddr;
        unsigned long last_jiffies;
 };
@@ -57,7 +63,7 @@ static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-       void __iomem *ioaddr = pdata->ioaddr;
+       void __iomem *ioaddr = pdata->ioaddr_rtc;
        u8 century;
 
        century = BIN2BCD((tm->tm_year + 1900) / 100);
@@ -82,7 +88,7 @@ static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-       void __iomem *ioaddr = pdata->ioaddr;
+       void __iomem *ioaddr = pdata->ioaddr_rtc;
        unsigned int year, month, day, hour, minute, second, week;
        unsigned int century;
 
@@ -127,10 +133,10 @@ static ssize_t ds1742_nvram_read(struct kobject *kobj, char *buf,
        struct platform_device *pdev =
                to_platform_device(container_of(kobj, struct device, kobj));
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-       void __iomem *ioaddr = pdata->ioaddr;
+       void __iomem *ioaddr = pdata->ioaddr_nvram;
        ssize_t count;
 
-       for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+       for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
                *buf++ = readb(ioaddr + pos++);
        return count;
 }
@@ -141,10 +147,10 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj, char *buf,
        struct platform_device *pdev =
                to_platform_device(container_of(kobj, struct device, kobj));
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-       void __iomem *ioaddr = pdata->ioaddr;
+       void __iomem *ioaddr = pdata->ioaddr_nvram;
        ssize_t count;
 
-       for (count = 0; size > 0 && pos < RTC_OFFSET; count++, size--)
+       for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--)
                writeb(*buf++, ioaddr + pos++);
        return count;
 }
@@ -155,7 +161,6 @@ static struct bin_attribute ds1742_nvram_attr = {
                .mode = S_IRUGO | S_IWUGO,
                .owner = THIS_MODULE,
        },
-       .size = RTC_OFFSET,
        .read = ds1742_nvram_read,
        .write = ds1742_nvram_write,
 };
@@ -175,19 +180,23 @@ static int __init ds1742_rtc_probe(struct platform_device *pdev)
        pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;
-       if (!request_mem_region(res->start, RTC_REG_SIZE, pdev->name)) {
+       pdata->size = res->end - res->start + 1;
+       if (!request_mem_region(res->start, pdata->size, pdev->name)) {
                ret = -EBUSY;
                goto out;
        }
        pdata->baseaddr = res->start;
-       ioaddr = ioremap(pdata->baseaddr, RTC_REG_SIZE);
+       ioaddr = ioremap(pdata->baseaddr, pdata->size);
        if (!ioaddr) {
                ret = -ENOMEM;
                goto out;
        }
-       pdata->ioaddr = ioaddr;
+       pdata->ioaddr_nvram = ioaddr;
+       pdata->size_nvram = pdata->size - RTC_SIZE;
+       pdata->ioaddr_rtc = ioaddr + pdata->size_nvram;
 
        /* turn RTC on if it was not on */
+       ioaddr = pdata->ioaddr_rtc;
        sec = readb(ioaddr + RTC_SECONDS);
        if (sec & RTC_STOP) {
                sec &= RTC_SECONDS_MASK;
@@ -208,6 +217,8 @@ static int __init ds1742_rtc_probe(struct platform_device *pdev)
        pdata->rtc = rtc;
        pdata->last_jiffies = jiffies;
        platform_set_drvdata(pdev, pdata);
+       ds1742_nvram_attr.size = max(ds1742_nvram_attr.size,
+                                    pdata->size_nvram);
        ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
        if (ret)
                goto out;
@@ -215,10 +226,10 @@ static int __init ds1742_rtc_probe(struct platform_device *pdev)
  out:
        if (pdata->rtc)
                rtc_device_unregister(pdata->rtc);
-       if (ioaddr)
-               iounmap(ioaddr);
+       if (pdata->ioaddr_nvram)
+               iounmap(pdata->ioaddr_nvram);
        if (pdata->baseaddr)
-               release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+               release_mem_region(pdata->baseaddr, pdata->size);
        kfree(pdata);
        return ret;
 }
@@ -229,8 +240,8 @@ static int __devexit ds1742_rtc_remove(struct platform_device *pdev)
 
        sysfs_remove_bin_file(&pdev->dev.kobj, &ds1742_nvram_attr);
        rtc_device_unregister(pdata->rtc);
-       iounmap(pdata->ioaddr);
-       release_mem_region(pdata->baseaddr, RTC_REG_SIZE);
+       iounmap(pdata->ioaddr_nvram);
+       release_mem_region(pdata->baseaddr, pdata->size);
        kfree(pdata);
        return 0;
 }
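
The ds1742 changes above stop hard-coding the 2KiB register window: the size now comes from the platform resource, with the top eight bytes treated as the clock registers and everything below them exposed as NVRAM. A sketch of that split under the same assumptions (helper and macro names are hypothetical):

#include <linux/ioport.h>
#include <linux/io.h>

#define MY_RTC_SIZE     8       /* clock registers sit at the top of the window */

static void __iomem *my_map_ds174x(struct resource *res, size_t *nvram_len,
                                   void __iomem **rtc_regs)
{
        size_t size = res->end - res->start + 1;
        void __iomem *base = ioremap(res->start, size);

        if (!base)
                return NULL;
        *nvram_len = size - MY_RTC_SIZE;        /* 0x7f8 on a 2KiB ds1742 */
        *rtc_regs = base + *nvram_len;
        return base;
}
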
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
new file mode 100644 (file)
index 0000000..eac5fb1
--- /dev/null
@@ -0,0 +1,572 @@
+/*
+ * TI OMAP1 Real Time Clock interface for Linux
+ *
+ * Copyright (C) 2003 MontaVista Software, Inc.
+ * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
+ *
+ * Copyright (C) 2006 David Brownell (new RTC framework)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/mach/time.h>
+
+
+/* The OMAP1 RTC is a year/month/day/hours/minutes/seconds BCD clock
+ * with century-range alarm matching, driven by the 32kHz clock.
+ *
+ * The main user-visible ways it differs from PC RTCs are by omitting
+ * "don't care" alarm fields and sub-second periodic IRQs, and having
+ * an autoadjust mechanism to calibrate to the true oscillator rate.
+ *
+ * Board-specific wiring options include using split power mode with
+ * RTC_OFF_NOFF used as the reset signal (so the RTC won't be reset),
+ * and wiring RTC_WAKE_INT (so the RTC alarm can wake the system from
+ * low power modes).  See the BOARD-SPECIFIC CUSTOMIZATION comment.
+ */
+
+#define OMAP_RTC_BASE                  0xfffb4800
+
+/* RTC registers */
+#define OMAP_RTC_SECONDS_REG           0x00
+#define OMAP_RTC_MINUTES_REG           0x04
+#define OMAP_RTC_HOURS_REG             0x08
+#define OMAP_RTC_DAYS_REG              0x0C
+#define OMAP_RTC_MONTHS_REG            0x10
+#define OMAP_RTC_YEARS_REG             0x14
+#define OMAP_RTC_WEEKS_REG             0x18
+
+#define OMAP_RTC_ALARM_SECONDS_REG     0x20
+#define OMAP_RTC_ALARM_MINUTES_REG     0x24
+#define OMAP_RTC_ALARM_HOURS_REG       0x28
+#define OMAP_RTC_ALARM_DAYS_REG                0x2c
+#define OMAP_RTC_ALARM_MONTHS_REG      0x30
+#define OMAP_RTC_ALARM_YEARS_REG       0x34
+
+#define OMAP_RTC_CTRL_REG              0x40
+#define OMAP_RTC_STATUS_REG            0x44
+#define OMAP_RTC_INTERRUPTS_REG                0x48
+
+#define OMAP_RTC_COMP_LSB_REG          0x4c
+#define OMAP_RTC_COMP_MSB_REG          0x50
+#define OMAP_RTC_OSC_REG               0x54
+
+/* OMAP_RTC_CTRL_REG bit fields: */
+#define OMAP_RTC_CTRL_SPLIT            (1<<7)
+#define OMAP_RTC_CTRL_DISABLE          (1<<6)
+#define OMAP_RTC_CTRL_SET_32_COUNTER   (1<<5)
+#define OMAP_RTC_CTRL_TEST             (1<<4)
+#define OMAP_RTC_CTRL_MODE_12_24       (1<<3)
+#define OMAP_RTC_CTRL_AUTO_COMP                (1<<2)
+#define OMAP_RTC_CTRL_ROUND_30S                (1<<1)
+#define OMAP_RTC_CTRL_STOP             (1<<0)
+
+/* OMAP_RTC_STATUS_REG bit fields: */
+#define OMAP_RTC_STATUS_POWER_UP        (1<<7)
+#define OMAP_RTC_STATUS_ALARM           (1<<6)
+#define OMAP_RTC_STATUS_1D_EVENT        (1<<5)
+#define OMAP_RTC_STATUS_1H_EVENT        (1<<4)
+#define OMAP_RTC_STATUS_1M_EVENT        (1<<3)
+#define OMAP_RTC_STATUS_1S_EVENT        (1<<2)
+#define OMAP_RTC_STATUS_RUN             (1<<1)
+#define OMAP_RTC_STATUS_BUSY            (1<<0)
+
+/* OMAP_RTC_INTERRUPTS_REG bit fields: */
+#define OMAP_RTC_INTERRUPTS_IT_ALARM    (1<<3)
+#define OMAP_RTC_INTERRUPTS_IT_TIMER    (1<<2)
+
+
+#define rtc_read(addr)         omap_readb(OMAP_RTC_BASE + (addr))
+#define rtc_write(val, addr)   omap_writeb(val, OMAP_RTC_BASE + (addr))
+
+
+/* platform_bus isn't hotpluggable, so for static linkage it'd be safe
+ * to get rid of probe() and remove() code ... too bad the driver struct
+ * remembers probe(); that code is about 25% of the runtime footprint.
+ */
+#ifndef        MODULE
+#undef __devexit
+#undef __devexit_p
+#define        __devexit       __exit
+#define        __devexit_p     __exit_p
+#endif
+
+
+/* we rely on the rtc framework to handle locking (rtc->ops_lock),
+ * so the only other requirement is that register accesses which
+ * require BUSY to be clear are made with IRQs locally disabled
+ */
+static void rtc_wait_not_busy(void)
+{
+       int     count = 0;
+       u8      status;
+
+       /* BUSY may stay active for 1/32768 second (~30 usec) */
+       for (count = 0; count < 50; count++) {
+               status = rtc_read(OMAP_RTC_STATUS_REG);
+               if ((status & (u8)OMAP_RTC_STATUS_BUSY) == 0)
+                       break;
+               udelay(1);
+       }
+       /* now we have ~15 usec to read/write various registers */
+}
+
+static irqreturn_t rtc_irq(int irq, void *class_dev)
+{
+       unsigned long           events = 0;
+       u8                      irq_data;
+
+       irq_data = rtc_read(OMAP_RTC_STATUS_REG);
+
+       /* alarm irq? */
+       if (irq_data & OMAP_RTC_STATUS_ALARM) {
+               rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
+               events |= RTC_IRQF | RTC_AF;
+       }
+
+       /* 1/sec periodic/update irq? */
+       if (irq_data & OMAP_RTC_STATUS_1S_EVENT)
+               events |= RTC_IRQF | RTC_UF;
+
+       rtc_update_irq(class_dev, 1, events);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_RTC_INTF_DEV
+
+static int
+omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+       u8 reg;
+
+       switch (cmd) {
+       case RTC_AIE_OFF:
+       case RTC_AIE_ON:
+       case RTC_UIE_OFF:
+       case RTC_UIE_ON:
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+
+       local_irq_disable();
+       rtc_wait_not_busy();
+       reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+       switch (cmd) {
+       /* AIE = Alarm Interrupt Enable */
+       case RTC_AIE_OFF:
+               reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+               break;
+       case RTC_AIE_ON:
+               reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+               break;
+       /* UIE = Update Interrupt Enable (1/second) */
+       case RTC_UIE_OFF:
+               reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
+               break;
+       case RTC_UIE_ON:
+               reg |= OMAP_RTC_INTERRUPTS_IT_TIMER;
+               break;
+       }
+       rtc_wait_not_busy();
+       rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+       local_irq_enable();
+
+       return 0;
+}
+
+#else
+#define        omap_rtc_ioctl  NULL
+#endif
+
+/* this hardware doesn't support "don't care" alarm fields */
+static int tm2bcd(struct rtc_time *tm)
+{
+       if (rtc_valid_tm(tm) != 0)
+               return -EINVAL;
+
+       tm->tm_sec = BIN2BCD(tm->tm_sec);
+       tm->tm_min = BIN2BCD(tm->tm_min);
+       tm->tm_hour = BIN2BCD(tm->tm_hour);
+       tm->tm_mday = BIN2BCD(tm->tm_mday);
+
+       tm->tm_mon = BIN2BCD(tm->tm_mon + 1);
+
+       /* epoch == 1900 */
+       if (tm->tm_year < 100 || tm->tm_year > 199)
+               return -EINVAL;
+       tm->tm_year = BIN2BCD(tm->tm_year - 100);
+
+       return 0;
+}
+
+static void bcd2tm(struct rtc_time *tm)
+{
+       tm->tm_sec = BCD2BIN(tm->tm_sec);
+       tm->tm_min = BCD2BIN(tm->tm_min);
+       tm->tm_hour = BCD2BIN(tm->tm_hour);
+       tm->tm_mday = BCD2BIN(tm->tm_mday);
+       tm->tm_mon = BCD2BIN(tm->tm_mon) - 1;
+       /* epoch == 1900 */
+       tm->tm_year = BCD2BIN(tm->tm_year) + 100;
+}
+
+
+static int omap_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       /* we don't report wday/yday/isdst ... */
+       local_irq_disable();
+       rtc_wait_not_busy();
+
+       tm->tm_sec = rtc_read(OMAP_RTC_SECONDS_REG);
+       tm->tm_min = rtc_read(OMAP_RTC_MINUTES_REG);
+       tm->tm_hour = rtc_read(OMAP_RTC_HOURS_REG);
+       tm->tm_mday = rtc_read(OMAP_RTC_DAYS_REG);
+       tm->tm_mon = rtc_read(OMAP_RTC_MONTHS_REG);
+       tm->tm_year = rtc_read(OMAP_RTC_YEARS_REG);
+
+       local_irq_enable();
+
+       bcd2tm(tm);
+       return 0;
+}
+
+static int omap_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       if (tm2bcd(tm) < 0)
+               return -EINVAL;
+       local_irq_disable();
+       rtc_wait_not_busy();
+
+       rtc_write(tm->tm_year, OMAP_RTC_YEARS_REG);
+       rtc_write(tm->tm_mon, OMAP_RTC_MONTHS_REG);
+       rtc_write(tm->tm_mday, OMAP_RTC_DAYS_REG);
+       rtc_write(tm->tm_hour, OMAP_RTC_HOURS_REG);
+       rtc_write(tm->tm_min, OMAP_RTC_MINUTES_REG);
+       rtc_write(tm->tm_sec, OMAP_RTC_SECONDS_REG);
+
+       local_irq_enable();
+
+       return 0;
+}
+
+static int omap_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+       local_irq_disable();
+       rtc_wait_not_busy();
+
+       alm->time.tm_sec = rtc_read(OMAP_RTC_ALARM_SECONDS_REG);
+       alm->time.tm_min = rtc_read(OMAP_RTC_ALARM_MINUTES_REG);
+       alm->time.tm_hour = rtc_read(OMAP_RTC_ALARM_HOURS_REG);
+       alm->time.tm_mday = rtc_read(OMAP_RTC_ALARM_DAYS_REG);
+       alm->time.tm_mon = rtc_read(OMAP_RTC_ALARM_MONTHS_REG);
+       alm->time.tm_year = rtc_read(OMAP_RTC_ALARM_YEARS_REG);
+
+       local_irq_enable();
+
+       bcd2tm(&alm->time);
+       alm->pending = !!(rtc_read(OMAP_RTC_INTERRUPTS_REG)
+                       & OMAP_RTC_INTERRUPTS_IT_ALARM);
+       alm->enabled = alm->pending && device_may_wakeup(dev);
+
+       return 0;
+}
+
+static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+       u8 reg;
+
+       /* Much userspace code uses RTC_ALM_SET, thus "don't care" for
+        * day/month/year specifies alarms up to 24 hours in the future.
+        * So we need to handle that ... but let's ignore the "don't care"
+        * values for hours/minutes/seconds.
+        */
+       if (alm->time.tm_mday <= 0
+                       && alm->time.tm_mon < 0
+                       && alm->time.tm_year < 0) {
+               struct rtc_time tm;
+               unsigned long now, then;
+
+               omap_rtc_read_time(dev, &tm);
+               rtc_tm_to_time(&tm, &now);
+
+               alm->time.tm_mday = tm.tm_mday;
+               alm->time.tm_mon = tm.tm_mon;
+               alm->time.tm_year = tm.tm_year;
+               rtc_tm_to_time(&alm->time, &then);
+
+               /* sometimes the alarm wraps into tomorrow */
+               if (then < now) {
+                       rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+                       alm->time.tm_mday = tm.tm_mday;
+                       alm->time.tm_mon = tm.tm_mon;
+                       alm->time.tm_year = tm.tm_year;
+               }
+       }
+
+       if (tm2bcd(&alm->time) < 0)
+               return -EINVAL;
+
+       local_irq_disable();
+       rtc_wait_not_busy();
+
+       rtc_write(alm->time.tm_year, OMAP_RTC_ALARM_YEARS_REG);
+       rtc_write(alm->time.tm_mon, OMAP_RTC_ALARM_MONTHS_REG);
+       rtc_write(alm->time.tm_mday, OMAP_RTC_ALARM_DAYS_REG);
+       rtc_write(alm->time.tm_hour, OMAP_RTC_ALARM_HOURS_REG);
+       rtc_write(alm->time.tm_min, OMAP_RTC_ALARM_MINUTES_REG);
+       rtc_write(alm->time.tm_sec, OMAP_RTC_ALARM_SECONDS_REG);
+
+       reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+       if (alm->enabled)
+               reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+       else
+               reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+       rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+
+       local_irq_enable();
+
+       return 0;
+}
+
+static struct rtc_class_ops omap_rtc_ops = {
+       .ioctl          = omap_rtc_ioctl,
+       .read_time      = omap_rtc_read_time,
+       .set_time       = omap_rtc_set_time,
+       .read_alarm     = omap_rtc_read_alarm,
+       .set_alarm      = omap_rtc_set_alarm,
+};
+
+static int omap_rtc_alarm;
+static int omap_rtc_timer;
+
+static int __devinit omap_rtc_probe(struct platform_device *pdev)
+{
+       struct resource         *res, *mem;
+       struct rtc_device       *rtc;
+       u8                      reg, new_ctrl;
+
+       omap_rtc_timer = platform_get_irq(pdev, 0);
+       if (omap_rtc_timer <= 0) {
+               pr_debug("%s: no update irq?\n", pdev->name);
+               return -ENOENT;
+       }
+
+       omap_rtc_alarm = platform_get_irq(pdev, 1);
+       if (omap_rtc_alarm <= 0) {
+               pr_debug("%s: no alarm irq?\n", pdev->name);
+               return -ENOENT;
+       }
+
+       /* NOTE:  using static mapping for RTC registers */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res && res->start != OMAP_RTC_BASE) {
+               pr_debug("%s: RTC registers at %08x, expected %08x\n",
+                       pdev->name, (unsigned) res->start, OMAP_RTC_BASE);
+               return -ENOENT;
+       }
+
+       if (res)
+               mem = request_mem_region(res->start,
+                               res->end - res->start + 1,
+                               pdev->name);
+       else
+               mem = NULL;
+       if (!mem) {
+               pr_debug("%s: RTC registers at %08x are not free\n",
+                       pdev->name, OMAP_RTC_BASE);
+               return -EBUSY;
+       }
+
+       rtc = rtc_device_register(pdev->name, &pdev->dev,
+                       &omap_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               pr_debug("%s: can't register RTC device, err %ld\n",
+                       pdev->name, PTR_ERR(rtc));
+               goto fail;
+       }
+       platform_set_drvdata(pdev, rtc);
+       class_set_devdata(&rtc->class_dev, mem);
+
+       /* clear pending irqs, and set 1/second periodic,
+        * which we'll use instead of update irqs
+        */
+       rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+       /* clear old status */
+       reg = rtc_read(OMAP_RTC_STATUS_REG);
+       if (reg & (u8) OMAP_RTC_STATUS_POWER_UP) {
+               pr_info("%s: RTC power up reset detected\n",
+                       pdev->name);
+               rtc_write(OMAP_RTC_STATUS_POWER_UP, OMAP_RTC_STATUS_REG);
+       }
+       if (reg & (u8) OMAP_RTC_STATUS_ALARM)
+               rtc_write(OMAP_RTC_STATUS_ALARM, OMAP_RTC_STATUS_REG);
+
+       /* handle periodic and alarm irqs */
+       if (request_irq(omap_rtc_timer, rtc_irq, SA_INTERRUPT,
+                       rtc->class_dev.class_id, &rtc->class_dev)) {
+               pr_debug("%s: RTC timer interrupt IRQ%d already claimed\n",
+                       pdev->name, omap_rtc_timer);
+               goto fail0;
+       }
+       if (request_irq(omap_rtc_alarm, rtc_irq, SA_INTERRUPT,
+                       rtc->class_dev.class_id, &rtc->class_dev)) {
+               pr_debug("%s: RTC alarm interrupt IRQ%d already claimed\n",
+                       pdev->name, omap_rtc_alarm);
+               goto fail1;
+       }
+
+       /* On boards with split power, RTC_ON_NOFF won't reset the RTC */
+       reg = rtc_read(OMAP_RTC_CTRL_REG);
+       if (reg & (u8) OMAP_RTC_CTRL_STOP)
+               pr_info("%s: already running\n", pdev->name);
+
+       /* force to 24 hour mode */
+       new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
+       new_ctrl |= OMAP_RTC_CTRL_STOP;
+
+       /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
+        *
+        *  - Boards wired so that RTC_WAKE_INT does something, and muxed
+        *    right (W13_1610_RTC_WAKE_INT is the default after chip reset),
+        *    should initialize the device wakeup flag appropriately.
+        *
+        *  - Boards wired so RTC_ON_nOFF is used as the reset signal,
+        *    rather than nPWRON_RESET, should forcibly enable split
+        *    power mode.  (Some chip errata report that RTC_CTRL_SPLIT
+        *    is write-only, and always reads as zero...)
+        */
+       device_init_wakeup(&pdev->dev, 0);
+
+       if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT)
+               pr_info("%s: split power mode\n", pdev->name);
+
+       if (reg != new_ctrl)
+               rtc_write(new_ctrl, OMAP_RTC_CTRL_REG);
+
+       return 0;
+
+fail1:
+       free_irq(omap_rtc_timer, NULL);
+fail0:
+       rtc_device_unregister(rtc);
+fail:
+       release_resource(mem);
+       return -EIO;
+}
+
+static int __devexit omap_rtc_remove(struct platform_device *pdev)
+{
+       struct rtc_device       *rtc = platform_get_drvdata(pdev);
+
+       device_init_wakeup(&pdev->dev, 0);
+
+       /* leave rtc running, but disable irqs */
+       rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+       free_irq(omap_rtc_timer, rtc);
+       free_irq(omap_rtc_alarm, rtc);
+
+       release_resource(class_get_devdata(&rtc->class_dev));
+       rtc_device_unregister(rtc);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+static struct timespec rtc_delta;
+static u8 irqstat;
+
+static int omap_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct rtc_time rtc_tm;
+       struct timespec time;
+
+       time.tv_nsec = 0;
+       omap_rtc_read_time(NULL, &rtc_tm);
+       rtc_tm_to_time(&rtc_tm, &time.tv_sec);
+
+       save_time_delta(&rtc_delta, &time);
+       irqstat = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+
+       /* FIXME the RTC alarm is not currently acting as a wakeup event
+        * source, and in fact this enable() call is just saving a flag
+        * that's never used...
+        */
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(omap_rtc_alarm);
+       else
+               rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+
+       return 0;
+}
+
+static int omap_rtc_resume(struct platform_device *pdev)
+{
+       struct rtc_time rtc_tm;
+       struct timespec time;
+
+       time.tv_nsec = 0;
+       omap_rtc_read_time(NULL, &rtc_tm);
+       rtc_tm_to_time(&rtc_tm, &time.tv_sec);
+
+       restore_time_delta(&rtc_delta, &time);
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(omap_rtc_alarm);
+       else
+               rtc_write(irqstat, OMAP_RTC_INTERRUPTS_REG);
+       return 0;
+}
+
+#else
+#define omap_rtc_suspend NULL
+#define omap_rtc_resume  NULL
+#endif
+
+static void omap_rtc_shutdown(struct platform_device *pdev)
+{
+       rtc_write(0, OMAP_RTC_INTERRUPTS_REG);
+}
+
+MODULE_ALIAS("omap_rtc");
+static struct platform_driver omap_rtc_driver = {
+       .probe          = omap_rtc_probe,
+       .remove         = __devexit_p(omap_rtc_remove),
+       .suspend        = omap_rtc_suspend,
+       .resume         = omap_rtc_resume,
+       .shutdown       = omap_rtc_shutdown,
+       .driver         = {
+               .name   = "omap_rtc",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init rtc_init(void)
+{
+       return platform_driver_register(&omap_rtc_driver);
+}
+module_init(rtc_init);
+
+static void __exit rtc_exit(void)
+{
+       platform_driver_unregister(&omap_rtc_driver);
+}
+module_exit(rtc_exit);
+
+MODULE_AUTHOR("George G. Davis (and others)");
+MODULE_LICENSE("GPL");
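
For context, a hedged userspace sketch of how the update-interrupt ioctls implemented by this new driver are typically exercised through the RTC character device; the /dev/rtc0 node name is an assumption and depends on registration order:

#include <linux/rtc.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        unsigned long data;
        int fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0)
                return 1;
        ioctl(fd, RTC_UIE_ON, 0);       /* 1/second update interrupts */
        read(fd, &data, sizeof(data));  /* blocks until the next tick */
        ioctl(fd, RTC_UIE_OFF, 0);
        close(fd);
        return 0;
}
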
index a44fe4efa216ac372cc8eac8d4e0d18aa6c9a71d..e2c7698fdba312c5b759df934d25375bd1c143be 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
 /* Addresses to scan */
 static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
@@ -39,6 +39,14 @@ static int rs5c372_attach(struct i2c_adapter *adapter);
 static int rs5c372_detach(struct i2c_client *client);
 static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind);
 
+struct rs5c372 {
+       u8 reg_addr;
+       u8 regs[17];
+       struct i2c_msg msg[1];
+       struct i2c_client client;
+       struct rtc_device *rtc;
+};
+
 static struct i2c_driver rs5c372_driver = {
        .driver         = {
                .name   = "rs5c372",
@@ -49,18 +57,16 @@ static struct i2c_driver rs5c372_driver = {
 
 static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-       unsigned char buf[7] = { RS5C372_REG_BASE };
 
-       /* this implements the 1st reading method, according
-        * to the datasheet. buf[0] is initialized with
-        * address ptr and transmission format register.
+       struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
+       u8 *buf = &(rs5c372->regs[1]);
+
+       /* this implements the 3rd reading method, according
+        * to the datasheet. rs5c372 defaults to internal
+        * address 0xF, so 0x0 is in regs[1]
         */
-       struct i2c_msg msgs[] = {
-               { client->addr, 0, 1, buf },
-               { client->addr, I2C_M_RD, 7, buf },
-       };
 
-       if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
+       if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) {
                dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
                return -EIO;
        }
@@ -114,23 +120,14 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 
 static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
 {
-       unsigned char buf = RS5C372_REG_TRIM;
-
-       struct i2c_msg msgs[] = {
-               { client->addr, 0, 1, &buf },
-               { client->addr, I2C_M_RD, 1, &buf },
-       };
-
-       if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
-               return -EIO;
-       }
+       struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
+       u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1];
 
        if (osc)
-               *osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768;
+               *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
 
        if (trim) {
-               *trim = buf & RS5C372_TRIM_MASK;
+               *trim = tmp & RS5C372_TRIM_MASK;
                dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim);
        }
 
@@ -201,7 +198,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 {
        int err = 0;
        struct i2c_client *client;
-       struct rtc_device *rtc;
+       struct rs5c372 *rs5c372;
 
        dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
 
@@ -210,10 +207,11 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
                goto exit;
        }
 
-       if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
+       if (!(rs5c372 = kzalloc(sizeof(struct rs5c372), GFP_KERNEL))) {
                err = -ENOMEM;
                goto exit;
        }
+       client = &rs5c372->client;
 
        /* I2C client */
        client->addr = address;
@@ -222,32 +220,47 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
 
        strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
 
+       i2c_set_clientdata(client, rs5c372);
+
+       rs5c372->msg[0].addr = address;
+       rs5c372->msg[0].flags = I2C_M_RD;
+       rs5c372->msg[0].len = sizeof(rs5c372->regs);
+       rs5c372->msg[0].buf = rs5c372->regs;
+
        /* Inform the i2c layer */
        if ((err = i2c_attach_client(client)))
                goto exit_kfree;
 
        dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
 
-       rtc = rtc_device_register(rs5c372_driver.driver.name, &client->dev,
-                               &rs5c372_rtc_ops, THIS_MODULE);
+       rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
+                               &client->dev, &rs5c372_rtc_ops, THIS_MODULE);
 
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
+       if (IS_ERR(rs5c372->rtc)) {
+               err = PTR_ERR(rs5c372->rtc);
                goto exit_detach;
        }
 
-       i2c_set_clientdata(client, rtc);
-
-       device_create_file(&client->dev, &dev_attr_trim);
-       device_create_file(&client->dev, &dev_attr_osc);
+       err = device_create_file(&client->dev, &dev_attr_trim);
+       if (err)
+               goto exit_devreg;
+       err = device_create_file(&client->dev, &dev_attr_osc);
+       if (err)
+               goto exit_trim;
 
        return 0;
 
+exit_trim:
+       device_remove_file(&client->dev, &dev_attr_trim);
+
+exit_devreg:
+       rtc_device_unregister(rs5c372->rtc);
+
 exit_detach:
        i2c_detach_client(client);
 
 exit_kfree:
-       kfree(client);
+       kfree(rs5c372);
 
 exit:
        return err;
@@ -256,16 +269,15 @@ exit:
 static int rs5c372_detach(struct i2c_client *client)
 {
        int err;
-       struct rtc_device *rtc = i2c_get_clientdata(client);
+       struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
 
-       if (rtc)
-               rtc_device_unregister(rtc);
+       if (rs5c372->rtc)
+               rtc_device_unregister(rs5c372->rtc);
 
        if ((err = i2c_detach_client(client)))
                return err;
 
-       kfree(client);
-
+       kfree(rs5c372);
        return 0;
 }
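
The rs5c372 conversion above switches to the datasheet's third reading method: a single I2C read message fetches the whole register file, and because the chip's internal address pointer starts at 0xF, register 0x0 lands at offset 1 of the buffer. An illustrative sketch of that transfer (the helper name is not from the driver):

#include <linux/i2c.h>
#include <linux/errno.h>

static int my_read_all_regs(struct i2c_client *client, u8 *buf, int len)
{
        struct i2c_msg msg = {
                .addr  = client->addr,
                .flags = I2C_M_RD,
                .len   = len,           /* 17 bytes for the rs5c372 */
                .buf   = buf,
        };

        if (i2c_transfer(client->adapter, &msg, 1) != 1)
                return -EIO;            /* on success the seconds register is buf[1] */
        return 0;
}
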
 
index 6ef9c62d5032afa97122331c4754513f9b44b0dc..f50a1b8e160706982f91c5a051abcc88f73d333e 100644 (file)
@@ -123,11 +123,18 @@ static int test_probe(struct platform_device *plat_dev)
                err = PTR_ERR(rtc);
                return err;
        }
-       device_create_file(&plat_dev->dev, &dev_attr_irq);
+
+       err = device_create_file(&plat_dev->dev, &dev_attr_irq);
+       if (err)
+               goto err;
 
        platform_set_drvdata(plat_dev, rtc);
 
        return 0;
+
+err:
+       rtc_device_unregister(rtc);
+       return err;
 }
 
 static int __devexit test_remove(struct platform_device *plat_dev)
index 522c69753bbfb3843b6dccf1c4f922b43e45a666..9a67487d086ba55ab36e202c1e7bbc9f9cf1239d 100644 (file)
@@ -562,11 +562,19 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
        else
                dev_err(&client->dev, "couldn't read status\n");
 
-       device_create_file(&client->dev, &dev_attr_atrim);
-       device_create_file(&client->dev, &dev_attr_dtrim);
+       err = device_create_file(&client->dev, &dev_attr_atrim);
+       if (err) goto exit_devreg;
+       err = device_create_file(&client->dev, &dev_attr_dtrim);
+       if (err) goto exit_atrim;
 
        return 0;
 
+exit_atrim:
+       device_remove_file(&client->dev, &dev_attr_atrim);
+
+exit_devreg:
+       rtc_device_unregister(rtc);
+
 exit_detach:
        i2c_detach_client(client);
 
index a2cef57d7bcb2e72bc26366902c41c6b461f498e..2af2d9b53d180fff0148fb4ab830ca5fe1c0d135 100644 (file)
@@ -54,7 +54,7 @@ static void dasd_flush_request_queue(struct dasd_device *);
 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 static int dasd_flush_ccw_queue(struct dasd_device *, int);
 static void dasd_tasklet(struct dasd_device *);
-static void do_kick_device(void *data);
+static void do_kick_device(struct work_struct *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -100,7 +100,7 @@ dasd_alloc_device(void)
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
-       INIT_WORK(&device->kick_work, do_kick_device, device);
+       INIT_WORK(&device->kick_work, do_kick_device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;
 
@@ -407,11 +407,9 @@ dasd_change_state(struct dasd_device *device)
  * event daemon.
  */
 static void
-do_kick_device(void *data)
+do_kick_device(struct work_struct *work)
 {
-       struct dasd_device *device;
-
-       device = (struct dasd_device *) data;
+       struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
        dasd_change_state(device);
        dasd_schedule_bh(device);
        dasd_put_device(device);
index 17fdd8c9f740107dea18645fcf3133bbfd2ce29d..cf28ccc579487ff5b5daa661e4ebe3a1d3f6a6e2 100644 (file)
@@ -25,7 +25,7 @@
 
 #include "dasd_int.h"
 
-kmem_cache_t *dasd_page_cache;
+struct kmem_cache *dasd_page_cache;
 EXPORT_SYMBOL_GPL(dasd_page_cache);
 
 /*
index 5ecea3e4fdefd3a5bdaa8ef0cebc0711dc91028f..fdaa471e845fa78c73ff7a704bada1fb0f0b3ff7 100644 (file)
@@ -1215,7 +1215,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
                dst = page_address(bv->bv_page) + bv->bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
-                                                     SLAB_DMA | __GFP_NOWARN);
+                                                     GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
                                memcpy(copy + bv->bv_offset, dst, bv->bv_len);
                        if (copy)
index 80926c5482281293644f52e06888060e804a9f2a..b857fd5893fdc4d35586e929e3060a070aa5b4d5 100644 (file)
@@ -308,7 +308,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
                dst = page_address(bv->bv_page) + bv->bv_offset;
                if (dasd_page_cache) {
                        char *copy = kmem_cache_alloc(dasd_page_cache,
-                                                     SLAB_DMA | __GFP_NOWARN);
+                                                     GFP_DMA | __GFP_NOWARN);
                        if (copy && rq_data_dir(req) == WRITE)
                                memcpy(copy + bv->bv_offset, dst, bv->bv_len);
                        if (copy)
index 9f52004f6fc2f05315485842fabc029e4de4a8c3..dc5dd509434d93463684563014cfe46cb1a6808b 100644 (file)
@@ -474,7 +474,7 @@ extern struct dasd_profile_info_t dasd_global_profile;
 extern unsigned int dasd_profile_level;
 extern struct block_device_operations dasd_device_operations;
 
-extern kmem_cache_t *dasd_page_cache;
+extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
 dasd_kmalloc_request(char *, int, int, struct dasd_device *);
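
The s390 DASD hunks above reflect two related renames: the slab cache type is now written struct kmem_cache, and allocation flags use the ordinary GFP_* constants instead of the old SLAB_* aliases. A minimal sketch, assuming the cache has been created elsewhere:

#include <linux/slab.h>

extern struct kmem_cache *dasd_page_cache;

static void *my_alloc_dma_copy(void)
{
        if (!dasd_page_cache)
                return NULL;
        return kmem_cache_alloc(dasd_page_cache, GFP_DMA | __GFP_NOWARN);
}
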
index ad7f7e1c01631608e3ce529352b9ce4de74d3832..26cf2f5ae2e75bfedcd22397b69850f7eebd229c 100644 (file)
@@ -334,7 +334,7 @@ static LIST_HEAD(slow_subchannels_head);
 static DEFINE_SPINLOCK(slow_subchannel_lock);
 
 static void
-css_trigger_slow_path(void)
+css_trigger_slow_path(struct work_struct *unused)
 {
        CIO_TRACE_EVENT(4, "slowpath");
 
@@ -359,8 +359,7 @@ css_trigger_slow_path(void)
        spin_unlock_irq(&slow_subchannel_lock);
 }
 
-typedef void (*workfunc)(void *);
-DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
+DECLARE_WORK(slow_path_work, css_trigger_slow_path);
 struct workqueue_struct *slow_path_wq;
 
 /* Reprobe subchannel if unregistered. */
@@ -397,7 +396,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
 }
 
 /* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(void *data)
+static void reprobe_all(struct work_struct *unused)
 {
        int ret;
 
@@ -413,7 +412,7 @@ static void reprobe_all(void *data)
                      need_reprobe);
 }
 
-DECLARE_WORK(css_reprobe_work, reprobe_all, NULL);
+DECLARE_WORK(css_reprobe_work, reprobe_all);
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
index 6a54334ffe090418ccb5217ef1621b3aeea81075..e4dc947e74e9fc2402a70e438c6e649bb0853ad6 100644 (file)
@@ -37,7 +37,7 @@
 #include "ap_bus.h"
 
 /* Some prototypes. */
-static void ap_scan_bus(void *);
+static void ap_scan_bus(struct work_struct *);
 static void ap_poll_all(unsigned long);
 static void ap_poll_timeout(unsigned long);
 static int ap_poll_thread_start(void);
@@ -71,7 +71,7 @@ static struct device *ap_root_device = NULL;
 static struct workqueue_struct *ap_work_queue;
 static struct timer_list ap_config_timer;
 static int ap_config_time = AP_CONFIG_TIME;
-static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
+static DECLARE_WORK(ap_config_work, ap_scan_bus);
 
 /**
  * Tasklet & timer for AP request polling.
@@ -732,7 +732,7 @@ static void ap_device_release(struct device *dev)
        kfree(ap_dev);
 }
 
-static void ap_scan_bus(void *data)
+static void ap_scan_bus(struct work_struct *unused)
 {
        struct ap_device *ap_dev;
        struct device *dev;
index 08d4e47070bda333b5fe97808b5d578d6756b69f..e5665b6743a1379d37e949cb549fab52dbb40021 100644 (file)
@@ -67,7 +67,7 @@ static char debug_buffer[255];
  * Some prototypes.
  */
 static void lcs_tasklet(unsigned long);
-static void lcs_start_kernel_thread(struct lcs_card *card);
+static void lcs_start_kernel_thread(struct work_struct *);
 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
 static int lcs_recovery(void *ptr);
@@ -1724,8 +1724,9 @@ lcs_stopcard(struct lcs_card *card)
  * Kernel Thread helper functions for LGW initiated commands
  */
 static void
-lcs_start_kernel_thread(struct lcs_card *card)
+lcs_start_kernel_thread(struct work_struct *work)
 {
+       struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
        LCS_DBF_TEXT(5, trace, "krnthrd");
        if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
                kernel_thread(lcs_recovery, (void *) card, SIGCHLD);
@@ -2053,8 +2054,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
        ccwgdev->cdev[0]->handler = lcs_irq;
        ccwgdev->cdev[1]->handler = lcs_irq;
        card->gdev = ccwgdev;
-       INIT_WORK(&card->kernel_thread_starter,
-                 (void *) lcs_start_kernel_thread, card);
+       INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
        card->thread_start_mask = 0;
        card->thread_allowed_mask = 0;
        card->thread_running_mask = 0;
index 7fdc5272c44622fd9f3e7f003e59b25802d04361..2bde4f1fb9c2f320783f9ae08ced9b64fc7cf31f 100644 (file)
@@ -1039,8 +1039,9 @@ qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
 }
 
 static void
-qeth_start_kernel_thread(struct qeth_card *card)
+qeth_start_kernel_thread(struct work_struct *work)
 {
+       struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter);
        QETH_DBF_TEXT(trace , 2, "strthrd");
 
        if (card->read.state != CH_STATE_UP &&
@@ -1103,8 +1104,7 @@ qeth_setup_card(struct qeth_card *card)
        card->thread_start_mask = 0;
        card->thread_allowed_mask = 0;
        card->thread_running_mask = 0;
-       INIT_WORK(&card->kernel_thread_starter,
-                 (void *)qeth_start_kernel_thread,card);
+       INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
        INIT_LIST_HEAD(&card->ip_list);
        card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
        if (!card->ip_tbd_list) {
index 74c0eac083e46a19d5026c385350268b6fa39047..32933ed54b8a05dbe3534ac90395bbbf3d1ee283 100644 (file)
@@ -1032,9 +1032,9 @@ struct zfcp_data {
        wwn_t                   init_wwpn;
        fcp_lun_t               init_fcp_lun;
        char                    *driver_version;
-       kmem_cache_t            *fsf_req_qtcb_cache;
-       kmem_cache_t            *sr_buffer_cache;
-       kmem_cache_t            *gid_pn_cache;
+       struct kmem_cache               *fsf_req_qtcb_cache;
+       struct kmem_cache               *sr_buffer_cache;
+       struct kmem_cache               *gid_pn_cache;
 };
 
 /**
index 277826cdd0c845ff3eda616ebc7eb0130169e7c1..067f1519eb04dcda87ff07f79d6cae072675770e 100644 (file)
@@ -109,7 +109,7 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
                        ptr = kmalloc(size, GFP_ATOMIC);
                else
                        ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
-                                              SLAB_ATOMIC);
+                                              GFP_ATOMIC);
        }
 
        if (unlikely(!ptr))
index 562432d017b0c39cc74704031bb4dc1a46df30d8..68103e508db7077455dd437db6eae86797d63c0c 100644 (file)
@@ -313,7 +313,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
        hostdata->status = memory + STATUS_OFFSET;
        /* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
         * if this isn't sufficient separation to avoid dma flushing issues */
-       BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
+       BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
        hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
        hostdata->dev = dev;
 
@@ -362,11 +362,11 @@ NCR_700_detect(struct scsi_host_template *tpnt,
        for (j = 0; j < PATCHES; j++)
                script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
        /* now patch up fixed addresses. */
-       script_patch_32(script, MessageLocation,
+       script_patch_32(hostdata->dev, script, MessageLocation,
                        pScript + MSGOUT_OFFSET);
-       script_patch_32(script, StatusAddress,
+       script_patch_32(hostdata->dev, script, StatusAddress,
                        pScript + STATUS_OFFSET);
-       script_patch_32(script, ReceiveMsgAddress,
+       script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
                        pScript + MSGIN_OFFSET);
 
        hostdata->script = script;
@@ -622,8 +622,10 @@ NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
                        dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
                        /* restore the old result if the request sense was
                         * successful */
-                       if(result == 0)
+                       if (result == 0)
                                result = cmnd[7];
+                       /* restore the original length */
+                       SCp->cmd_len = cmnd[8];
                } else
                        NCR_700_unmap(hostdata, SCp, slot);
 
@@ -819,8 +821,9 @@ process_extended_message(struct Scsi_Host *host,
                        shost_printk(KERN_WARNING, host,
                                "Unexpected SDTR msg\n");
                        hostdata->msgout[0] = A_REJECT_MSG;
-                       dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-                       script_patch_16(hostdata->script, MessageCount, 1);
+                       dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+                       script_patch_16(hostdata->dev, hostdata->script,
+                                       MessageCount, 1);
                        /* SendMsgOut returns, so set up the return
                         * address */
                        resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -831,8 +834,9 @@ process_extended_message(struct Scsi_Host *host,
                printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
                       host->host_no, pun, lun);
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 
                break;
@@ -845,8 +849,9 @@ process_extended_message(struct Scsi_Host *host,
                printk("\n");
                /* just reject it */
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                /* SendMsgOut returns, so set up the return
                 * address */
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -927,8 +932,9 @@ process_message(struct Scsi_Host *host,     struct NCR_700_Host_Parameters *hostdata
                printk("\n");
                /* just reject it */
                hostdata->msgout[0] = A_REJECT_MSG;
-               dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-               script_patch_16(hostdata->script, MessageCount, 1);
+               dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+               script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+                               1);
                /* SendMsgOut returns, so set up the return
                 * address */
                resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -937,7 +943,7 @@ process_message(struct Scsi_Host *host,     struct NCR_700_Host_Parameters *hostdata
        }
        NCR_700_writel(temp, host, TEMP_REG);
        /* set us up to receive another message */
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
        return resume_offset;
 }
 
@@ -1007,6 +1013,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                                 * of the command */
                                cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
                                cmnd[7] = hostdata->status[0];
+                               cmnd[8] = SCp->cmd_len;
+                               SCp->cmd_len = 6; /* command length for
+                                                  * REQUEST_SENSE */
                                slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
                                slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
                                slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
@@ -1014,9 +1023,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                                slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
                                slot->SG[1].pAddr = 0;
                                slot->resume_offset = hostdata->pScript;
-                               dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-                               dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
-                               
+                               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+                               dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+
                                /* queue the command for reissue */
                                slot->state = NCR_700_SLOT_QUEUED;
                                slot->flags = NCR_700_FLAG_AUTOSENSE;
@@ -1131,11 +1140,12 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                        hostdata->cmd = slot->cmnd;
 
                        /* re-patch for this command */
-                       script_patch_32_abs(hostdata->script, CommandAddress, 
-                                           slot->pCmd);
-                       script_patch_16(hostdata->script,
+                       script_patch_32_abs(hostdata->dev, hostdata->script,
+                                           CommandAddress, slot->pCmd);
+                       script_patch_16(hostdata->dev, hostdata->script,
                                        CommandCount, slot->cmnd->cmd_len);
-                       script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+                       script_patch_32_abs(hostdata->dev, hostdata->script,
+                                           SGScriptStartAddress,
                                            to32bit(&slot->pSG[0].ins));
 
                        /* Note: setting SXFER only works if we're
@@ -1145,13 +1155,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                         * should therefore always clear ACK */
                        NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
                                       host, SXFER_REG);
-                       dma_cache_sync(hostdata->msgin,
+                       dma_cache_sync(hostdata->dev, hostdata->msgin,
                                       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-                       dma_cache_sync(hostdata->msgout,
+                       dma_cache_sync(hostdata->dev, hostdata->msgout,
                                       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
                        /* I'm just being paranoid here, the command should
                         * already have been flushed from the cache */
-                       dma_cache_sync(slot->cmnd->cmnd,
+                       dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
                                       slot->cmnd->cmd_len, DMA_TO_DEVICE);
 
 
@@ -1215,7 +1225,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                hostdata->reselection_id = reselection_id;
                /* just in case we have a stale simple tag message, clear it */
                hostdata->msgin[1] = 0;
-               dma_cache_sync(hostdata->msgin,
+               dma_cache_sync(hostdata->dev, hostdata->msgin,
                               MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
                if(hostdata->tag_negotiated & (1<<reselection_id)) {
                        resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
@@ -1331,7 +1341,7 @@ process_selection(struct Scsi_Host *host, __u32 dsp)
        hostdata->cmd = NULL;
        /* clear any stale simple tag message */
        hostdata->msgin[1] = 0;
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
                       DMA_BIDIRECTIONAL);
 
        if(id == 0xff) {
@@ -1428,29 +1438,30 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
                NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
        }
 
-       script_patch_16(hostdata->script, MessageCount, count);
+       script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
 
-       script_patch_ID(hostdata->script,
+       script_patch_ID(hostdata->dev, hostdata->script,
                        Device_ID, 1<<scmd_id(SCp));
 
-       script_patch_32_abs(hostdata->script, CommandAddress, 
+       script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
                            slot->pCmd);
-       script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+       script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+                       SCp->cmd_len);
        /* finally plumb the beginning of the SG list into the script
         * */
-       script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-                           to32bit(&slot->pSG[0].ins));
+       script_patch_32_abs(hostdata->dev, hostdata->script,
+                           SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
        NCR_700_clear_fifo(SCp->device->host);
 
        if(slot->resume_offset == 0)
                slot->resume_offset = hostdata->pScript;
        /* now perform all the writebacks and invalidates */
-       dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-       dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+       dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
                       DMA_FROM_DEVICE);
-       dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-       dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+       dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+       dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
        /* set the synchronous period/offset */
        NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1626,7 +1637,7 @@ NCR_700_intr(int irq, void *dev_id)
                                        slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
                                        slot->SG[i].pAddr = 0;
                                }
-                               dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+                               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
                                /* and pretend we disconnected after
                                 * the command phase */
                                resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1892,9 +1903,9 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
                }
                slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
                slot->SG[i].pAddr = 0;
-               dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+               dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
                DEBUG((" SETTING %08lx to %x\n",
-                      (&slot->pSG[i].ins), 
+                      (&slot->pSG[i].ins),
                       slot->SG[i].ins));
        }
        slot->resume_offset = 0;
index f5c3caf344a7626b4c87bf670595e6fa120a5bc6..f38822db4210886f8654c33bd03a3e2dd15635b9 100644 (file)
@@ -415,31 +415,31 @@ struct NCR_700_Host_Parameters {
 #define NCR_710_MIN_XFERP      0
 #define NCR_700_MIN_PERIOD     25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                __u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching %s at %d to 0x%lx\n", \
                       #symbol, A_##symbol##_used[i], (value))); \
        } \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
                (script)[A_##symbol##_used[i]] = bS_to_host(value); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching %s at %d to 0x%lx\n", \
                       #symbol, A_##symbol##_used[i], (value))); \
        } \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -447,13 +447,13 @@ struct NCR_700_Host_Parameters {
                val &= 0xff00ffff; \
                val |= ((value) & 0xff) << 16; \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
                       #symbol, A_##symbol##_used[i], val)); \
        } \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
        int i; \
        for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -461,7 +461,7 @@ struct NCR_700_Host_Parameters {
                val &= 0xffff0000; \
                val |= ((value) & 0xffff); \
                (script)[A_##symbol##_used[i]] = bS_to_host(val); \
-               dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+               dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
                DEBUG((" script, patching short field %s at %d to 0x%x\n", \
                       #symbol, A_##symbol##_used[i], val)); \
        } \
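
The script_patch_* and message-buffer changes in the 53c700 driver all come down to one interface change: dma_cache_sync() now takes the struct device, so the architecture can choose cache maintenance appropriate to that device's bus. A hedged sketch with an illustrative helper name:

#include <linux/dma-mapping.h>

static void my_publish_msgout(struct device *dev, void *msgout, size_t len)
{
        /* flush CPU writes so the SCSI script engine sees current data */
        dma_cache_sync(dev, msgout, len, DMA_TO_DEVICE);
}
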
index cdd03372478617f3e13a2220cc715fb836f7d0b7..3075204915c867b532509788e18668393840dd0b 100644 (file)
@@ -2186,21 +2186,21 @@ static int __init BusLogic_init(void)
 
        if (BusLogic_ProbeOptions.NoProbe)
                return -ENODEV;
-       BusLogic_ProbeInfoList = (struct BusLogic_ProbeInfo *)
-           kmalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_ATOMIC);
+       BusLogic_ProbeInfoList =
+           kzalloc(BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo), GFP_KERNEL);
        if (BusLogic_ProbeInfoList == NULL) {
                BusLogic_Error("BusLogic: Unable to allocate Probe Info List\n", NULL);
                return -ENOMEM;
        }
-       memset(BusLogic_ProbeInfoList, 0, BusLogic_MaxHostAdapters * sizeof(struct BusLogic_ProbeInfo));
-       PrototypeHostAdapter = (struct BusLogic_HostAdapter *)
-           kmalloc(sizeof(struct BusLogic_HostAdapter), GFP_ATOMIC);
+
+       PrototypeHostAdapter =
+           kzalloc(sizeof(struct BusLogic_HostAdapter), GFP_KERNEL);
        if (PrototypeHostAdapter == NULL) {
                kfree(BusLogic_ProbeInfoList);
                BusLogic_Error("BusLogic: Unable to allocate Prototype " "Host Adapter\n", NULL);
                return -ENOMEM;
        }
-       memset(PrototypeHostAdapter, 0, sizeof(struct BusLogic_HostAdapter));
+
 #ifdef MODULE
        if (BusLogic != NULL)
                BusLogic_Setup(BusLogic);
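The BusLogic hunk above folds each kmalloc()/memset() pair into kzalloc(), which returns already-zeroed memory, and relaxes GFP_ATOMIC to GFP_KERNEL since this init path may sleep. A generic sketch of the equivalence, with placeholder names:

	/* before: allocate, then clear by hand */
	buf = kmalloc(len, GFP_KERNEL);
	if (buf)
		memset(buf, 0, len);

	/* after: a single call that returns zeroed memory */
	buf = kzalloc(len, GFP_KERNEL);
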
index 9540eb8efdcbfc5b6db2051470bf9a203112fe59..69569096dae537f164d52df669510de68bf866a8 100644 (file)
@@ -29,6 +29,13 @@ config SCSI
          However, do not compile this as a module if your root file system
          (the one containing the directory /) is located on a SCSI device.
 
+config SCSI_TGT
+       tristate "SCSI target support"
+       depends on SCSI && EXPERIMENTAL
+       ---help---
+         If you want to use SCSI target mode drivers, enable this option.
+         If you choose M, the module will be called scsi_tgt.
+
 config SCSI_NETLINK
        bool
        default n
@@ -216,6 +223,23 @@ config SCSI_LOGGING
          there should be no noticeable performance impact as long as you have
          logging turned off.
 
+config SCSI_SCAN_ASYNC
+       bool "Asynchronous SCSI scanning"
+       depends on SCSI
+       help
+         The SCSI subsystem can probe for devices while the rest of the
+         system continues booting, and even probe devices on different
+         busses in parallel, leading to a significant speed-up.
+         If you have built SCSI as modules, enabling this option can
+         be a problem: devices may not yet have been found by the
+         time your system expects them to be available.  You can load
+         the scsi_wait_scan module to ensure that all scans have completed.
+         If you build your SCSI drivers into the kernel, then everything
+         will work fine if you say Y here.
+
+         You can override this choice by specifying scsi_mod.scan="sync"
+         or "async" on the kernel's command line.
+
 menu "SCSI Transports"
        depends on SCSI
 
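A usage note on the asynchronous-scan option added above: with the SCSI drivers built into the kernel, saying Y simply lets device scanning overlap the rest of boot. With a modular SCSI stack, the behaviour can still be forced by appending scsi_mod.scan=sync (or =async) to the kernel command line, and running modprobe scsi_wait_scan after the host drivers are loaded blocks until all outstanding scans have finished.
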
@@ -797,6 +821,20 @@ config SCSI_IBMVSCSI
          To compile this driver as a module, choose M here: the
          module will be called ibmvscsic.
 
+config SCSI_IBMVSCSIS
+       tristate "IBM Virtual SCSI Server support"
+       depends on PPC_PSERIES && SCSI_TGT && SCSI_SRP
+       help
+         This is the SRP target driver for IBM pSeries virtual environments.
+
+         The userspace component needed to initialize the driver, and
+         its documentation, can be found at:
+
+         http://stgt.berlios.de/
+
+         To compile this driver as a module, choose M here: the
+         module will be called ibmvstgt.
+
 config SCSI_INITIO
        tristate "Initio 9100U(W) support"
        depends on PCI && SCSI
@@ -944,8 +982,13 @@ config SCSI_STEX
        tristate "Promise SuperTrak EX Series support"
        depends on PCI && SCSI
        ---help---
-         This driver supports Promise SuperTrak EX8350/8300/16350/16300
-         Storage controllers.
+         This driver supports Promise SuperTrak EX series storage controllers.
+
+         Promise provides a Linux RAID configuration utility for these
+         controllers. Please visit <http://www.promise.com> to download it.
+
+         To compile this driver as a module, choose M here: the
+         module will be called stex.
 
 config SCSI_SYM53C8XX_2
        tristate "SYM53C8XX Version 2 SCSI support"
@@ -1026,6 +1069,7 @@ config SCSI_IPR
 config SCSI_IPR_TRACE
        bool "enable driver internal trace"
        depends on SCSI_IPR
+       default y
        help
          If you say Y here, the driver will trace all commands issued
          to the adapter. Performance impact is minimal. Trace can be
@@ -1034,6 +1078,7 @@ config SCSI_IPR_TRACE
 config SCSI_IPR_DUMP
        bool "enable adapter dump support"
        depends on SCSI_IPR
+       default y
        help
          If you say Y here, the driver will support adapter crash dump.
          If you enable this support, the iprdump daemon can be used
@@ -1734,6 +1779,16 @@ config ZFCP
           called zfcp. If you want to compile it as a module, say M here
           and read <file:Documentation/modules.txt>.
 
+config SCSI_SRP
+       tristate "SCSI RDMA Protocol helper library"
+       depends on SCSI && PCI
+       select SCSI_TGT
+       help
+         If you wish to use SRP target drivers, say Y.
+
+         To compile this driver as a module, choose M here: the
+         module will be called libsrp.
+
 endmenu
 
 source "drivers/scsi/pcmcia/Kconfig"
index bcca39c3bcbf33096bb3fdc5c9a493996c2c9492..bd7c9888f7f40ca31052e1fa62796fbb08fb9e37 100644 (file)
@@ -21,6 +21,7 @@ CFLAGS_seagate.o =   -DARBITRATE -DPARITY -DSEAGATE_USE_ASM
 subdir-$(CONFIG_PCMCIA)                += pcmcia
 
 obj-$(CONFIG_SCSI)             += scsi_mod.o
+obj-$(CONFIG_SCSI_TGT)         += scsi_tgt.o
 
 obj-$(CONFIG_RAID_ATTRS)       += raid_class.o
 
@@ -125,7 +126,9 @@ obj-$(CONFIG_SCSI_FCAL)             += fcal.o
 obj-$(CONFIG_SCSI_LASI700)     += 53c700.o lasi700.o
 obj-$(CONFIG_SCSI_NSP32)       += nsp32.o
 obj-$(CONFIG_SCSI_IPR)         += ipr.o
+obj-$(CONFIG_SCSI_SRP)         += libsrp.o
 obj-$(CONFIG_SCSI_IBMVSCSI)    += ibmvscsi/
+obj-$(CONFIG_SCSI_IBMVSCSIS)   += ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)      += hptiop.o
 obj-$(CONFIG_SCSI_STEX)                += stex.o
 
@@ -141,6 +144,8 @@ obj-$(CONFIG_CHR_DEV_SCH)   += ch.o
 # This goes last, so that "real" scsi devices probe earlier
 obj-$(CONFIG_SCSI_DEBUG)       += scsi_debug.o
 
+obj-$(CONFIG_SCSI)             += scsi_wait_scan.o
+
 scsi_mod-y                     += scsi.o hosts.o scsi_ioctl.o constants.o \
                                   scsicam.o scsi_error.o scsi_lib.o \
                                   scsi_scan.o scsi_sysfs.o \
@@ -149,6 +154,8 @@ scsi_mod-$(CONFIG_SCSI_NETLINK)     += scsi_netlink.o
 scsi_mod-$(CONFIG_SYSCTL)      += scsi_sysctl.o
 scsi_mod-$(CONFIG_SCSI_PROC_FS)        += scsi_proc.o
 
+scsi_tgt-y                     += scsi_tgt_lib.o scsi_tgt_if.o
+
 sd_mod-objs    := sd.o
 sr_mod-objs    := sr.o sr_ioctl.o sr_vendor.o
 ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
index a6aa9107288090ed544abeaf4d2a4e51bf147267..bb3cb33605417aa3f4b23dd4dc6216b744ebc2ad 100644 (file)
@@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
        hostdata->issue_queue = NULL;
        hostdata->disconnected_queue = NULL;
        
-       INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata);
+       INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
        
 #ifdef NCR5380_STATS
        for (i = 0; i < 8; ++i) {
@@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
 
        /* Run the coroutine if it isn't already running. */
        /* Kick off command processing */
-       schedule_work(&hostdata->coroutine);
+       schedule_delayed_work(&hostdata->coroutine, 0);
        return 0;
 }
 
@@ -1033,9 +1033,10 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
  *     host lock and called routines may take the isa dma lock.
  */
 
-static void NCR5380_main(void *p)
+static void NCR5380_main(struct work_struct *work)
 {
-       struct NCR5380_hostdata *hostdata = p;
+       struct NCR5380_hostdata *hostdata =
+               container_of(work, struct NCR5380_hostdata, coroutine.work);
        struct Scsi_Host *instance = hostdata->host;
        Scsi_Cmnd *tmp, *prev;
        int done;
@@ -1221,7 +1222,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
                }       /* if BASR_IRQ */
                spin_unlock_irqrestore(instance->host_lock, flags);
                if(!done)
-                       schedule_work(&hostdata->coroutine);
+                       schedule_delayed_work(&hostdata->coroutine, 0);
        } while (!done);
        return IRQ_HANDLED;
 }
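The NCR5380 hunks above follow the 2.6.20 work-queue API conversion: handlers now receive a struct work_struct * and recover their private data with container_of(), and polling is re-armed through a zero-delay delayed_work instead of re-initializing the work item. A minimal, driver-independent sketch of the pattern (names are placeholders):

	struct my_hostdata {
		struct delayed_work poll;	/* was: struct work_struct */
	};

	static void my_poll(struct work_struct *work)
	{
		struct my_hostdata *hd =
			container_of(work, struct my_hostdata, poll.work);
		/* ... service the host described by hd ... */
	}

	/* at init time */
	INIT_DELAYED_WORK(&hd->poll, my_poll);
	/* run as soon as possible, i.e. with a delay of 0 jiffies */
	schedule_delayed_work(&hd->poll, 0);
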
index 1bc73de496b01d81ae5964a2246579b37b617c92..713a108c02ef1901c99667773204e84b7602ef12 100644 (file)
@@ -271,7 +271,7 @@ struct NCR5380_hostdata {
        unsigned long time_expires;             /* in jiffies, set prior to sleeping */
        int select_time;                        /* timer in select for target response */
        volatile Scsi_Cmnd *selecting;
-       struct work_struct coroutine;           /* our co-routine */
+       struct delayed_work coroutine;          /* our co-routine */
 #ifdef NCR5380_STATS
        unsigned timebase;                      /* Base for time calcs */
        long time_read[8];                      /* time to do reads */
@@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
 #ifndef DONT_USE_INTR
 static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 #endif
-static void NCR5380_main(void *ptr);
+static void NCR5380_main(struct work_struct *work);
 static void NCR5380_print_options(struct Scsi_Host *instance);
 #ifdef NDEBUG
 static void NCR5380_print_phase(struct Scsi_Host *instance);
index d4613815f685d26115808d1887223e3c8d7b2e96..8578555d58fd08f79a4515af12c7f9e990d8cdd4 100644 (file)
@@ -220,9 +220,11 @@ static void *addresses[] = {
 static unsigned short ports[] = { 0x230, 0x330, 0x280, 0x290, 0x330, 0x340, 0x300, 0x310, 0x348, 0x350 };
 #define PORT_COUNT ARRAY_SIZE(ports)
 
+#ifndef MODULE
 /* possible interrupt channels */
 static unsigned short intrs[] = { 10, 11, 12, 15 };
 #define INTR_COUNT ARRAY_SIZE(intrs)
+#endif /* !MODULE */
 
 /* signatures for NCR 53c406a based controllers */
 #if USE_BIOS
@@ -605,6 +607,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost)
        return 0;
 }
 
+#ifndef MODULE
 /* called from init/main.c */
 static int __init NCR53c406a_setup(char *str)
 {
@@ -661,6 +664,8 @@ static int __init NCR53c406a_setup(char *str)
 
 __setup("ncr53c406a=", NCR53c406a_setup);
 
+#endif /* !MODULE */
+
 static const char *NCR53c406a_info(struct Scsi_Host *SChost)
 {
        DEB(printk("NCR53c406a_info called\n"));
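The #ifndef MODULE guards added above reflect that __setup() command-line hooks only ever run for built-in code, so compiling them out of the modular build avoids defined-but-unused warnings. The general shape of the pattern, with hypothetical names:

	#ifndef MODULE
	static int __init mydrv_setup(char *str)
	{
		/* parse "mydrv=..." options from the kernel command line */
		return 1;
	}
	__setup("mydrv=", mydrv_setup);
	#endif /* only meaningful when the driver is built in */
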
index eb3ed91bac796674a3ab0ce485282bcec7ab8490..4f8b4c53d435867da99381898093ab5cc393dec5 100644 (file)
@@ -11,8 +11,8 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 2409
-# define AAC_DRIVER_BRANCH "-mh2"
+# define AAC_DRIVER_BUILD 2423
+# define AAC_DRIVER_BRANCH "-mh3"
 #endif
 #define MAXIMUM_NUM_CONTAINERS 32
 
index 19e42ac07cb228dd09dc771ec9a85f6513f877fd..4893a6d06a332ac6f32500f3e0ebc3a1a7183838 100644 (file)
@@ -518,6 +518,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                         */
                        unsigned long count = 36000000L; /* 3 minutes */
                        while (down_trylock(&fibptr->event_wait)) {
+                               int blink;
                                if (--count == 0) {
                                        spin_lock_irqsave(q->lock, qflags);
                                        q->numpending--;
@@ -530,6 +531,14 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        }
                                        return -ETIMEDOUT;
                                }
+                               if ((blink = aac_adapter_check_health(dev)) > 0) {
+                                       if (wait == -1) {
+                                               printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
+                                                 "Usually a result of a serious unrecoverable hardware problem\n",
+                                                 blink);
+                                       }
+                                       return -EFAULT;
+                               }
                                udelay(5);
                        }
                } else if (down_interruptible(&fibptr->event_wait)) {
@@ -1093,6 +1102,20 @@ static int _aac_reset_adapter(struct aac_dev *aac)
                goto out;
        }
 
+       /*
+        *      Loop through the fibs, close the synchronous FIBS
+        */
+       for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+               struct fib *fib = &aac->fibs[index];
+               if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+                 (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
+                       unsigned long flagv;
+                       spin_lock_irqsave(&fib->event_lock, flagv);
+                       up(&fib->event_wait);
+                       spin_unlock_irqrestore(&fib->event_lock, flagv);
+                       schedule();
+               }
+       }
        index = aac->cardtype;
 
        /*
index 306f46b85a5522aff952da4fe6e0759a86b2a651..0cec742d12e9dc52bf4f895d2a20ec1feece0cae 100644 (file)
@@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq;
  * Run service completions on the card with interrupts enabled.
  *
  */
-static void run(void)
+static void run(struct work_struct *work)
 {
        struct aha152x_hostdata *hd;
 
@@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id)
                HOSTDATA(shpnt)->service=1;
 
                /* Poke the BH handler */
-               INIT_WORK(&aha152x_tq, (void *) run, NULL);
+               INIT_WORK(&aha152x_tq, run);
                schedule_work(&aha152x_tq);
        }
        DO_UNLOCK(flags);
index c3c38a7e8d32d091a3bbd7ee925d7e81bc1492d2..d7af9c63a04d66d2f4dd2536725c39a901fe7d6f 100644 (file)
@@ -586,7 +586,7 @@ static struct scsi_host_template aha1740_template = {
 
 static int aha1740_probe (struct device *dev)
 {
-       int slotbase;
+       int slotbase, rc;
        unsigned int irq_level, irq_type, translation;
        struct Scsi_Host *shpnt;
        struct aha1740_hostdata *host;
@@ -641,10 +641,16 @@ static int aha1740_probe (struct device *dev)
        }
 
        eisa_set_drvdata (edev, shpnt);
-       scsi_add_host (shpnt, dev); /* XXX handle failure */
+
+       rc = scsi_add_host (shpnt, dev);
+       if (rc)
+               goto err_irq;
+
        scsi_scan_host (shpnt);
        return 0;
 
+ err_irq:
+       free_irq(irq_level, shpnt);
  err_unmap:
        dma_unmap_single (&edev->dev, host->ecb_dma_addr,
                          sizeof (host->ecb), DMA_BIDIRECTIONAL);
index 2001fe890e71651ed36eb5cdc701fcf28ee74e02..1a3ab6aa856bb158e79574a4856e8374bbeb5127 100644 (file)
@@ -62,6 +62,7 @@ static struct pci_device_id ahd_linux_pci_id_table[] = {
        /* aic7901 based controllers */
        ID(ID_AHA_29320A),
        ID(ID_AHA_29320ALP),
+       ID(ID_AHA_29320LPE),
        /* aic7902 based controllers */
        ID(ID_AHA_29320),
        ID(ID_AHA_29320B),
index c07735819cd1d1aba2f3cabe9085d9d4e0dc6414..2cf7bb3123f099c732ba665c514b085ee0eb60e1 100644 (file)
@@ -109,7 +109,13 @@ static struct ahd_pci_identity ahd_pci_ident_table [] =
        {
                ID_AHA_29320ALP,
                ID_ALL_MASK,
-               "Adaptec 29320ALP Ultra320 SCSI adapter",
+               "Adaptec 29320ALP PCIx Ultra320 SCSI adapter",
+               ahd_aic7901_setup
+       },
+       {
+               ID_AHA_29320LPE,
+               ID_ALL_MASK,
+               "Adaptec 29320LPE PCIe Ultra320 SCSI adapter",
                ahd_aic7901_setup
        },
        /* aic7901A based controllers */
index da45153668c7589587c4a3a831125c26d24db66d..16b7c70a673cecd7efca7b224fe10cc44f72ebf8 100644 (file)
@@ -51,6 +51,7 @@
 #define ID_AIC7901                     0x800F9005FFFF9005ull
 #define ID_AHA_29320A                  0x8000900500609005ull
 #define ID_AHA_29320ALP                        0x8017900500449005ull
+#define ID_AHA_29320LPE                        0x8017900500459005ull
 
 #define ID_AIC7901A                    0x801E9005FFFF9005ull
 #define ID_AHA_29320LP                 0x8014900500449005ull
index 71a031df7a34320af014a1a64dc66364e3bae348..32f513b1b78ab314253e7ddceb4117aa5e40fcad 100644 (file)
@@ -56,8 +56,8 @@
 /* 2*ITNL timeout + 1 second */
 #define AIC94XX_SCB_TIMEOUT  (5*HZ)
 
-extern kmem_cache_t *asd_dma_token_cache;
-extern kmem_cache_t *asd_ascb_cache;
+extern struct kmem_cache *asd_dma_token_cache;
+extern struct kmem_cache *asd_ascb_cache;
 extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
 
 static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
index af7e011343643fab6b8404d2a04f419707eb2018..da94e126ca832d5e4d157c65c3b34acaab20620c 100644 (file)
@@ -1047,7 +1047,7 @@ irqreturn_t asd_hw_isr(int irq, void *dev_id)
 static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
                                              gfp_t gfp_flags)
 {
-       extern kmem_cache_t *asd_ascb_cache;
+       extern struct kmem_cache *asd_ascb_cache;
        struct asd_seq_data *seq = &asd_ha->seq;
        struct asd_ascb *ascb;
        unsigned long flags;
index 57c5ba4043f29f99c9410ce039114bc2edc94134..fbc82b00a418cf8ff6d43603a36ca75f2a83fa81 100644 (file)
@@ -450,8 +450,8 @@ static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
        asd_ha->scb_pool = NULL;
 }
 
-kmem_cache_t *asd_dma_token_cache;
-kmem_cache_t *asd_ascb_cache;
+struct kmem_cache *asd_dma_token_cache;
+struct kmem_cache *asd_ascb_cache;
 
 static int asd_create_global_caches(void)
 {
@@ -724,6 +724,15 @@ static void asd_free_queues(struct asd_ha_struct *asd_ha)
 
        list_for_each_safe(pos, n, &pending) {
                struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list);
+               /*
+                * Delete unexpired ascb timers.  This may happen if we issue
+                * a CONTROL PHY scb to an adapter and rmmod before the scb
+                * times out.  Apparently we don't wait for the CONTROL PHY
+                * to complete, so it doesn't matter if we kill the timer.
+                */
+               del_timer_sync(&ascb->timer);
+               WARN_ON(ascb->scb->header.opcode != CONTROL_PHY);
+
                list_del_init(pos);
                ASD_DPRINTK("freeing from pending\n");
                asd_ascb_free(ascb);
index b15caf1c8fa21c2ffed74c9b1d2ed73259133ae3..75ed6b0569d1ad9243e8be51e265f7df144abbd6 100644 (file)
@@ -25,6 +25,7 @@
  */
 
 #include <linux/pci.h>
+#include <scsi/scsi_host.h>
 
 #include "aic94xx.h"
 #include "aic94xx_reg.h"
@@ -412,6 +413,40 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id)
        }
 }
 
+/* hard reset a phy later */
+static void do_phy_reset_later(struct work_struct *work)
+{
+       struct sas_phy *sas_phy =
+               container_of(work, struct sas_phy, reset_work);
+       int error;
+
+       ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__,
+                   sas_phy->identify.phy_identifier);
+       /* Reset device port */
+       error = sas_phy_reset(sas_phy, 1);
+       if (error)
+               ASD_DPRINTK("%s: Hard reset of phy %d failed (%d).\n",
+                           __FUNCTION__, sas_phy->identify.phy_identifier, error);
+}
+
+static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost)
+{
+       INIT_WORK(&sas_phy->reset_work, do_phy_reset_later);
+       queue_work(shost->work_q, &sas_phy->reset_work);
+}
+
+/* start up the ABORT TASK tmf... */
+static void task_kill_later(struct asd_ascb *ascb)
+{
+       struct asd_ha_struct *asd_ha = ascb->ha;
+       struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
+       struct Scsi_Host *shost = sas_ha->core.shost;
+       struct sas_task *task = ascb->uldd_task;
+
+       INIT_WORK(&task->abort_work, sas_task_abort);
+       queue_work(shost->work_q, &task->abort_work);
+}
+
 static void escb_tasklet_complete(struct asd_ascb *ascb,
                                  struct done_list_struct *dl)
 {
@@ -439,6 +474,74 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
                            ascb->scb->header.opcode);
        }
 
+       /* Catch these before we mask off the sb_opcode bits */
+       switch (sb_opcode) {
+       case REQ_TASK_ABORT: {
+               struct asd_ascb *a, *b;
+               u16 tc_abort;
+
+               tc_abort = *((u16*)(&dl->status_block[1]));
+               tc_abort = le16_to_cpu(tc_abort);
+
+               ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
+                           __FUNCTION__, dl->status_block[3]);
+
+               /* Find the pending task and abort it. */
+               list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list)
+                       if (a->tc_index == tc_abort) {
+                               task_kill_later(a);
+                               break;
+                       }
+               goto out;
+       }
+       case REQ_DEVICE_RESET: {
+               struct Scsi_Host *shost = sas_ha->core.shost;
+               struct sas_phy *dev_phy;
+               struct asd_ascb *a;
+               u16 conn_handle;
+
+               conn_handle = *((u16*)(&dl->status_block[1]));
+               conn_handle = le16_to_cpu(conn_handle);
+
+               ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__,
+                           dl->status_block[3]);
+
+               /* Kill all pending tasks and reset the device */
+               dev_phy = NULL;
+               list_for_each_entry(a, &asd_ha->seq.pend_q, list) {
+                       struct sas_task *task;
+                       struct domain_device *dev;
+                       u16 x;
+
+                       task = a->uldd_task;
+                       if (!task)
+                               continue;
+                       dev = task->dev;
+
+                       x = (unsigned long)dev->lldd_dev;
+                       if (x == conn_handle) {
+                               dev_phy = dev->port->phy;
+                               task_kill_later(a);
+                       }
+               }
+
+               /* Reset device port */
+               if (!dev_phy) {
+                       ASD_DPRINTK("%s: No pending commands; can't reset.\n",
+                                   __FUNCTION__);
+                       goto out;
+               }
+               phy_reset_later(dev_phy, shost);
+               goto out;
+       }
+       case SIGNAL_NCQ_ERROR:
+               ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__);
+               goto out;
+       case CLEAR_NCQ_ERROR:
+               ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__);
+               goto out;
+       }
+
        sb_opcode &= ~DL_PHY_MASK;
 
        switch (sb_opcode) {
@@ -469,22 +572,6 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
                asd_deform_port(asd_ha, phy);
                sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
                break;
-       case REQ_TASK_ABORT:
-               ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__,
-                           phy_id);
-               break;
-       case REQ_DEVICE_RESET:
-               ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__,
-                           phy_id);
-               break;
-       case SIGNAL_NCQ_ERROR:
-               ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__,
-                           phy_id);
-               break;
-       case CLEAR_NCQ_ERROR:
-               ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__,
-                           phy_id);
-               break;
        default:
                ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__,
                            phy_id, sb_opcode);
@@ -504,7 +591,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
 
                break;
        }
-
+out:
        asd_invalidate_edb(ascb, edb);
 }
 
index ef8285c326e42f67547a390a397d01ab1d34ef21..668569e8856bdb635ad90d9aaa718abcedf0b754 100644 (file)
@@ -294,6 +294,7 @@ static struct Scsi_Host *hosts[FD_MAX_HOSTS + 1] = { NULL };
 static int user_fifo_count = 0;
 static int user_fifo_size = 0;
 
+#ifndef MODULE
 static int __init fd_mcs_setup(char *str)
 {
        static int done_setup = 0;
@@ -311,6 +312,7 @@ static int __init fd_mcs_setup(char *str)
 }
 
 __setup("fd_mcs=", fd_mcs_setup);
+#endif /* !MODULE */
 
 static void print_banner(struct Scsi_Host *shpnt)
 {
index 68ef1636678dfe21189a39ebb10accba5068e70e..38c3a291efacd532cfdba99d7e7c291fc8de0395 100644 (file)
@@ -263,6 +263,10 @@ static void scsi_host_dev_release(struct device *dev)
                kthread_stop(shost->ehandler);
        if (shost->work_q)
                destroy_workqueue(shost->work_q);
+       if (shost->uspace_req_q) {
+               kfree(shost->uspace_req_q->queuedata);
+               scsi_free_queue(shost->uspace_req_q);
+       }
 
        scsi_destroy_command_freelist(shost);
        if (shost->bqt)
@@ -301,8 +305,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        if (!shost)
                return NULL;
 
-       spin_lock_init(&shost->default_lock);
-       scsi_assign_lock(shost, &shost->default_lock);
+       shost->host_lock = &shost->default_lock;
+       spin_lock_init(shost->host_lock);
        shost->shost_state = SHOST_CREATED;
        INIT_LIST_HEAD(&shost->__devices);
        INIT_LIST_HEAD(&shost->__targets);
index 4e247b6b8700b9b6c0d3195e83b0c2e4f7eeeeb6..6ac0633d5452af8c36a0683653485980335950e3 100644 (file)
@@ -3,3 +3,5 @@ obj-$(CONFIG_SCSI_IBMVSCSI)     += ibmvscsic.o
 ibmvscsic-y                    += ibmvscsi.o
 ibmvscsic-$(CONFIG_PPC_ISERIES)        += iseries_vscsi.o 
 ibmvscsic-$(CONFIG_PPC_PSERIES)        += rpa_vscsi.o 
+
+obj-$(CONFIG_SCSI_IBMVSCSIS)   += ibmvstgt.o
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
new file mode 100644 (file)
index 0000000..e28260f
--- /dev/null
@@ -0,0 +1,960 @@
+/*
+ * IBM eServer i/pSeries Virtual SCSI Target Driver
+ * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
+ *                        Santiago Leon (santil@us.ibm.com) IBM Corp.
+ *                        Linda Xie (lxie@us.ibm.com) IBM Corp.
+ *
+ * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tgt.h>
+#include <scsi/libsrp.h>
+#include <asm/hvcall.h>
+#include <asm/iommu.h>
+#include <asm/prom.h>
+#include <asm/vio.h>
+
+#include "ibmvscsi.h"
+
+#define        INITIAL_SRP_LIMIT       16
+#define        DEFAULT_MAX_SECTORS     512
+
+#define        TGT_NAME        "ibmvstgt"
+
+/*
+ * Hypervisor calls.
+ */
+#define h_copy_rdma(l, sa, sb, da, db) \
+                       plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
+#define h_send_crq(ua, l, h) \
+                       plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
+#define h_reg_crq(ua, tok, sz)\
+                       plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
+#define h_free_crq(ua) \
+                       plpar_hcall_norets(H_FREE_CRQ, ua);
+
+/* tmp - will replace with SCSI logging stuff */
+#define eprintk(fmt, args...)                                  \
+do {                                                           \
+       printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);  \
+} while (0)
+/* #define dprintk eprintk */
+#define dprintk(fmt, args...)
+
+struct vio_port {
+       struct vio_dev *dma_dev;
+
+       struct crq_queue crq_queue;
+       struct work_struct crq_work;
+
+       unsigned long liobn;
+       unsigned long riobn;
+       struct srp_target *target;
+};
+
+static struct workqueue_struct *vtgtd;
+
+/*
+ * These are fixed for the system and come from the Open Firmware device tree.
+ * We just store them here to save getting them every time.
+ */
+static char system_id[64] = "";
+static char partition_name[97] = "UNKNOWN";
+static unsigned int partition_number = -1;
+
+static struct vio_port *target_to_port(struct srp_target *target)
+{
+       return (struct vio_port *) target->ldata;
+}
+
+static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
+{
+       return (union viosrp_iu *) (iue->sbuf->buf);
+}
+
+static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
+{
+       struct srp_target *target = iue->target;
+       struct vio_port *vport = target_to_port(target);
+       long rc, rc1;
+       union {
+               struct viosrp_crq cooked;
+               uint64_t raw[2];
+       } crq;
+
+       /* First copy the SRP */
+       rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
+                        vport->riobn, iue->remote_token);
+
+       if (rc)
+               eprintk("Error %ld transferring data\n", rc);
+
+       crq.cooked.valid = 0x80;
+       crq.cooked.format = format;
+       crq.cooked.reserved = 0x00;
+       crq.cooked.timeout = 0x00;
+       crq.cooked.IU_length = length;
+       crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
+
+       if (rc == 0)
+               crq.cooked.status = 0x99;       /* Just needs to be non-zero */
+       else
+               crq.cooked.status = 0x00;
+
+       rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
+
+       if (rc1) {
+               eprintk("%ld sending response\n", rc1);
+               return rc1;
+       }
+
+       return rc;
+}
+
+#define SRP_RSP_SENSE_DATA_LEN 18
+
+static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
+                   unsigned char status, unsigned char asc)
+{
+       union viosrp_iu *iu = vio_iu(iue);
+       uint64_t tag = iu->srp.rsp.tag;
+
+       /* If the linked bit is on and status is good */
+       if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
+               status = 0x10;
+
+       memset(iu, 0, sizeof(struct srp_rsp));
+       iu->srp.rsp.opcode = SRP_RSP;
+       iu->srp.rsp.req_lim_delta = 1;
+       iu->srp.rsp.tag = tag;
+
+       if (test_bit(V_DIOVER, &iue->flags))
+               iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
+
+       iu->srp.rsp.data_in_res_cnt = 0;
+       iu->srp.rsp.data_out_res_cnt = 0;
+
+       iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
+
+       iu->srp.rsp.resp_data_len = 0;
+       iu->srp.rsp.status = status;
+       if (status) {
+               uint8_t *sense = iu->srp.rsp.data;
+
+               if (sc) {
+                       iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
+                       iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
+                       memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+               } else {
+                       iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
+                       iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
+                       iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
+
+                       /* Valid bit and 'current errors' */
+                       sense[0] = (0x1 << 7 | 0x70);
+                       /* Sense key */
+                       sense[2] = status;
+                       /* Additional sense length */
+                       sense[7] = 0xa; /* 10 bytes */
+                       /* Additional sense code */
+                       sense[12] = asc;
+               }
+       }
+
+       send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
+               VIOSRP_SRP_FORMAT);
+
+       return 0;
+}
+
+static void handle_cmd_queue(struct srp_target *target)
+{
+       struct Scsi_Host *shost = target->shost;
+       struct iu_entry *iue;
+       struct srp_cmd *cmd;
+       unsigned long flags;
+       int err;
+
+retry:
+       spin_lock_irqsave(&target->lock, flags);
+
+       list_for_each_entry(iue, &target->cmd_queue, ilist) {
+               if (!test_and_set_bit(V_FLYING, &iue->flags)) {
+                       spin_unlock_irqrestore(&target->lock, flags);
+                       cmd = iue->sbuf->buf;
+                       err = srp_cmd_queue(shost, cmd, iue, 0);
+                       if (err) {
+                               eprintk("cannot queue cmd %p %d\n", cmd, err);
+                               srp_iu_put(iue);
+                       }
+                       goto retry;
+               }
+       }
+
+       spin_unlock_irqrestore(&target->lock, flags);
+}
+
+static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
+                        struct srp_direct_buf *md, int nmd,
+                        enum dma_data_direction dir, unsigned int rest)
+{
+       struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+       struct srp_target *target = iue->target;
+       struct vio_port *vport = target_to_port(target);
+       dma_addr_t token;
+       long err;
+       unsigned int done = 0;
+       int i, sidx, soff;
+
+       sidx = soff = 0;
+       token = sg_dma_address(sg + sidx);
+
+       for (i = 0; i < nmd && rest; i++) {
+               unsigned int mdone, mlen;
+
+               mlen = min(rest, md[i].len);
+               for (mdone = 0; mlen;) {
+                       int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
+
+                       if (dir == DMA_TO_DEVICE)
+                               err = h_copy_rdma(slen,
+                                                 vport->riobn,
+                                                 md[i].va + mdone,
+                                                 vport->liobn,
+                                                 token + soff);
+                       else
+                               err = h_copy_rdma(slen,
+                                                 vport->liobn,
+                                                 token + soff,
+                                                 vport->riobn,
+                                                 md[i].va + mdone);
+
+                       if (err != H_SUCCESS) {
+                               eprintk("rdma error %d %d\n", dir, slen);
+                               goto out;
+                       }
+
+                       mlen -= slen;
+                       mdone += slen;
+                       soff += slen;
+                       done += slen;
+
+                       if (soff == sg_dma_len(sg + sidx)) {
+                               sidx++;
+                               soff = 0;
+                               token = sg_dma_address(sg + sidx);
+
+                               if (sidx > nsg) {
+                                       eprintk("out of sg %p %d %d\n",
+                                               iue, sidx, nsg);
+                                       goto out;
+                               }
+                       }
+               };
+
+               rest -= mlen;
+       }
+out:
+
+       return 0;
+}
+
+static int ibmvstgt_transfer_data(struct scsi_cmnd *sc,
+                                 void (*done)(struct scsi_cmnd *))
+{
+       struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+       int err;
+
+       err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
+
+       done(sc);
+
+       return err;
+}
+
+static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
+                            void (*done)(struct scsi_cmnd *))
+{
+       unsigned long flags;
+       struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+       struct srp_target *target = iue->target;
+
+       dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
+
+       spin_lock_irqsave(&target->lock, flags);
+       list_del(&iue->ilist);
+       spin_unlock_irqrestore(&target->lock, flags);
+
+       if (sc->result != SAM_STAT_GOOD) {
+               eprintk("operation failed %p %d %x\n",
+                       iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
+               send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
+       } else
+               send_rsp(iue, sc, NO_SENSE, 0x00);
+
+       done(sc);
+       srp_iu_put(iue);
+       return 0;
+}
+
+int send_adapter_info(struct iu_entry *iue,
+                     dma_addr_t remote_buffer, uint16_t length)
+{
+       struct srp_target *target = iue->target;
+       struct vio_port *vport = target_to_port(target);
+       struct Scsi_Host *shost = target->shost;
+       dma_addr_t data_token;
+       struct mad_adapter_info_data *info;
+       int err;
+
+       info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
+                                 GFP_KERNEL);
+       if (!info) {
+               eprintk("bad dma_alloc_coherent %p\n", target);
+               return 1;
+       }
+
+       /* Get remote info */
+       err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
+                         vport->liobn, data_token);
+       if (err == H_SUCCESS) {
+               dprintk("Client connect: %s (%d)\n",
+                       info->partition_name, info->partition_number);
+       }
+
+       memset(info, 0, sizeof(*info));
+
+       strcpy(info->srp_version, "16.a");
+       strncpy(info->partition_name, partition_name,
+               sizeof(info->partition_name));
+       info->partition_number = partition_number;
+       info->mad_version = 1;
+       info->os_type = 2;
+       info->port_max_txu[0] = shost->hostt->max_sectors << 9;
+
+       /* Send our info to remote */
+       err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
+                         vport->riobn, remote_buffer);
+
+       dma_free_coherent(target->dev, sizeof(*info), info, data_token);
+
+       if (err != H_SUCCESS) {
+               eprintk("Error sending adapter info %d\n", err);
+               return 1;
+       }
+
+       return 0;
+}
+
+static void process_login(struct iu_entry *iue)
+{
+       union viosrp_iu *iu = vio_iu(iue);
+       struct srp_login_rsp *rsp = &iu->srp.login_rsp;
+       uint64_t tag = iu->srp.rsp.tag;
+
+       /* TODO handle the case where the requested size or the
+        * buffer format is wrong
+        */
+       memset(iu, 0, sizeof(struct srp_login_rsp));
+       rsp->opcode = SRP_LOGIN_RSP;
+       rsp->req_lim_delta = INITIAL_SRP_LIMIT;
+       rsp->tag = tag;
+       rsp->max_it_iu_len = sizeof(union srp_iu);
+       rsp->max_ti_iu_len = sizeof(union srp_iu);
+       /* direct and indirect */
+       rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
+
+       send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
+}
+
+static inline void queue_cmd(struct iu_entry *iue)
+{
+       struct srp_target *target = iue->target;
+       unsigned long flags;
+
+       spin_lock_irqsave(&target->lock, flags);
+       list_add_tail(&iue->ilist, &target->cmd_queue);
+       spin_unlock_irqrestore(&target->lock, flags);
+}
+
+static int process_tsk_mgmt(struct iu_entry *iue)
+{
+       union viosrp_iu *iu = vio_iu(iue);
+       int fn;
+
+       dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
+
+       switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
+       case SRP_TSK_ABORT_TASK:
+               fn = ABORT_TASK;
+               break;
+       case SRP_TSK_ABORT_TASK_SET:
+               fn = ABORT_TASK_SET;
+               break;
+       case SRP_TSK_CLEAR_TASK_SET:
+               fn = CLEAR_TASK_SET;
+               break;
+       case SRP_TSK_LUN_RESET:
+               fn = LOGICAL_UNIT_RESET;
+               break;
+       case SRP_TSK_CLEAR_ACA:
+               fn = CLEAR_ACA;
+               break;
+       default:
+               fn = 0;
+       }
+       if (fn)
+               scsi_tgt_tsk_mgmt_request(iue->target->shost, fn,
+                                         iu->srp.tsk_mgmt.task_tag,
+                                         (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
+                                         iue);
+       else
+               send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
+
+       return !fn;
+}
+
+static int process_mad_iu(struct iu_entry *iue)
+{
+       union viosrp_iu *iu = vio_iu(iue);
+       struct viosrp_adapter_info *info;
+       struct viosrp_host_config *conf;
+
+       switch (iu->mad.empty_iu.common.type) {
+       case VIOSRP_EMPTY_IU_TYPE:
+               eprintk("%s\n", "Unsupported EMPTY MAD IU");
+               break;
+       case VIOSRP_ERROR_LOG_TYPE:
+               eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
+               iu->mad.error_log.common.status = 1;
+               send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
+               break;
+       case VIOSRP_ADAPTER_INFO_TYPE:
+               info = &iu->mad.adapter_info;
+               info->common.status = send_adapter_info(iue, info->buffer,
+                                                       info->common.length);
+               send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
+               break;
+       case VIOSRP_HOST_CONFIG_TYPE:
+               conf = &iu->mad.host_config;
+               conf->common.status = 1;
+               send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
+               break;
+       default:
+               eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
+       }
+
+       return 1;
+}
+
+static int process_srp_iu(struct iu_entry *iue)
+{
+       union viosrp_iu *iu = vio_iu(iue);
+       int done = 1;
+       u8 opcode = iu->srp.rsp.opcode;
+
+       switch (opcode) {
+       case SRP_LOGIN_REQ:
+               process_login(iue);
+               break;
+       case SRP_TSK_MGMT:
+               done = process_tsk_mgmt(iue);
+               break;
+       case SRP_CMD:
+               queue_cmd(iue);
+               done = 0;
+               break;
+       case SRP_LOGIN_RSP:
+       case SRP_I_LOGOUT:
+       case SRP_T_LOGOUT:
+       case SRP_RSP:
+       case SRP_CRED_REQ:
+       case SRP_CRED_RSP:
+       case SRP_AER_REQ:
+       case SRP_AER_RSP:
+               eprintk("Unsupported type %u\n", opcode);
+               break;
+       default:
+               eprintk("Unknown type %u\n", opcode);
+       }
+
+       return done;
+}
+
+static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
+{
+       struct vio_port *vport = target_to_port(target);
+       struct iu_entry *iue;
+       long err, done;
+
+       iue = srp_iu_get(target);
+       if (!iue) {
+               eprintk("Error getting IU from pool, %p\n", target);
+               return;
+       }
+
+       iue->remote_token = crq->IU_data_ptr;
+
+       err = h_copy_rdma(crq->IU_length, vport->riobn,
+                         iue->remote_token, vport->liobn, iue->sbuf->dma);
+
+       if (err != H_SUCCESS) {
+               eprintk("%ld transferring data error %p\n", err, iue);
+               done = 1;
+               goto out;
+       }
+
+       if (crq->format == VIOSRP_MAD_FORMAT)
+               done = process_mad_iu(iue);
+       else
+               done = process_srp_iu(iue);
+out:
+       if (done)
+               srp_iu_put(iue);
+}
+
+static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
+{
+       struct srp_target *target = (struct srp_target *) data;
+       struct vio_port *vport = target_to_port(target);
+
+       vio_disable_interrupts(vport->dma_dev);
+       queue_work(vtgtd, &vport->crq_work);
+
+       return IRQ_HANDLED;
+}
+
+static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
+{
+       int err;
+       struct vio_port *vport = target_to_port(target);
+
+       queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
+       if (!queue->msgs)
+               goto malloc_failed;
+       queue->size = PAGE_SIZE / sizeof(*queue->msgs);
+
+       queue->msg_token = dma_map_single(target->dev, queue->msgs,
+                                         queue->size * sizeof(*queue->msgs),
+                                         DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(queue->msg_token))
+               goto map_failed;
+
+       err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
+                       PAGE_SIZE);
+
+       /* If the adapter was left active for some reason (like kexec)
+        * try freeing and re-registering
+        */
+       if (err == H_RESOURCE) {
+           do {
+               err = h_free_crq(vport->dma_dev->unit_address);
+           } while (err == H_BUSY || H_IS_LONG_BUSY(err));
+
+           err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
+                           PAGE_SIZE);
+       }
+
+       if (err != H_SUCCESS && err != 2) {
+               eprintk("Error 0x%x opening virtual adapter\n", err);
+               goto reg_crq_failed;
+       }
+
+       err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
+                         SA_INTERRUPT, "ibmvstgt", target);
+       if (err)
+               goto req_irq_failed;
+
+       vio_enable_interrupts(vport->dma_dev);
+
+       h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
+
+       queue->cur = 0;
+       spin_lock_init(&queue->lock);
+
+       return 0;
+
+req_irq_failed:
+       do {
+               err = h_free_crq(vport->dma_dev->unit_address);
+       } while (err == H_BUSY || H_IS_LONG_BUSY(err));
+
+reg_crq_failed:
+       dma_unmap_single(target->dev, queue->msg_token,
+                        queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+map_failed:
+       free_page((unsigned long) queue->msgs);
+
+malloc_failed:
+       return -ENOMEM;
+}
+
+static void crq_queue_destroy(struct srp_target *target)
+{
+       struct vio_port *vport = target_to_port(target);
+       struct crq_queue *queue = &vport->crq_queue;
+       int err;
+
+       free_irq(vport->dma_dev->irq, target);
+       do {
+               err = h_free_crq(vport->dma_dev->unit_address);
+       } while (err == H_BUSY || H_IS_LONG_BUSY(err));
+
+       dma_unmap_single(target->dev, queue->msg_token,
+                        queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
+
+       free_page((unsigned long) queue->msgs);
+}
+
+static void process_crq(struct viosrp_crq *crq,        struct srp_target *target)
+{
+       struct vio_port *vport = target_to_port(target);
+       dprintk("%x %x\n", crq->valid, crq->format);
+
+       switch (crq->valid) {
+       case 0xC0:
+               /* initialization */
+               switch (crq->format) {
+               case 0x01:
+                       h_send_crq(vport->dma_dev->unit_address,
+                                  0xC002000000000000, 0);
+                       break;
+               case 0x02:
+                       break;
+               default:
+                       eprintk("Unknown format %u\n", crq->format);
+               }
+               break;
+       case 0xFF:
+               /* transport event */
+               break;
+       case 0x80:
+               /* real payload */
+               switch (crq->format) {
+               case VIOSRP_SRP_FORMAT:
+               case VIOSRP_MAD_FORMAT:
+                       process_iu(crq, target);
+                       break;
+               case VIOSRP_OS400_FORMAT:
+               case VIOSRP_AIX_FORMAT:
+               case VIOSRP_LINUX_FORMAT:
+               case VIOSRP_INLINE_FORMAT:
+                       eprintk("Unsupported format %u\n", crq->format);
+                       break;
+               default:
+                       eprintk("Unknown format %u\n", crq->format);
+               }
+               break;
+       default:
+               eprintk("unknown message type 0x%02x!?\n", crq->valid);
+       }
+}
+
+static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
+{
+       struct viosrp_crq *crq;
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->lock, flags);
+       crq = &queue->msgs[queue->cur];
+       if (crq->valid & 0x80) {
+               if (++queue->cur == queue->size)
+                       queue->cur = 0;
+       } else
+               crq = NULL;
+       spin_unlock_irqrestore(&queue->lock, flags);
+
+       return crq;
+}
+
+static void handle_crq(struct work_struct *work)
+{
+       struct vio_port *vport = container_of(work, struct vio_port, crq_work);
+       struct srp_target *target = vport->target;
+       struct viosrp_crq *crq;
+       int done = 0;
+
+       while (!done) {
+               while ((crq = next_crq(&vport->crq_queue)) != NULL) {
+                       process_crq(crq, target);
+                       crq->valid = 0x00;
+               }
+
+               vio_enable_interrupts(vport->dma_dev);
+
+               crq = next_crq(&vport->crq_queue);
+               if (crq) {
+                       vio_disable_interrupts(vport->dma_dev);
+                       process_crq(crq, target);
+                       crq->valid = 0x00;
+               } else
+                       done = 1;
+       }
+
+       handle_cmd_queue(target);
+}
+
+
+static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
+{
+       unsigned long flags;
+       struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
+       struct srp_target *target = iue->target;
+
+       dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
+
+       spin_lock_irqsave(&target->lock, flags);
+       list_del(&iue->ilist);
+       spin_unlock_irqrestore(&target->lock, flags);
+
+       srp_iu_put(iue);
+
+       return 0;
+}
+
+static int ibmvstgt_tsk_mgmt_response(u64 mid, int result)
+{
+       struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
+       union viosrp_iu *iu = vio_iu(iue);
+       unsigned char status, asc;
+
+       eprintk("%p %d\n", iue, result);
+       status = NO_SENSE;
+       asc = 0;
+
+       switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
+       case SRP_TSK_ABORT_TASK:
+               asc = 0x14;
+               if (result)
+                       status = ABORTED_COMMAND;
+               break;
+       default:
+               break;
+       }
+
+       send_rsp(iue, NULL, status, asc);
+       srp_iu_put(iue);
+
+       return 0;
+}
+
+static ssize_t system_id_show(struct class_device *cdev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
+}
+
+static ssize_t partition_number_show(struct class_device *cdev, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
+}
+
+static ssize_t unit_address_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct srp_target *target = host_to_srp_target(shost);
+       struct vio_port *vport = target_to_port(target);
+       return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
+}
+
+static CLASS_DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
+static CLASS_DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
+static CLASS_DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
+
+static struct class_device_attribute *ibmvstgt_attrs[] = {
+       &class_device_attr_system_id,
+       &class_device_attr_partition_number,
+       &class_device_attr_unit_address,
+       NULL,
+};
+
+static struct scsi_host_template ibmvstgt_sht = {
+       .name                   = TGT_NAME,
+       .module                 = THIS_MODULE,
+       .can_queue              = INITIAL_SRP_LIMIT,
+       .sg_tablesize           = SG_ALL,
+       .use_clustering         = DISABLE_CLUSTERING,
+       .max_sectors            = DEFAULT_MAX_SECTORS,
+       .transfer_response      = ibmvstgt_cmd_done,
+       .transfer_data          = ibmvstgt_transfer_data,
+       .eh_abort_handler       = ibmvstgt_eh_abort_handler,
+       .tsk_mgmt_response      = ibmvstgt_tsk_mgmt_response,
+       .shost_attrs            = ibmvstgt_attrs,
+       .proc_name              = TGT_NAME,
+};
+
+static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
+{
+       struct Scsi_Host *shost;
+       struct srp_target *target;
+       struct vio_port *vport;
+       unsigned int *dma, dma_size;
+       int err = -ENOMEM;
+
+       vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
+       if (!vport)
+               return err;
+       shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
+       if (!shost)
+               goto free_vport;
+       err = scsi_tgt_alloc_queue(shost);
+       if (err)
+               goto put_host;
+
+       target = host_to_srp_target(shost);
+       target->shost = shost;
+       vport->dma_dev = dev;
+       target->ldata = vport;
+       vport->target = target;
+       err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
+                              SRP_MAX_IU_LEN);
+       if (err)
+               goto put_host;
+
+       dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
+                                                &dma_size);
+       if (!dma || dma_size != 40) {
+               eprintk("Couldn't get window property %d\n", dma_size);
+               err = -EIO;
+               goto free_srp_target;
+       }
+       vport->liobn = dma[0];
+       vport->riobn = dma[5];
+
+       INIT_WORK(&vport->crq_work, handle_crq);
+
+       err = crq_queue_create(&vport->crq_queue, target);
+       if (err)
+               goto free_srp_target;
+
+       err = scsi_add_host(shost, target->dev);
+       if (err)
+               goto destroy_queue;
+       return 0;
+
+destroy_queue:
+       crq_queue_destroy(target);
+free_srp_target:
+       srp_target_free(target);
+put_host:
+       scsi_host_put(shost);
+free_vport:
+       kfree(vport);
+       return err;
+}
+
+static int ibmvstgt_remove(struct vio_dev *dev)
+{
+       struct srp_target *target = (struct srp_target *) dev->dev.driver_data;
+       struct Scsi_Host *shost = target->shost;
+       struct vio_port *vport = target->ldata;
+
+       crq_queue_destroy(target);
+       scsi_remove_host(shost);
+       scsi_tgt_free_queue(shost);
+       srp_target_free(target);
+       kfree(vport);
+       scsi_host_put(shost);
+       return 0;
+}
+
+static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
+       {"v-scsi-host", "IBM,v-scsi-host"},
+       {"",""}
+};
+
+MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
+
+static struct vio_driver ibmvstgt_driver = {
+       .id_table = ibmvstgt_device_table,
+       .probe = ibmvstgt_probe,
+       .remove = ibmvstgt_remove,
+       .driver = {
+               .name = "ibmvscsis",
+               .owner = THIS_MODULE,
+       }
+};
+
+static int get_system_info(void)
+{
+       struct device_node *rootdn;
+       const char *id, *model, *name;
+       unsigned int *num;
+
+       rootdn = find_path_device("/");
+       if (!rootdn)
+               return -ENOENT;
+
+       model = get_property(rootdn, "model", NULL);
+       id = get_property(rootdn, "system-id", NULL);
+       if (model && id)
+               snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
+
+       name = get_property(rootdn, "ibm,partition-name", NULL);
+       if (name)
+               strncpy(partition_name, name, sizeof(partition_name));
+
+       num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL);
+       if (num)
+               partition_number = *num;
+
+       return 0;
+}
+
+static int ibmvstgt_init(void)
+{
+       int err = -ENOMEM;
+
+       printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
+
+       vtgtd = create_workqueue("ibmvtgtd");
+       if (!vtgtd)
+               return err;
+
+       err = get_system_info();
+       if (err)
+               goto destroy_wq;
+
+       err = vio_register_driver(&ibmvstgt_driver);
+       if (err)
+               goto destroy_wq;
+
+       return 0;
+
+destroy_wq:
+       destroy_workqueue(vtgtd);
+       return err;
+}
+
+static void ibmvstgt_exit(void)
+{
+       printk("Unregister IBM virtual SCSI driver\n");
+
+       destroy_workqueue(vtgtd);
+       vio_unregister_driver(&ibmvstgt_driver);
+}
+
+MODULE_DESCRIPTION("IBM Virtual SCSI Target");
+MODULE_AUTHOR("Santiago Leon");
+MODULE_LICENSE("GPL");
+
+module_init(ibmvstgt_init);
+module_exit(ibmvstgt_exit);
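
Editorial aside, not part of the patch: ibmvstgt_probe above releases resources in reverse order of acquisition through a ladder of goto labels, so each failure point jumps to the label that undoes only what has already succeeded. A minimal, self-contained sketch of that pattern, using hypothetical helpers rather than the driver's own functions:

    /* goto-unwind sketch; all names are illustrative, not from ibmvstgt */
    static int example_acquire_a(void) { return 0; }
    static int example_acquire_b(void) { return 0; }
    static void example_release_a(void) { }

    static int example_probe(void)
    {
            int err;

            err = example_acquire_a();
            if (err)
                    return err;

            err = example_acquire_b();
            if (err)
                    goto undo_a;

            return 0;               /* both resources held on success */

    undo_a:
            example_release_a();
            return err;
    }
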
index 1427a41e844104149a7e71c4a3e639f01711718b..8f6b5bf580f6757fdcace40c5a41eb5359827d98 100644 (file)
@@ -110,6 +110,7 @@ typedef struct ide_scsi_obj {
 } idescsi_scsi_t;
 
 static DEFINE_MUTEX(idescsi_ref_mutex);
+static int idescsi_nocd;                       /* Set by module param to skip cd */
 
 #define ide_scsi_g(disk) \
        container_of((disk)->private_data, struct ide_scsi_obj, driver)
@@ -1127,6 +1128,9 @@ static int ide_scsi_probe(ide_drive_t *drive)
                warned = 1;
        }
 
+       if (idescsi_nocd && drive->media == ide_cdrom)
+               return -ENODEV;
+
        if (!strstr("ide-scsi", drive->driver_req) ||
            !drive->present ||
            drive->media == ide_disk ||
@@ -1187,6 +1191,8 @@ static void __exit exit_idescsi_module(void)
        driver_unregister(&idescsi_driver.gen_driver);
 }
 
+module_param(idescsi_nocd, int, 0600);
+MODULE_PARM_DESC(idescsi_nocd, "Disable handling of CD-ROMs so they may be driven by ide-cd");
 module_init(init_idescsi_module);
 module_exit(exit_idescsi_module);
 MODULE_LICENSE("GPL");
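
Editorial note, not part of the patch: idescsi_nocd is an ordinary integer module_param, so it would typically be set at load time, e.g. "modprobe ide-scsi idescsi_nocd=1", or, given the 0600 mode, adjusted afterwards under /sys/module/ide_scsi/parameters/ (the hyphen in the module name becomes an underscore in sysfs). This is the standard module parameter convention rather than anything stated in the diff itself.
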
index e31f6122106f1ed830c6972be2c10e471df4e773..0464c182c5771fc44d02a3549bbd19fbd6b8d227 100644 (file)
@@ -36,7 +36,7 @@ typedef struct {
        int base_hi;            /* Hi Base address for ECP-ISA chipset */
        int mode;               /* Transfer mode                */
        struct scsi_cmnd *cur_cmd;      /* Current queued command       */
-       struct work_struct imm_tq;      /* Polling interrupt stuff       */
+       struct delayed_work imm_tq;     /* Polling interrupt stuff       */
        unsigned long jstart;   /* Jiffies at start             */
        unsigned failed:1;      /* Failure flag                 */
        unsigned dp:1;          /* Data phase present           */
@@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd)
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void imm_interrupt(void *data)
+static void imm_interrupt(struct work_struct *work)
 {
-       imm_struct *dev = (imm_struct *) data;
+       imm_struct *dev = container_of(work, imm_struct, imm_tq.work);
        struct scsi_cmnd *cmd = dev->cur_cmd;
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags;
@@ -745,7 +745,6 @@ static void imm_interrupt(void *data)
                return;
        }
        if (imm_engine(dev, cmd)) {
-               INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev);
                schedule_delayed_work(&dev->imm_tq, 1);
                return;
        }
@@ -953,8 +952,7 @@ static int imm_queuecommand(struct scsi_cmnd *cmd,
        cmd->result = DID_ERROR << 16;  /* default return code */
        cmd->SCp.phase = 0;     /* bus free */
 
-       INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
-       schedule_work(&dev->imm_tq);
+       schedule_delayed_work(&dev->imm_tq, 0);
 
        imm_pb_claim(dev);
 
@@ -1225,7 +1223,7 @@ static int __imm_attach(struct parport *pb)
        else
                ports = 8;
 
-       INIT_WORK(&dev->imm_tq, imm_interrupt, dev);
+       INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt);
 
        err = -ENOMEM;
        host = scsi_host_alloc(&imm_template, sizeof(imm_struct *));
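
Editorial aside, not part of the patch: this hunk and most of the hunks that follow are mechanical conversions to the reworked workqueue API merged around this time, in which a work handler receives the struct work_struct pointer itself and recovers its containing object with container_of(), and delayed work uses struct delayed_work with INIT_DELAYED_WORK(). A minimal sketch of the pattern, with illustrative names only:

    #include <linux/workqueue.h>

    struct example_dev {
            int id;
            struct delayed_work poll_work;  /* was: struct work_struct */
    };

    static void example_poll(struct work_struct *work)
    {
            struct example_dev *dev =
                    container_of(work, struct example_dev, poll_work.work);

            /* use dev as the old void *data argument was used */
            (void)dev;
    }

    /* at attach time:   INIT_DELAYED_WORK(&dev->poll_work, example_poll);
     * to (re)schedule:  schedule_delayed_work(&dev->poll_work, 1);
     */
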
index afed293dd7b99137bc4f57ee8819fc1248fbeec6..f160357e37a6f065b18f68cc1df2cc463455bbd4 100644 (file)
@@ -170,7 +170,7 @@ static int setup_debug = 0;
 static void i91uSCBPost(BYTE * pHcb, BYTE * pScb);
 
 /* PCI Devices supported by this driver */
-static struct pci_device_id i91u_pci_devices[] __devinitdata = {
+static struct pci_device_id i91u_pci_devices[] = {
        { PCI_VENDOR_ID_INIT,  I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { PCI_VENDOR_ID_INIT,  I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { PCI_VENDOR_ID_INIT,  I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
index 2dde821025f3a1d98cc676bc7a09db1d93c441c9..b318500785e58cdc9556064ee25f115feec070c6 100644 (file)
@@ -79,7 +79,6 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_transport.h>
 #include "ipr.h"
 
 /*
@@ -98,7 +97,7 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
 
 /* This table describes the differences between DMA controller chips */
 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
-       { /* Gemstone, Citrine, and Obsidian */
+       { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .cache_line_size = 0x20,
                {
@@ -135,6 +134,7 @@ static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
 };
@@ -1249,19 +1249,23 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
 
 /**
  * ipr_log_hex_data - Log additional hex IOA error data.
+ * @ioa_cfg:   ioa config struct
  * @data:              IOA error data
  * @len:               data length
  *
  * Return value:
  *     none
  **/
-static void ipr_log_hex_data(u32 *data, int len)
+static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
 {
        int i;
 
        if (len == 0)
                return;
 
+       if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+               len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
+
        for (i = 0; i < len / 4; i += 4) {
                ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
                        be32_to_cpu(data[i]),
@@ -1290,7 +1294,7 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
        ipr_err("%s\n", error->failure_reason);
        ipr_err("Remote Adapter VPD:\n");
        ipr_log_ext_vpd(&error->vpd);
-       ipr_log_hex_data(error->data,
+       ipr_log_hex_data(ioa_cfg, error->data,
                         be32_to_cpu(hostrcb->hcam.length) -
                         (offsetof(struct ipr_hostrcb_error, u) +
                          offsetof(struct ipr_hostrcb_type_17_error, data)));
@@ -1315,12 +1319,225 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
        ipr_err("%s\n", error->failure_reason);
        ipr_err("Remote Adapter VPD:\n");
        ipr_log_vpd(&error->vpd);
-       ipr_log_hex_data(error->data,
+       ipr_log_hex_data(ioa_cfg, error->data,
                         be32_to_cpu(hostrcb->hcam.length) -
                         (offsetof(struct ipr_hostrcb_error, u) +
                          offsetof(struct ipr_hostrcb_type_07_error, data)));
 }
 
+static const struct {
+       u8 active;
+       char *desc;
+} path_active_desc[] = {
+       { IPR_PATH_NO_INFO, "Path" },
+       { IPR_PATH_ACTIVE, "Active path" },
+       { IPR_PATH_NOT_ACTIVE, "Inactive path" }
+};
+
+static const struct {
+       u8 state;
+       char *desc;
+} path_state_desc[] = {
+       { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
+       { IPR_PATH_HEALTHY, "is healthy" },
+       { IPR_PATH_DEGRADED, "is degraded" },
+       { IPR_PATH_FAILED, "is failed" }
+};
+
+/**
+ * ipr_log_fabric_path - Log a fabric path error
+ * @hostrcb:   hostrcb struct
+ * @fabric:            fabric descriptor
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
+                               struct ipr_hostrcb_fabric_desc *fabric)
+{
+       int i, j;
+       u8 path_state = fabric->path_state;
+       u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+       u8 state = path_state & IPR_PATH_STATE_MASK;
+
+       for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+               if (path_active_desc[i].active != active)
+                       continue;
+
+               for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+                       if (path_state_desc[j].state != state)
+                               continue;
+
+                       if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
+                               ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
+                                            path_active_desc[i].desc, path_state_desc[j].desc,
+                                            fabric->ioa_port);
+                       } else if (fabric->cascaded_expander == 0xff) {
+                               ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
+                                            path_active_desc[i].desc, path_state_desc[j].desc,
+                                            fabric->ioa_port, fabric->phy);
+                       } else if (fabric->phy == 0xff) {
+                               ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
+                                            path_active_desc[i].desc, path_state_desc[j].desc,
+                                            fabric->ioa_port, fabric->cascaded_expander);
+                       } else {
+                               ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
+                                            path_active_desc[i].desc, path_state_desc[j].desc,
+                                            fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+                       }
+                       return;
+               }
+       }
+
+       ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
+               fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
+}
+
+static const struct {
+       u8 type;
+       char *desc;
+} path_type_desc[] = {
+       { IPR_PATH_CFG_IOA_PORT, "IOA port" },
+       { IPR_PATH_CFG_EXP_PORT, "Expander port" },
+       { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
+       { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
+};
+
+static const struct {
+       u8 status;
+       char *desc;
+} path_status_desc[] = {
+       { IPR_PATH_CFG_NO_PROB, "Functional" },
+       { IPR_PATH_CFG_DEGRADED, "Degraded" },
+       { IPR_PATH_CFG_FAILED, "Failed" },
+       { IPR_PATH_CFG_SUSPECT, "Suspect" },
+       { IPR_PATH_NOT_DETECTED, "Missing" },
+       { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
+};
+
+static const char *link_rate[] = {
+       "unknown",
+       "disabled",
+       "phy reset problem",
+       "spinup hold",
+       "port selector",
+       "unknown",
+       "unknown",
+       "unknown",
+       "1.5Gbps",
+       "3.0Gbps",
+       "unknown",
+       "unknown",
+       "unknown",
+       "unknown",
+       "unknown",
+       "unknown"
+};
+
+/**
+ * ipr_log_path_elem - Log a fabric path element.
+ * @hostrcb:   hostrcb struct
+ * @cfg:               fabric path element struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
+                             struct ipr_hostrcb_config_element *cfg)
+{
+       int i, j;
+       u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+       u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+
+       if (type == IPR_PATH_CFG_NOT_EXIST)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+               if (path_type_desc[i].type != type)
+                       continue;
+
+               for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+                       if (path_status_desc[j].status != status)
+                               continue;
+
+                       if (type == IPR_PATH_CFG_IOA_PORT) {
+                               ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
+                                            path_status_desc[j].desc, path_type_desc[i].desc,
+                                            cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                                            be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                       } else {
+                               if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
+                                       ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
+                                                    path_status_desc[j].desc, path_type_desc[i].desc,
+                                                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                                                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                               } else if (cfg->cascaded_expander == 0xff) {
+                                       ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
+                                                    "WWN=%08X%08X\n", path_status_desc[j].desc,
+                                                    path_type_desc[i].desc, cfg->phy,
+                                                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                                                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                               } else if (cfg->phy == 0xff) {
+                                       ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
+                                                    "WWN=%08X%08X\n", path_status_desc[j].desc,
+                                                    path_type_desc[i].desc, cfg->cascaded_expander,
+                                                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                                                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                               } else {
+                                       ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
+                                                    "WWN=%08X%08X\n", path_status_desc[j].desc,
+                                                    path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
+                                                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                                                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+                               }
+                       }
+                       return;
+               }
+       }
+
+       ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
+                    "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
+                    link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+                    be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
+ * ipr_log_fabric_error - Log a fabric error.
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+                                struct ipr_hostrcb *hostrcb)
+{
+       struct ipr_hostrcb_type_20_error *error;
+       struct ipr_hostrcb_fabric_desc *fabric;
+       struct ipr_hostrcb_config_element *cfg;
+       int i, add_len;
+
+       error = &hostrcb->hcam.u.error.u.type_20_error;
+       error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+       ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+       add_len = be32_to_cpu(hostrcb->hcam.length) -
+               (offsetof(struct ipr_hostrcb_error, u) +
+                offsetof(struct ipr_hostrcb_type_20_error, desc));
+
+       for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+               ipr_log_fabric_path(hostrcb, fabric);
+               for_each_fabric_cfg(fabric, cfg)
+                       ipr_log_path_elem(hostrcb, cfg);
+
+               add_len -= be16_to_cpu(fabric->length);
+               fabric = (struct ipr_hostrcb_fabric_desc *)
+                       ((unsigned long)fabric + be16_to_cpu(fabric->length));
+       }
+
+       ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
+
 /**
  * ipr_log_generic_error - Log an adapter error.
  * @ioa_cfg:   ioa config struct
@@ -1332,7 +1549,7 @@ static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
                                  struct ipr_hostrcb *hostrcb)
 {
-       ipr_log_hex_data(hostrcb->hcam.u.raw.data,
+       ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
                         be32_to_cpu(hostrcb->hcam.length));
 }
 
@@ -1394,13 +1611,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
        if (!ipr_error_table[error_index].log_hcam)
                return;
 
-       if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
-               ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
-                          "%s\n", ipr_error_table[error_index].error);
-       } else {
-               dev_err(&ioa_cfg->pdev->dev, "%s\n",
-                       ipr_error_table[error_index].error);
-       }
+       ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 
        /* Set indication we have logged an error */
        ioa_cfg->errors_logged++;
@@ -1437,6 +1648,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
        case IPR_HOST_RCB_OVERLAY_ID_17:
                ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
                break;
+       case IPR_HOST_RCB_OVERLAY_ID_20:
+               ipr_log_fabric_error(ioa_cfg, hostrcb);
+               break;
        case IPR_HOST_RCB_OVERLAY_ID_1:
        case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
        default:
@@ -2093,7 +2307,7 @@ static void ipr_release_dump(struct kref *kref)
 
 /**
  * ipr_worker_thread - Worker thread
- * @data:              ioa config struct
+ * @work:              work struct embedded in the ioa config struct
  *
  * Called at task level from a work thread. This function takes care
  * of adding and removing device from the mid-layer as configuration
@@ -2102,13 +2316,14 @@ static void ipr_release_dump(struct kref *kref)
  * Return value:
  *     nothing
  **/
-static void ipr_worker_thread(void *data)
+static void ipr_worker_thread(struct work_struct *work)
 {
        unsigned long lock_flags;
        struct ipr_resource_entry *res;
        struct scsi_device *sdev;
        struct ipr_dump *dump;
-       struct ipr_ioa_cfg *ioa_cfg = data;
+       struct ipr_ioa_cfg *ioa_cfg =
+               container_of(work, struct ipr_ioa_cfg, work_q);
        u8 bus, target, lun;
        int did_work;
 
@@ -2969,7 +3184,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
        struct ipr_dump *dump;
        unsigned long lock_flags = 0;
 
-       ENTER;
        dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
 
        if (!dump) {
@@ -2996,7 +3210,6 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
        }
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-       LEAVE;
        return 0;
 }
 
@@ -3573,6 +3786,12 @@ static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
 
        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       while(ioa_cfg->in_reset_reload) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       }
+
        res = sata_port->res;
        if (res) {
                rc = ipr_device_reset(ioa_cfg, res);
@@ -3636,6 +3855,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
                if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
+                       if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+                               ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+                               ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+                       }
                }
        }
 
@@ -3770,7 +3993,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
         */
        if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
                return FAILED;
-       if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+       if (!res || !ipr_is_gscsi(res))
                return FAILED;
 
        list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -4615,7 +4838,7 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
  * Return value:
  *     0 on success / other on failure
  **/
-int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
        struct ipr_resource_entry *res;
 
@@ -4648,40 +4871,6 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
        return buffer;
 }
 
-/**
- * ipr_scsi_timed_out - Handle scsi command timeout
- * @scsi_cmd:  scsi command struct
- *
- * Return value:
- *     EH_NOT_HANDLED
- **/
-enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
-{
-       struct ipr_ioa_cfg *ioa_cfg;
-       struct ipr_cmnd *ipr_cmd;
-       unsigned long flags;
-
-       ENTER;
-       spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
-       ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-
-       list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-               if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
-                       ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
-                       ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
-       LEAVE;
-       return EH_NOT_HANDLED;
-}
-
-static struct scsi_transport_template ipr_transport_template = {
-       .eh_timed_out = ipr_scsi_timed_out
-};
-
 static struct scsi_host_template driver_template = {
        .module = THIS_MODULE,
        .name = "IPR",
@@ -4776,6 +4965,12 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
        unsigned long flags;
 
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       while(ioa_cfg->in_reset_reload) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+               wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+               spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       }
+
        list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
                if (ipr_cmd->qc == qc) {
                        ipr_device_reset(ioa_cfg, sata_port->res);
@@ -6745,7 +6940,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
                return -ENOMEM;
 
        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
-               ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
+               ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 
                if (!ipr_cmd) {
                        ipr_free_cmd_blks(ioa_cfg);
@@ -6832,6 +7027,7 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 
                ioa_cfg->hostrcb[i]->hostrcb_dma =
                        ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
+               ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
                list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
        }
 
@@ -6926,7 +7122,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
-       INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
+       INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
        init_waitqueue_head(&ioa_cfg->reset_wait_q);
        ioa_cfg->sdt_state = INACTIVE;
        if (ipr_enable_cache)
@@ -7017,7 +7213,6 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 
        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
-       host->transportt = &ipr_transport_template;
        ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
                      sata_port_info.flags, &ipr_sata_ops);
 
@@ -7351,12 +7546,24 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
              0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
              0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
              PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
              0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B8,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
+             PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
                0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
@@ -7366,6 +7573,9 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
                PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
                0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
+       { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F,
+               0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
        { }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
index 6d035283af0843f00a54caa75fa7ee94d19c86f0..9f62a1d4d5118b0e197d15ea44d2ba07ff5f9424 100644 (file)
@@ -37,8 +37,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.2.0"
-#define IPR_DRIVER_DATE "(September 25, 2006)"
+#define IPR_DRIVER_VERSION "2.3.0"
+#define IPR_DRIVER_DATE "(November 8, 2006)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -54,6 +54,8 @@
  */
 #define IPR_NUM_BASE_CMD_BLKS                          100
 
+#define PCI_DEVICE_ID_IBM_OBSIDIAN_E   0x0339
+
 #define IPR_SUBS_DEV_ID_2780   0x0264
 #define IPR_SUBS_DEV_ID_5702   0x0266
 #define IPR_SUBS_DEV_ID_5703   0x0278
 #define IPR_SUBS_DEV_ID_571F   0x02D5
 #define IPR_SUBS_DEV_ID_572A   0x02C1
 #define IPR_SUBS_DEV_ID_572B   0x02C2
+#define IPR_SUBS_DEV_ID_572F   0x02C3
 #define IPR_SUBS_DEV_ID_575B   0x030D
+#define IPR_SUBS_DEV_ID_575C   0x0338
+#define IPR_SUBS_DEV_ID_57B7   0x0360
+#define IPR_SUBS_DEV_ID_57B8   0x02C2
 
 #define IPR_NAME                               "ipr"
 
 #define IPR_IOASC_IOA_WAS_RESET                        0x10000001
 #define IPR_IOASC_PCI_ACCESS_ERROR                     0x10000002
 
+#define IPR_DEFAULT_MAX_ERROR_DUMP                     984
 #define IPR_NUM_LOG_HCAMS                              2
 #define IPR_NUM_CFG_CHG_HCAMS                          2
 #define IPR_NUM_HCAMS  (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
@@ -731,6 +738,64 @@ struct ipr_hostrcb_type_17_error {
        u32 data[476];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_config_element {
+       u8 type_status;
+#define IPR_PATH_CFG_TYPE_MASK 0xF0
+#define IPR_PATH_CFG_NOT_EXIST 0x00
+#define IPR_PATH_CFG_IOA_PORT          0x10
+#define IPR_PATH_CFG_EXP_PORT          0x20
+#define IPR_PATH_CFG_DEVICE_PORT       0x30
+#define IPR_PATH_CFG_DEVICE_LUN        0x40
+
+#define IPR_PATH_CFG_STATUS_MASK       0x0F
+#define IPR_PATH_CFG_NO_PROB           0x00
+#define IPR_PATH_CFG_DEGRADED          0x01
+#define IPR_PATH_CFG_FAILED            0x02
+#define IPR_PATH_CFG_SUSPECT           0x03
+#define IPR_PATH_NOT_DETECTED          0x04
+#define IPR_PATH_INCORRECT_CONN        0x05
+
+       u8 cascaded_expander;
+       u8 phy;
+       u8 link_rate;
+#define IPR_PHY_LINK_RATE_MASK 0x0F
+
+       __be32 wwid[2];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_fabric_desc {
+       __be16 length;
+       u8 ioa_port;
+       u8 cascaded_expander;
+       u8 phy;
+       u8 path_state;
+#define IPR_PATH_ACTIVE_MASK           0xC0
+#define IPR_PATH_NO_INFO               0x00
+#define IPR_PATH_ACTIVE                        0x40
+#define IPR_PATH_NOT_ACTIVE            0x80
+
+#define IPR_PATH_STATE_MASK            0x0F
+#define IPR_PATH_STATE_NO_INFO 0x00
+#define IPR_PATH_HEALTHY               0x01
+#define IPR_PATH_DEGRADED              0x02
+#define IPR_PATH_FAILED                        0x03
+
+       __be16 num_entries;
+       struct ipr_hostrcb_config_element elem[1];
+}__attribute__((packed, aligned (4)));
+
+#define for_each_fabric_cfg(fabric, cfg) \
+               for (cfg = (fabric)->elem; \
+                       cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
+                       cfg++)
+
+struct ipr_hostrcb_type_20_error {
+       u8 failure_reason[64];
+       u8 reserved[3];
+       u8 num_entries;
+       struct ipr_hostrcb_fabric_desc desc[1];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_error {
        __be32 failing_dev_ioasc;
        struct ipr_res_addr failing_dev_res_addr;
@@ -747,6 +812,7 @@ struct ipr_hostrcb_error {
                struct ipr_hostrcb_type_13_error type_13_error;
                struct ipr_hostrcb_type_14_error type_14_error;
                struct ipr_hostrcb_type_17_error type_17_error;
+               struct ipr_hostrcb_type_20_error type_20_error;
        } u;
 }__attribute__((packed, aligned (4)));
 
@@ -786,6 +852,7 @@ struct ipr_hcam {
 #define IPR_HOST_RCB_OVERLAY_ID_14                             0x14
 #define IPR_HOST_RCB_OVERLAY_ID_16                             0x16
 #define IPR_HOST_RCB_OVERLAY_ID_17                             0x17
+#define IPR_HOST_RCB_OVERLAY_ID_20                             0x20
 #define IPR_HOST_RCB_OVERLAY_ID_DEFAULT                        0xFF
 
        u8 reserved1[3];
@@ -805,6 +872,7 @@ struct ipr_hostrcb {
        struct ipr_hcam hcam;
        dma_addr_t hostrcb_dma;
        struct list_head queue;
+       struct ipr_ioa_cfg *ioa_cfg;
 };
 
 /* IPR smart dump table structures */
@@ -1283,6 +1351,17 @@ struct ipr_ucode_image_header {
        }                                                               \
 }
 
+#define ipr_hcam_err(hostrcb, fmt, ...)                                        \
+{                                                                                                      \
+       if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) {             \
+               ipr_ra_err((hostrcb)->ioa_cfg,                                                  \
+                               (hostrcb)->hcam.u.error.failing_dev_res_addr,                   \
+                               fmt, ##__VA_ARGS__);                                                    \
+       } else {                                                                                        \
+               dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__);            \
+       }                                                                                               \
+}
+
 #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
        __FILE__, __FUNCTION__, __LINE__)
 
index f06a06ae6092fc3e0bc7cf05d00188596886cced..8b704f73055a43d075762bb1f1e4dd84fbd39b2a 100644 (file)
@@ -5001,7 +5001,7 @@ ips_init_copperhead(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       msleep(IPS_ONE_SEC);
+                       MDELAY(IPS_ONE_SEC);
                }
 
                if (j >= 45)
@@ -5027,7 +5027,7 @@ ips_init_copperhead(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       msleep(IPS_ONE_SEC);
+                       MDELAY(IPS_ONE_SEC);
                }
 
                if (j >= 240)
@@ -5045,7 +5045,7 @@ ips_init_copperhead(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
        }
 
        if (i >= 240)
@@ -5095,7 +5095,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       msleep(IPS_ONE_SEC);
+                       MDELAY(IPS_ONE_SEC);
                }
 
                if (j >= 45)
@@ -5121,7 +5121,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
                                break;
 
                        /* Delay for 1 Second */
-                       msleep(IPS_ONE_SEC);
+                       MDELAY(IPS_ONE_SEC);
                }
 
                if (j >= 240)
@@ -5139,7 +5139,7 @@ ips_init_copperhead_memio(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
        }
 
        if (i >= 240)
@@ -5191,7 +5191,7 @@ ips_init_morpheus(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
        }
 
        if (i >= 45) {
@@ -5217,7 +5217,7 @@ ips_init_morpheus(ips_ha_t * ha)
                        if (Post != 0x4F00)
                                break;
                        /* Delay for 1 Second */
-                       msleep(IPS_ONE_SEC);
+                       MDELAY(IPS_ONE_SEC);
                }
 
                if (i >= 120) {
@@ -5247,7 +5247,7 @@ ips_init_morpheus(ips_ha_t * ha)
                        break;
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
        }
 
        if (i >= 240) {
@@ -5307,12 +5307,12 @@ ips_reset_copperhead(ips_ha_t * ha)
                outb(IPS_BIT_RST, ha->io_addr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
 
                outb(0, ha->io_addr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
 
                if ((*ha->func.init) (ha))
                        break;
@@ -5352,12 +5352,12 @@ ips_reset_copperhead_memio(ips_ha_t * ha)
                writeb(IPS_BIT_RST, ha->mem_ptr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
 
                writeb(0, ha->mem_ptr + IPS_REG_SCPR);
 
                /* Delay for 1 Second */
-               msleep(IPS_ONE_SEC);
+               MDELAY(IPS_ONE_SEC);
 
                if ((*ha->func.init) (ha))
                        break;
@@ -5398,7 +5398,7 @@ ips_reset_morpheus(ips_ha_t * ha)
                writel(0x80000000, ha->mem_ptr + IPS_REG_I960_IDR);
 
                /* Delay for 5 Seconds */
-               msleep(5 * IPS_ONE_SEC);
+               MDELAY(5 * IPS_ONE_SEC);
 
                /* Do a PCI config read to wait for adapter */
                pci_read_config_byte(ha->pcidev, 4, &junk);
index 34680f3dd4523e10662aa03f6605478c2a72b262..b726dcc424b190f214d571a828339c125f28c6c4 100644 (file)
@@ -51,6 +51,7 @@
    #define _IPS_H_
 
 #include <linux/version.h>
+#include <linux/nmi.h>
    #include <asm/uaccess.h>
    #include <asm/io.h>
 
             dev_printk(level , &((pcidev)->dev) , format , ## arg)
    #endif
 
-   #ifndef MDELAY
-      #define MDELAY mdelay
-   #endif
+   #define MDELAY(n)                   \
+       do {                            \
+               mdelay(n);              \
+               touch_nmi_watchdog();   \
+       } while (0)
 
    #ifndef min
       #define min(x,y) ((x) < (y) ? x : y)
index 5d88621894858e7d69fe534f50ea6faf532866b7..e11b23c641e28e36506c272a626b8dc5cbfc01ce 100644 (file)
@@ -719,9 +719,10 @@ again:
        return rc;
 }
 
-static void iscsi_xmitworker(void *data)
+static void iscsi_xmitworker(struct work_struct *work)
 {
-       struct iscsi_conn *conn = data;
+       struct iscsi_conn *conn =
+               container_of(work, struct iscsi_conn, xmitwork);
        int rc;
        /*
         * serialize Xmit worker on a per-connection basis.
@@ -1512,7 +1513,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
        if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
                goto mgmtqueue_alloc_fail;
 
-       INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
+       INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
        /* allocate login_mtask used for the login/text sequences */
        spin_lock_bh(&session->lock);
index d977bd492d8d0d141e566df75a5d589367f3fb04..fb7df7b758112b59359e6bc2d37b8afe11f18c66 100644 (file)
@@ -647,10 +647,12 @@ void sas_unregister_domain_devices(struct asd_sas_port *port)
  * Discover process only interrogates devices in order to discover the
  * domain.
  */
-static void sas_discover_domain(void *data)
+static void sas_discover_domain(struct work_struct *work)
 {
        int error = 0;
-       struct asd_sas_port *port = data;
+       struct sas_discovery_event *ev =
+               container_of(work, struct sas_discovery_event, work);
+       struct asd_sas_port *port = ev->port;
 
        sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock,
                        &port->disc.pending);
@@ -692,10 +694,12 @@ static void sas_discover_domain(void *data)
                    current->pid, error);
 }
 
-static void sas_revalidate_domain(void *data)
+static void sas_revalidate_domain(struct work_struct *work)
 {
        int res = 0;
-       struct asd_sas_port *port = data;
+       struct sas_discovery_event *ev =
+               container_of(work, struct sas_discovery_event, work);
+       struct asd_sas_port *port = ev->port;
 
        sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock,
                        &port->disc.pending);
@@ -722,7 +726,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
        BUG_ON(ev >= DISC_NUM_EVENTS);
 
        sas_queue_event(ev, &disc->disc_event_lock, &disc->pending,
-                       &disc->disc_work[ev], port->ha->core.shost);
+                       &disc->disc_work[ev].work, port->ha->core.shost);
 
        return 0;
 }
@@ -737,13 +741,15 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
 {
        int i;
 
-       static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
                [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
                [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
        };
 
        spin_lock_init(&disc->disc_event_lock);
        disc->pending = 0;
-       for (i = 0; i < DISC_NUM_EVENTS; i++)
-               INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port);
+       for (i = 0; i < DISC_NUM_EVENTS; i++) {
+               INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+               disc->disc_work[i].port = port;
+       }
 }
index 19110ed1c89ca7dd984208d174d88b5b73ca70b1..d83392ee6823f09dab380b1008d2e2bbbf0031ba 100644 (file)
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
        BUG_ON(event >= HA_NUM_EVENTS);
 
        sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending,
-                       &sas_ha->ha_events[event], sas_ha->core.shost);
+                       &sas_ha->ha_events[event].work, sas_ha->core.shost);
 }
 
 static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
        BUG_ON(event >= PORT_NUM_EVENTS);
 
        sas_queue_event(event, &ha->event_lock, &phy->port_events_pending,
-                       &phy->port_events[event], ha->core.shost);
+                       &phy->port_events[event].work, ha->core.shost);
 }
 
 static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
@@ -51,12 +51,12 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
        BUG_ON(event >= PHY_NUM_EVENTS);
 
        sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending,
-                       &phy->phy_events[event], ha->core.shost);
+                       &phy->phy_events[event].work, ha->core.shost);
 }
 
 int sas_init_events(struct sas_ha_struct *sas_ha)
 {
-       static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
                [HAE_RESET] = sas_hae_reset,
        };
 
@@ -64,8 +64,10 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
 
        spin_lock_init(&sas_ha->event_lock);
 
-       for (i = 0; i < HA_NUM_EVENTS; i++)
-               INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha);
+       for (i = 0; i < HA_NUM_EVENTS; i++) {
+               INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+               sas_ha->ha_events[i].ha = sas_ha;
+       }
 
        sas_ha->notify_ha_event = notify_ha_event;
        sas_ha->notify_port_event = notify_port_event;
index e34a934354978ba4541e97e0d8739c402ea1f1e7..d31e6fa466f79668f151c0d0c301bbccf27c848f 100644 (file)
@@ -597,10 +597,15 @@ static struct domain_device *sas_ex_discover_end_dev(
        child->iproto = phy->attached_iproto;
        memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
        sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
-       phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
-       BUG_ON(!phy->port);
-       /* FIXME: better error handling*/
-       BUG_ON(sas_port_add(phy->port) != 0);
+       if (!phy->port) {
+               phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
+               if (unlikely(!phy->port))
+                       goto out_err;
+               if (unlikely(sas_port_add(phy->port) != 0)) {
+                       sas_port_free(phy->port);
+                       goto out_err;
+               }
+       }
        sas_ex_get_linkrate(parent, child, phy);
 
        if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) {
@@ -615,8 +620,7 @@ static struct domain_device *sas_ex_discover_end_dev(
                        SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
                                    "0x%x\n", SAS_ADDR(parent->sas_addr),
                                    phy_id, res);
-                       kfree(child);
-                       return NULL;
+                       goto out_free;
                }
                memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
                       sizeof(struct dev_to_host_fis));
@@ -627,14 +631,14 @@ static struct domain_device *sas_ex_discover_end_dev(
                                    "%016llx:0x%x returned 0x%x\n",
                                    SAS_ADDR(child->sas_addr),
                                    SAS_ADDR(parent->sas_addr), phy_id, res);
-                       kfree(child);
-                       return NULL;
+                       goto out_free;
                }
        } else if (phy->attached_tproto & SAS_PROTO_SSP) {
                child->dev_type = SAS_END_DEV;
                rphy = sas_end_device_alloc(phy->port);
                /* FIXME: error handling */
-               BUG_ON(!rphy);
+               if (unlikely(!rphy))
+                       goto out_free;
                child->tproto = phy->attached_tproto;
                sas_init_dev(child);
 
@@ -651,9 +655,7 @@ static struct domain_device *sas_ex_discover_end_dev(
                                    "at %016llx:0x%x returned 0x%x\n",
                                    SAS_ADDR(child->sas_addr),
                                    SAS_ADDR(parent->sas_addr), phy_id, res);
-                       /* FIXME: this kfrees list elements without removing them */
-                       //kfree(child);
-                       return NULL;
+                       goto out_list_del;
                }
        } else {
                SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n",
@@ -663,6 +665,16 @@ static struct domain_device *sas_ex_discover_end_dev(
 
        list_add_tail(&child->siblings, &parent_ex->children);
        return child;
+
+ out_list_del:
+       list_del(&child->dev_list_node);
+       sas_rphy_free(rphy);
+ out_free:
+       sas_port_delete(phy->port);
+ out_err:
+       phy->port = NULL;
+       kfree(child);
+       return NULL;
 }
 
 static struct domain_device *sas_ex_discover_expander(
index c836a237fb7955b6e53a88b0916d7ad551b0b75a..2f0c07fc3f48c1da732c37e4862727b976595432 100644 (file)
@@ -36,7 +36,7 @@
 
 #include "../scsi_sas_internal.h"
 
-kmem_cache_t *sas_task_cache;
+struct kmem_cache *sas_task_cache;
 
 /*------------ SAS addr hash -----------*/
 void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
@@ -65,9 +65,11 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
 
 /* ---------- HA events ---------- */
 
-void sas_hae_reset(void *data)
+void sas_hae_reset(struct work_struct *work)
 {
-       struct sas_ha_struct *ha = data;
+       struct sas_ha_event *ev =
+               container_of(work, struct sas_ha_event, work);
+       struct sas_ha_struct *ha = ev->ha;
 
        sas_begin_event(HAE_RESET, &ha->event_lock,
                        &ha->pending);
@@ -112,6 +114,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
                }
        }
 
+       INIT_LIST_HEAD(&sas_ha->eh_done_q);
+
        return 0;
 
 Undo_ports:
@@ -142,7 +146,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
        return sas_smp_get_phy_events(phy);
 }
 
-static int sas_phy_reset(struct sas_phy *phy, int hard_reset)
+int sas_phy_reset(struct sas_phy *phy, int hard_reset)
 {
        int ret;
        enum phy_func reset_type;
index bffcee474921580afc860ca77e1af2db2485c7e0..137d7e496b6d99ab6d1f6c8cbac094e27d072760 100644 (file)
@@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
 
 void sas_deform_port(struct asd_sas_phy *phy);
 
-void sas_porte_bytes_dmaed(void *);
-void sas_porte_broadcast_rcvd(void *);
-void sas_porte_link_reset_err(void *);
-void sas_porte_timer_event(void *);
-void sas_porte_hard_reset(void *);
+void sas_porte_bytes_dmaed(struct work_struct *work);
+void sas_porte_broadcast_rcvd(struct work_struct *work);
+void sas_porte_link_reset_err(struct work_struct *work);
+void sas_porte_timer_event(struct work_struct *work);
+void sas_porte_hard_reset(struct work_struct *work);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy);
 
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
 
-void sas_hae_reset(void *);
+void sas_hae_reset(struct work_struct *work);
 
 static inline void sas_queue_event(int event, spinlock_t *lock,
                                   unsigned long *pending,
index 9340cdbae4a3a0af7e3dd51bf589b4d364612f15..b459c4b635b1b73dfe5e1ed5304f6bad5a6ee588 100644 (file)
 
 /* ---------- Phy events ---------- */
 
-static void sas_phye_loss_of_signal(void *data)
+static void sas_phye_loss_of_signal(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock,
                        &phy->phy_events_pending);
@@ -40,18 +42,22 @@ static void sas_phye_loss_of_signal(void *data)
        sas_deform_port(phy);
 }
 
-static void sas_phye_oob_done(void *data)
+static void sas_phye_oob_done(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock,
                        &phy->phy_events_pending);
        phy->error = 0;
 }
 
-static void sas_phye_oob_error(void *data)
+static void sas_phye_oob_error(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct asd_sas_port *port = phy->port;
        struct sas_internal *i =
@@ -80,9 +86,11 @@ static void sas_phye_oob_error(void *data)
        }
 }
 
-static void sas_phye_spinup_hold(void *data)
+static void sas_phye_spinup_hold(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
        struct sas_ha_struct *sas_ha = phy->ha;
        struct sas_internal *i =
                to_sas_internal(sas_ha->core.shost->transportt);
@@ -100,14 +108,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
 {
        int i;
 
-       static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
                [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
                [PHYE_OOB_DONE] = sas_phye_oob_done,
                [PHYE_OOB_ERROR] = sas_phye_oob_error,
                [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
        };
 
-       static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = {
+       static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
                [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
                [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
                [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
@@ -122,13 +130,18 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
 
                phy->error = 0;
                INIT_LIST_HEAD(&phy->port_phy_el);
-               for (k = 0; k < PORT_NUM_EVENTS; k++)
-                       INIT_WORK(&phy->port_events[k], sas_port_event_fns[k],
-                                 phy);
+               for (k = 0; k < PORT_NUM_EVENTS; k++) {
+                       INIT_WORK(&phy->port_events[k].work,
+                                 sas_port_event_fns[k]);
+                       phy->port_events[k].phy = phy;
+               }
+
+               for (k = 0; k < PHY_NUM_EVENTS; k++) {
+                       INIT_WORK(&phy->phy_events[k].work,
+                                 sas_phy_event_fns[k]);
+                       phy->phy_events[k].phy = phy;
+               }
 
-               for (k = 0; k < PHY_NUM_EVENTS; k++)
-                       INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k],
-                                 phy);
                phy->port = NULL;
                phy->ha = sas_ha;
                spin_lock_init(&phy->frame_rcvd_lock);
index 253cdcf306a2539164bb5155752a89efa33e4a7f..971c37ceecb4a517fbba785e35b951545d4f8208 100644 (file)
@@ -181,9 +181,11 @@ void sas_deform_port(struct asd_sas_phy *phy)
 
 /* ---------- SAS port events ---------- */
 
-void sas_porte_bytes_dmaed(void *data)
+void sas_porte_bytes_dmaed(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock,
                        &phy->port_events_pending);
@@ -191,11 +193,13 @@ void sas_porte_bytes_dmaed(void *data)
        sas_form_port(phy);
 }
 
-void sas_porte_broadcast_rcvd(void *data)
+void sas_porte_broadcast_rcvd(struct work_struct *work)
 {
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
        unsigned long flags;
        u32 prim;
-       struct asd_sas_phy *phy = data;
 
        sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock,
                        &phy->port_events_pending);
@@ -208,9 +212,11 @@ void sas_porte_broadcast_rcvd(void *data)
        sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
 }
 
-void sas_porte_link_reset_err(void *data)
+void sas_porte_link_reset_err(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock,
                        &phy->port_events_pending);
@@ -218,9 +224,11 @@ void sas_porte_link_reset_err(void *data)
        sas_deform_port(phy);
 }
 
-void sas_porte_timer_event(void *data)
+void sas_porte_timer_event(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock,
                        &phy->port_events_pending);
@@ -228,9 +236,11 @@ void sas_porte_timer_event(void *data)
        sas_deform_port(phy);
 }
 
-void sas_porte_hard_reset(void *data)
+void sas_porte_hard_reset(struct work_struct *work)
 {
-       struct asd_sas_phy *phy = data;
+       struct asd_sas_event *ev =
+               container_of(work, struct asd_sas_event, work);
+       struct asd_sas_phy *phy = ev->phy;
 
        sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock,
                        &phy->port_events_pending);
index e46e79355b776ec98a407962694a8e3756414571..22672d54aa2742d7c515f6c89f818b4159b49a22 100644 (file)
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi.h>
+#include <scsi/scsi_eh.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_sas.h>
 #include "../scsi_sas_internal.h"
+#include "../scsi_transport_api.h"
 
 #include <linux/err.h>
 #include <linux/blkdev.h>
@@ -46,6 +48,7 @@ static void sas_scsi_task_done(struct sas_task *task)
 {
        struct task_status_struct *ts = &task->task_status;
        struct scsi_cmnd *sc = task->uldd_task;
+       struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
        unsigned ts_flags = task->task_state_flags;
        int hs = 0, stat = 0;
 
@@ -116,7 +119,7 @@ static void sas_scsi_task_done(struct sas_task *task)
        sas_free_task(task);
        /* This is very ugly but this is how SCSI Core works. */
        if (ts_flags & SAS_TASK_STATE_ABORTED)
-               scsi_finish_command(sc);
+               scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
        else
                sc->scsi_done(sc);
 }
@@ -307,6 +310,15 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
                spin_unlock_irqrestore(&core->task_queue_lock, flags);
        }
 
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+               SAS_DPRINTK("%s: task 0x%p already aborted\n",
+                           __FUNCTION__, task);
+               return TASK_IS_ABORTED;
+       }
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
        for (i = 0; i < 5; i++) {
                SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task);
                res = si->dft->lldd_abort_task(task);
@@ -409,13 +421,16 @@ Again:
        SAS_DPRINTK("going over list...\n");
        list_for_each_entry_safe(cmd, n, &error_q, eh_entry) {
                struct sas_task *task = TO_SAS_TASK(cmd);
+               list_del_init(&cmd->eh_entry);
 
+               if (!task) {
+                       SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__);
+                       continue;
+               }
                SAS_DPRINTK("trying to find task 0x%p\n", task);
-               list_del_init(&cmd->eh_entry);
                res = sas_scsi_find_task(task);
 
                cmd->eh_eflags = 0;
-               shost->host_failed--;
 
                switch (res) {
                case TASK_IS_DONE:
@@ -491,6 +506,7 @@ Again:
                }
        }
 out:
+       scsi_eh_flush_done_q(&ha->eh_done_q);
        SAS_DPRINTK("--- Exit %s\n", __FUNCTION__);
        return;
 clear_q:
@@ -508,12 +524,18 @@ enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
        unsigned long flags;
 
        if (!task) {
-               SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
+               SAS_DPRINTK("command 0x%p, task 0x%p, gone: EH_HANDLED\n",
                            cmd, task);
                return EH_HANDLED;
        }
 
        spin_lock_irqsave(&task->task_state_lock, flags);
+       if (task->task_state_flags & SAS_TASK_INITIATOR_ABORTED) {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+               SAS_DPRINTK("command 0x%p, task 0x%p, aborted by initiator: "
+                           "EH_NOT_HANDLED\n", cmd, task);
+               return EH_NOT_HANDLED;
+       }
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
@@ -777,6 +799,66 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
        spin_unlock_irqrestore(&core->task_queue_lock, flags);
 }
 
+static int do_sas_task_abort(struct sas_task *task)
+{
+       struct scsi_cmnd *sc = task->uldd_task;
+       struct sas_internal *si =
+               to_sas_internal(task->dev->port->ha->core.shost->transportt);
+       unsigned long flags;
+       int res;
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+               SAS_DPRINTK("%s: Task %p already aborted.\n", __FUNCTION__,
+                           task);
+               return 0;
+       }
+
+       task->task_state_flags |= SAS_TASK_INITIATOR_ABORTED;
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+               task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+       if (!si->dft->lldd_abort_task)
+               return -ENODEV;
+
+       res = si->dft->lldd_abort_task(task);
+       if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
+           (res == TMF_RESP_FUNC_COMPLETE))
+       {
+               /* SMP commands don't have scsi_cmds(?) */
+               if (!sc) {
+                       task->task_done(task);
+                       return 0;
+               }
+               scsi_req_abort_cmd(sc);
+               scsi_schedule_eh(sc->device->host);
+               return 0;
+       }
+
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       task->task_state_flags &= ~SAS_TASK_INITIATOR_ABORTED;
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+               task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+       return -EAGAIN;
+}
+
+void sas_task_abort(struct work_struct *work)
+{
+       struct sas_task *task =
+               container_of(work, struct sas_task, abort_work);
+       int i;
+
+       for (i = 0; i < 5; i++)
+               if (!do_sas_task_abort(task))
+                       return;
+
+       SAS_DPRINTK("%s: Could not kill task!\n", __FUNCTION__);
+}
+
 EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
@@ -784,3 +866,5 @@ EXPORT_SYMBOL_GPL(sas_slave_destroy);
 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
 EXPORT_SYMBOL_GPL(sas_change_queue_type);
 EXPORT_SYMBOL_GPL(sas_bios_param);
+EXPORT_SYMBOL_GPL(sas_task_abort);
+EXPORT_SYMBOL_GPL(sas_phy_reset);
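
Editor's note: do_sas_task_abort() above marks the task aborted under the state lock, asks the LLDD to abort it, and rolls the flags back when the abort did not complete, while sas_task_abort() bounds the whole attempt to five tries. A hedged, self-contained sketch of that mark/try/roll-back/retry shape (standalone C with a stubbed abort hook; none of the names below come from libsas):

#include <stdbool.h>
#include <stdio.h>

enum { MAX_ABORT_TRIES = 5 };

struct task_state { unsigned flags; };
#define F_ABORTED 0x1

/* Stand-in for the LLDD abort hook; pretend it succeeds on the 3rd try. */
static bool try_abort(struct task_state *t, int attempt)
{
        (void)t;
        return attempt == 3;
}

static int do_abort_once(struct task_state *t, int attempt)
{
        t->flags |= F_ABORTED;            /* optimistically mark aborted    */
        if (try_abort(t, attempt))
                return 0;                 /* abort completed, keep the flag */
        t->flags &= ~F_ABORTED;           /* roll back and let caller retry */
        return -1;
}

int main(void)
{
        struct task_state t = { 0 };
        for (int i = 1; i <= MAX_ABORT_TRIES; i++)
                if (!do_abort_once(&t, i)) {
                        printf("aborted on attempt %d\n", i);
                        return 0;
                }
        printf("could not abort task\n");
        return 1;
}
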
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
new file mode 100644 (file)
index 0000000..89403b0
--- /dev/null
@@ -0,0 +1,441 @@
+/*
+ * SCSI RDMA Protocol lib functions
+ *
+ * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/err.h>
+#include <linux/kfifo.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_tgt.h>
+#include <scsi/srp.h>
+#include <scsi/libsrp.h>
+
+enum srp_task_attributes {
+       SRP_SIMPLE_TASK = 0,
+       SRP_HEAD_TASK = 1,
+       SRP_ORDERED_TASK = 2,
+       SRP_ACA_TASK = 4
+};
+
+/* tmp - will replace with SCSI logging stuff */
+#define eprintk(fmt, args...)                                  \
+do {                                                           \
+       printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);  \
+} while (0)
+/* #define dprintk eprintk */
+#define dprintk(fmt, args...)
+
+static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
+                            struct srp_buf **ring)
+{
+       int i;
+       struct iu_entry *iue;
+
+       q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
+       if (!q->pool)
+               return -ENOMEM;
+       q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
+       if (!q->items)
+               goto free_pool;
+
+       spin_lock_init(&q->lock);
+       q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
+                             GFP_KERNEL, &q->lock);
+       if (IS_ERR(q->queue))
+               goto free_item;
+
+       for (i = 0, iue = q->items; i < max; i++) {
+               __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
+               iue->sbuf = ring[i];
+               iue++;
+       }
+       return 0;
+
+free_item:
+       kfree(q->items);
+free_pool:
+       kfree(q->pool);
+       return -ENOMEM;
+}
+
+static void srp_iu_pool_free(struct srp_queue *q)
+{
+       kfree(q->items);
+       kfree(q->pool);
+}
+
+static struct srp_buf **srp_ring_alloc(struct device *dev,
+                                      size_t max, size_t size)
+{
+       int i;
+       struct srp_buf **ring;
+
+       ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       for (i = 0; i < max; i++) {
+               ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
+               if (!ring[i])
+                       goto out;
+               ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
+                                                 GFP_KERNEL);
+               if (!ring[i]->buf)
+                       goto out;
+       }
+       return ring;
+
+out:
+       for (i = 0; i < max && ring[i]; i++) {
+               if (ring[i]->buf)
+                       dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+               kfree(ring[i]);
+       }
+       kfree(ring);
+
+       return NULL;
+}
+
+static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
+                         size_t size)
+{
+       int i;
+
+       for (i = 0; i < max; i++) {
+               dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
+               kfree(ring[i]);
+       }
+}
+
+int srp_target_alloc(struct srp_target *target, struct device *dev,
+                    size_t nr, size_t iu_size)
+{
+       int err;
+
+       spin_lock_init(&target->lock);
+       INIT_LIST_HEAD(&target->cmd_queue);
+
+       target->dev = dev;
+       target->dev->driver_data = target;
+
+       target->srp_iu_size = iu_size;
+       target->rx_ring_size = nr;
+       target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
+       if (!target->rx_ring)
+               return -ENOMEM;
+       err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
+       if (err)
+               goto free_ring;
+
+       return 0;
+
+free_ring:
+       srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
+       return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(srp_target_alloc);
+
+void srp_target_free(struct srp_target *target)
+{
+       srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
+                     target->srp_iu_size);
+       srp_iu_pool_free(&target->iu_queue);
+}
+EXPORT_SYMBOL_GPL(srp_target_free);
+
+struct iu_entry *srp_iu_get(struct srp_target *target)
+{
+       struct iu_entry *iue = NULL;
+
+       kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
+       if (!iue)
+               return iue;
+       iue->target = target;
+       INIT_LIST_HEAD(&iue->ilist);
+       iue->flags = 0;
+       return iue;
+}
+EXPORT_SYMBOL_GPL(srp_iu_get);
+
+void srp_iu_put(struct iu_entry *iue)
+{
+       kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
+}
+EXPORT_SYMBOL_GPL(srp_iu_put);
+
+static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
+                          enum dma_data_direction dir, srp_rdma_t rdma_io,
+                          int dma_map, int ext_desc)
+{
+       struct iu_entry *iue = NULL;
+       struct scatterlist *sg = NULL;
+       int err, nsg = 0, len;
+
+       if (dma_map) {
+               iue = (struct iu_entry *) sc->SCp.ptr;
+               sg = sc->request_buffer;
+
+               dprintk("%p %u %u %d\n", iue, sc->request_bufflen,
+                       md->len, sc->use_sg);
+
+               nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg,
+                                DMA_BIDIRECTIONAL);
+               if (!nsg) {
+                       printk("fail to map %p %d\n", iue, sc->use_sg);
+                       return 0;
+               }
+               len = min(sc->request_bufflen, md->len);
+       } else
+               len = md->len;
+
+       err = rdma_io(sc, sg, nsg, md, 1, dir, len);
+
+       if (dma_map)
+               dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+       return err;
+}
+
+static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
+                            struct srp_indirect_buf *id,
+                            enum dma_data_direction dir, srp_rdma_t rdma_io,
+                            int dma_map, int ext_desc)
+{
+       struct iu_entry *iue = NULL;
+       struct srp_direct_buf *md = NULL;
+       struct scatterlist dummy, *sg = NULL;
+       dma_addr_t token = 0;
+       long err;
+       unsigned int done = 0;
+       int nmd, nsg = 0, len;
+
+       if (dma_map || ext_desc) {
+               iue = (struct iu_entry *) sc->SCp.ptr;
+               sg = sc->request_buffer;
+
+               dprintk("%p %u %u %d %d\n",
+                       iue, sc->request_bufflen, id->len,
+                       cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
+       }
+
+       nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
+
+       if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
+           (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
+               md = &id->desc_list[0];
+               goto rdma;
+       }
+
+       if (ext_desc && dma_map) {
+               md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
+                               &token, GFP_KERNEL);
+               if (!md) {
+                       eprintk("Can't get dma memory %u\n", id->table_desc.len);
+                       return -ENOMEM;
+               }
+
+               sg_init_one(&dummy, md, id->table_desc.len);
+               sg_dma_address(&dummy) = token;
+               err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
+                             id->table_desc.len);
+               if (err < 0) {
+                       eprintk("Error copying indirect table %ld\n", err);
+                       goto free_mem;
+               }
+       } else {
+               eprintk("This command uses external indirect buffer\n");
+               return -EINVAL;
+       }
+
+rdma:
+       if (dma_map) {
+               nsg = dma_map_sg(iue->target->dev, sg, sc->use_sg, DMA_BIDIRECTIONAL);
+               if (!nsg) {
+                       eprintk("fail to map %p %d\n", iue, sc->use_sg);
+                       goto free_mem;
+               }
+               len = min(sc->request_bufflen, id->len);
+       } else
+               len = id->len;
+
+       err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
+
+       if (dma_map)
+               dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
+
+free_mem:
+       if (token && dma_map)
+               dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
+
+       return done;
+}
+
+static int data_out_desc_size(struct srp_cmd *cmd)
+{
+       int size = 0;
+       u8 fmt = cmd->buf_fmt >> 4;
+
+       switch (fmt) {
+       case SRP_NO_DATA_DESC:
+               break;
+       case SRP_DATA_DESC_DIRECT:
+               size = sizeof(struct srp_direct_buf);
+               break;
+       case SRP_DATA_DESC_INDIRECT:
+               size = sizeof(struct srp_indirect_buf) +
+                       sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
+               break;
+       default:
+               eprintk("client error. Invalid data_out_format %x\n", fmt);
+               break;
+       }
+       return size;
+}
+
+/*
+ * TODO: this can be called multiple times for a single command if it
+ * has very long data.
+ */
+int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
+                     srp_rdma_t rdma_io, int dma_map, int ext_desc)
+{
+       struct srp_direct_buf *md;
+       struct srp_indirect_buf *id;
+       enum dma_data_direction dir;
+       int offset, err = 0;
+       u8 format;
+
+       offset = cmd->add_cdb_len * 4;
+
+       dir = srp_cmd_direction(cmd);
+       if (dir == DMA_FROM_DEVICE)
+               offset += data_out_desc_size(cmd);
+
+       if (dir == DMA_TO_DEVICE)
+               format = cmd->buf_fmt >> 4;
+       else
+               format = cmd->buf_fmt & ((1U << 4) - 1);
+
+       switch (format) {
+       case SRP_NO_DATA_DESC:
+               break;
+       case SRP_DATA_DESC_DIRECT:
+               md = (struct srp_direct_buf *)
+                       (cmd->add_data + offset);
+               err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
+               break;
+       case SRP_DATA_DESC_INDIRECT:
+               id = (struct srp_indirect_buf *)
+                       (cmd->add_data + offset);
+               err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
+                                       ext_desc);
+               break;
+       default:
+               eprintk("Unknown format %d %x\n", dir, format);
+               break;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(srp_transfer_data);
+
+static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
+{
+       struct srp_direct_buf *md;
+       struct srp_indirect_buf *id;
+       int len = 0, offset = cmd->add_cdb_len * 4;
+       u8 fmt;
+
+       if (dir == DMA_TO_DEVICE)
+               fmt = cmd->buf_fmt >> 4;
+       else {
+               fmt = cmd->buf_fmt & ((1U << 4) - 1);
+               offset += data_out_desc_size(cmd);
+       }
+
+       switch (fmt) {
+       case SRP_NO_DATA_DESC:
+               break;
+       case SRP_DATA_DESC_DIRECT:
+               md = (struct srp_direct_buf *) (cmd->add_data + offset);
+               len = md->len;
+               break;
+       case SRP_DATA_DESC_INDIRECT:
+               id = (struct srp_indirect_buf *) (cmd->add_data + offset);
+               len = id->len;
+               break;
+       default:
+               eprintk("invalid data format %x\n", fmt);
+               break;
+       }
+       return len;
+}
+
+int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
+                 u64 addr)
+{
+       enum dma_data_direction dir;
+       struct scsi_cmnd *sc;
+       int tag, len, err;
+
+       switch (cmd->task_attr) {
+       case SRP_SIMPLE_TASK:
+               tag = MSG_SIMPLE_TAG;
+               break;
+       case SRP_ORDERED_TASK:
+               tag = MSG_ORDERED_TAG;
+               break;
+       case SRP_HEAD_TASK:
+               tag = MSG_HEAD_TAG;
+               break;
+       default:
+               eprintk("Task attribute %d not supported\n", cmd->task_attr);
+               tag = MSG_ORDERED_TAG;
+       }
+
+       dir = srp_cmd_direction(cmd);
+       len = vscsis_data_length(cmd, dir);
+
+       dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
+               cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
+
+       sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
+       if (!sc)
+               return -ENOMEM;
+
+       sc->SCp.ptr = info;
+       memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
+       sc->request_bufflen = len;
+       sc->request_buffer = (void *) (unsigned long) addr;
+       sc->tag = tag;
+       err = scsi_tgt_queue_command(sc, (struct scsi_lun *) &cmd->lun, cmd->tag);
+       if (err)
+               scsi_host_put_command(shost, sc);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(srp_cmd_queue);
+
+MODULE_DESCRIPTION("SCSI RDAM Protocol lib functions");
+MODULE_AUTHOR("FUJITA Tomonori");
+MODULE_LICENSE("GPL");
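
Editor's note: srp_transfer_data() and vscsis_data_length() above decode srp_cmd.buf_fmt the same way: the high nibble carries the data-out descriptor format and the low nibble the data-in format, with 0/1/2 meaning no data, one direct descriptor, or an indirect descriptor table. A small standalone sketch of that decode (the numeric codes mirror the SRP definitions; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the SRP buffer-format codes (SRP spec / <scsi/srp.h>). */
enum { NO_DATA_DESC = 0, DATA_DESC_DIRECT = 1, DATA_DESC_INDIRECT = 2 };

/* buf_fmt packs two formats: data-out in the high nibble, data-in in the low. */
static void decode_buf_fmt(uint8_t buf_fmt, int to_device)
{
        uint8_t fmt = to_device ? (buf_fmt >> 4) : (buf_fmt & 0x0f);

        switch (fmt) {
        case NO_DATA_DESC:       puts("no data descriptor");        break;
        case DATA_DESC_DIRECT:   puts("one direct descriptor");     break;
        case DATA_DESC_INDIRECT: puts("indirect descriptor table"); break;
        default:                 puts("invalid format");            break;
        }
}

int main(void)
{
        uint8_t buf_fmt = (DATA_DESC_DIRECT << 4) | NO_DATA_DESC;
        decode_buf_fmt(buf_fmt, 1);   /* data-out (write) side -> direct  */
        decode_buf_fmt(buf_fmt, 0);   /* data-in (read) side  -> no data  */
        return 0;
}
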
index 3f7f5f8abd7517524b330ac2cd98c13f3444999a..a7de0bca5bdd31efd19b2683e95feaa39e7aadcd 100644 (file)
@@ -296,13 +296,17 @@ struct lpfc_hba {
        uint32_t cfg_cr_delay;
        uint32_t cfg_cr_count;
        uint32_t cfg_multi_ring_support;
+       uint32_t cfg_multi_ring_rctl;
+       uint32_t cfg_multi_ring_type;
        uint32_t cfg_fdmi_on;
        uint32_t cfg_discovery_threads;
        uint32_t cfg_max_luns;
        uint32_t cfg_poll;
        uint32_t cfg_poll_tmo;
+       uint32_t cfg_use_msi;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
+       uint64_t cfg_soft_wwnn;
        uint64_t cfg_soft_wwpn;
 
        uint32_t dev_loss_tmo_changed;
@@ -355,7 +359,7 @@ struct lpfc_hba {
 #define VPD_PORT            0x8         /* valid vpd port data */
 #define VPD_MASK            0xf         /* mask for any vpd data */
 
-       uint8_t soft_wwpn_enable;
+       uint8_t soft_wwn_enable;
 
        struct timer_list fcp_poll_timer;
        struct timer_list els_tmofunc;
index 2a4e02e7a39211d83960c8522417d1b89f2d9be4..f247e786af99487cda664cae44193adec19af7b3 100644 (file)
@@ -552,10 +552,10 @@ static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
 static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
 
 
-static char *lpfc_soft_wwpn_key = "C99G71SL8032A";
+static char *lpfc_soft_wwn_key = "C99G71SL8032A";
 
 static ssize_t
-lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
+lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
                                size_t count)
 {
        struct Scsi_Host *host = class_to_shost(cdev);
@@ -579,15 +579,15 @@ lpfc_soft_wwpn_enable_store(struct class_device *cdev, const char *buf,
        if (buf[cnt-1] == '\n')
                cnt--;
 
-       if ((cnt != strlen(lpfc_soft_wwpn_key)) ||
-           (strncmp(buf, lpfc_soft_wwpn_key, strlen(lpfc_soft_wwpn_key)) != 0))
+       if ((cnt != strlen(lpfc_soft_wwn_key)) ||
+           (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
                return -EINVAL;
 
-       phba->soft_wwpn_enable = 1;
+       phba->soft_wwn_enable = 1;
        return count;
 }
-static CLASS_DEVICE_ATTR(lpfc_soft_wwpn_enable, S_IWUSR, NULL,
-                               lpfc_soft_wwpn_enable_store);
+static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
+                               lpfc_soft_wwn_enable_store);
 
 static ssize_t
 lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
@@ -613,12 +613,12 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
        if (buf[cnt-1] == '\n')
                cnt--;
 
-       if (!phba->soft_wwpn_enable || (cnt < 16) || (cnt > 18) ||
+       if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
            ((cnt == 17) && (*buf++ != 'x')) ||
            ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
                return -EINVAL;
 
-       phba->soft_wwpn_enable = 0;
+       phba->soft_wwn_enable = 0;
 
        memset(wwpn, 0, sizeof(wwpn));
 
@@ -639,6 +639,8 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
        }
        phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
        fc_host_port_name(host) = phba->cfg_soft_wwpn;
+       if (phba->cfg_soft_wwnn)
+               fc_host_node_name(host) = phba->cfg_soft_wwnn;
 
        dev_printk(KERN_NOTICE, &phba->pcidev->dev,
                   "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -664,6 +666,66 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
 static CLASS_DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\
                         lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);
 
+static ssize_t
+lpfc_soft_wwnn_show(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+       return snprintf(buf, PAGE_SIZE, "0x%llx\n",
+                       (unsigned long long)phba->cfg_soft_wwnn);
+}
+
+
+static ssize_t
+lpfc_soft_wwnn_store(struct class_device *cdev, const char *buf, size_t count)
+{
+       struct Scsi_Host *host = class_to_shost(cdev);
+       struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;
+       unsigned int i, j, cnt=count;
+       u8 wwnn[8];
+
+       /* count may include a LF at end of string */
+       if (buf[cnt-1] == '\n')
+               cnt--;
+
+       if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) ||
+           ((cnt == 17) && (*buf++ != 'x')) ||
+           ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
+               return -EINVAL;
+
+       /*
+        * Allow wwnn to be set many times, as long as the enable is set.
+        * However, once the wwpn is set, everything locks.
+        */
+
+       memset(wwnn, 0, sizeof(wwnn));
+
+       /* Validate and store the new name */
+       for (i=0, j=0; i < 16; i++) {
+               if ((*buf >= 'a') && (*buf <= 'f'))
+                       j = ((j << 4) | ((*buf++ -'a') + 10));
+               else if ((*buf >= 'A') && (*buf <= 'F'))
+                       j = ((j << 4) | ((*buf++ -'A') + 10));
+               else if ((*buf >= '0') && (*buf <= '9'))
+                       j = ((j << 4) | (*buf++ -'0'));
+               else
+                       return -EINVAL;
+               if (i % 2) {
+                       wwnn[i/2] = j & 0xff;
+                       j = 0;
+               }
+       }
+       phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
+
+       dev_printk(KERN_NOTICE, &phba->pcidev->dev,
+                  "lpfc%d: soft_wwnn set. Value will take effect upon "
+                  "setting of the soft_wwpn\n", phba->brd_no);
+
+       return count;
+}
+static CLASS_DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
+                        lpfc_soft_wwnn_show, lpfc_soft_wwnn_store);
+
 
 static int lpfc_poll = 0;
 module_param(lpfc_poll, int, 0);
@@ -802,12 +864,11 @@ static CLASS_DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # LOG_MBOX                      0x4        Mailbox events
 # LOG_INIT                      0x8        Initialization events
 # LOG_LINK_EVENT                0x10       Link events
-# LOG_IP                        0x20       IP traffic history
 # LOG_FCP                       0x40       FCP traffic history
 # LOG_NODE                      0x80       Node table events
 # LOG_MISC                      0x400      Miscellaneous events
 # LOG_SLI                       0x800      SLI events
-# LOG_CHK_COND                  0x1000     FCP Check condition flag
+# LOG_FCP_ERROR                 0x1000     Only log FCP errors
 # LOG_LIBDFC                    0x2000     LIBDFC events
 # LOG_ALL_MSG                   0xffff     LOG all messages
 */
@@ -915,6 +976,22 @@ LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
                "SLI rings to spread IOCB entries across");
 
+/*
+# lpfc_multi_ring_rctl:  If lpfc_multi_ring_support is enabled, this
+# identifies what rctl value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
+*/
+LPFC_ATTR_R(multi_ring_rctl, FC_UNSOL_DATA, 1,
+            255, "Identifies RCTL for additional ring configuration");
+
+/*
+# lpfc_multi_ring_type:  If lpfc_multi_ring_support is enabled, this
+# identifies what type value to configure the additional ring for.
+# Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
+*/
+LPFC_ATTR_R(multi_ring_type, FC_LLC_SNAP, 1,
+            255, "Identifies TYPE for additional ring configuration");
+
 /*
 # lpfc_fdmi_on: controls FDMI support.
 #       0 = no FDMI support
@@ -946,6 +1023,15 @@ LPFC_ATTR_R(max_luns, 255, 0, 65535,
 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
             "Milliseconds driver will wait between polling FCP ring");
 
+/*
+# lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
+#              support this feature
+#       0  = MSI disabled (default)
+#       1  = MSI enabled
+# Value range is [0,1]. Default value is 0.
+*/
+LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
+
 
 struct class_device_attribute *lpfc_host_attrs[] = {
        &class_device_attr_info,
@@ -974,6 +1060,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
        &class_device_attr_lpfc_cr_delay,
        &class_device_attr_lpfc_cr_count,
        &class_device_attr_lpfc_multi_ring_support,
+       &class_device_attr_lpfc_multi_ring_rctl,
+       &class_device_attr_lpfc_multi_ring_type,
        &class_device_attr_lpfc_fdmi_on,
        &class_device_attr_lpfc_max_luns,
        &class_device_attr_nport_evt_cnt,
@@ -982,8 +1070,10 @@ struct class_device_attribute *lpfc_host_attrs[] = {
        &class_device_attr_issue_reset,
        &class_device_attr_lpfc_poll,
        &class_device_attr_lpfc_poll_tmo,
+       &class_device_attr_lpfc_use_msi,
+       &class_device_attr_lpfc_soft_wwnn,
        &class_device_attr_lpfc_soft_wwpn,
-       &class_device_attr_lpfc_soft_wwpn_enable,
+       &class_device_attr_lpfc_soft_wwn_enable,
        NULL,
 };
 
@@ -1771,6 +1861,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_cr_delay_init(phba, lpfc_cr_delay);
        lpfc_cr_count_init(phba, lpfc_cr_count);
        lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
+       lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
+       lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
        lpfc_lun_queue_depth_init(phba, lpfc_lun_queue_depth);
        lpfc_fcp_class_init(phba, lpfc_fcp_class);
        lpfc_use_adisc_init(phba, lpfc_use_adisc);
@@ -1782,9 +1874,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
        lpfc_max_luns_init(phba, lpfc_max_luns);
        lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+       lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
        lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
        phba->cfg_poll = lpfc_poll;
+       phba->cfg_soft_wwnn = 0L;
        phba->cfg_soft_wwpn = 0L;
 
        /*
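
Editor's note: the soft_wwnn/soft_wwpn store routines above accept a 16-digit hex string (optionally prefixed with x or 0x), fold each pair of digits into one byte of an 8-byte world-wide name, and convert the result with wwn_to_u64(). A standalone sketch of the same parse (ordinary C; the wwn_to_u64() here is a local helper written for the example, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold 8 big-endian bytes into a u64, like the driver's wwn_to_u64(). */
static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
                v = (v << 8) | wwn[i];
        return v;
}

/* Parse exactly 16 hex digits into wwn[8]; returns 0 on success. */
static int parse_wwn(const char *buf, uint8_t wwn[8])
{
        unsigned j = 0;

        if (strlen(buf) != 16)
                return -1;
        for (int i = 0; i < 16; i++, buf++) {
                if (*buf >= '0' && *buf <= '9')
                        j = (j << 4) | (unsigned)(*buf - '0');
                else if (*buf >= 'a' && *buf <= 'f')
                        j = (j << 4) | (unsigned)(*buf - 'a' + 10);
                else if (*buf >= 'A' && *buf <= 'F')
                        j = (j << 4) | (unsigned)(*buf - 'A' + 10);
                else
                        return -1;
                if (i % 2) {            /* every second digit completes a byte */
                        wwn[i / 2] = j & 0xff;
                        j = 0;
                }
        }
        return 0;
}

int main(void)
{
        uint8_t wwn[8] = { 0 };
        if (!parse_wwn("20000000c9abcdef", wwn))
                printf("wwn = 0x%016llx\n",
                       (unsigned long long)wwn_to_u64(wwn));
        return 0;
}
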
index 3add7c237859d66347ff5ea69d19a3969d3da72e..a51a41b7f15d55566277619d8be5e64d06b31324 100644 (file)
@@ -558,6 +558,14 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
        return;
 }
 
+static void
+lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
+                        struct lpfc_iocbq * rspiocb)
+{
+       lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
+       return;
+}
+
 void
 lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
 {
@@ -629,6 +637,8 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
                bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
        else if (cmdcode == SLI_CTNS_RSNN_NN)
                bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
+       else if (cmdcode == SLI_CTNS_RFF_ID)
+               bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
        else
                bpl->tus.f.bdeSize = 0;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -660,6 +670,17 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
                cmpl = lpfc_cmpl_ct_cmd_rft_id;
                break;
 
+       case SLI_CTNS_RFF_ID:
+               CtReq->CommandResponse.bits.CmdRsp =
+                       be16_to_cpu(SLI_CTNS_RFF_ID);
+               CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
+               CtReq->un.rff.feature_res = 0;
+               CtReq->un.rff.feature_tgt = 0;
+               CtReq->un.rff.type_code = FC_FCP_DATA;
+               CtReq->un.rff.feature_init = 1;
+               cmpl = lpfc_cmpl_ct_cmd_rff_id;
+               break;
+
        case SLI_CTNS_RNN_ID:
                CtReq->CommandResponse.bits.CmdRsp =
                    be16_to_cpu(SLI_CTNS_RNN_ID);
@@ -934,7 +955,8 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
                        ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
                        ae->ad.bits.AttrType = be16_to_cpu(OS_NAME_VERSION);
                        sprintf(ae->un.OsNameVersion, "%s %s %s",
-                               init_utsname()->sysname, init_utsname()->release,
+                               init_utsname()->sysname,
+                               init_utsname()->release,
                                init_utsname()->version);
                        len = strlen(ae->un.OsNameVersion);
                        len += (len & 3) ? (4 - (len & 3)) : 4;
index 71864cdc6c71a432803a1ed9183489223e362eb7..a5f33a0dd4e7b79fb204cff5553d04c71c76177e 100644 (file)
@@ -243,6 +243,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                struct serv_parm *sp, IOCB_t *irsp)
 {
        LPFC_MBOXQ_t *mbox;
+       struct lpfc_dmabuf *mp;
        int rc;
 
        spin_lock_irq(phba->host->host_lock);
@@ -307,10 +308,14 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
        if (rc == MBX_NOT_FINISHED)
-               goto fail_free_mbox;
+               goto fail_issue_reg_login;
 
        return 0;
 
+ fail_issue_reg_login:
+       mp = (struct lpfc_dmabuf *) mbox->context1;
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
  fail_free_mbox:
        mempool_free(mbox, phba->mbox_mem_pool);
  fail:
@@ -657,6 +662,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba * phba, struct lpfc_dmabuf *prsp,
        uint8_t name[sizeof (struct lpfc_name)];
        uint32_t rc;
 
+       /* Fabric nodes can have the same WWPN so we don't bother searching
+        * by WWPN.  Just return the ndlp that was given to us.
+        */
+       if (ndlp->nlp_type & NLP_FABRIC)
+               return ndlp;
+
        lp = (uint32_t *) prsp->virt;
        sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
        memset(name, 0, sizeof (struct lpfc_name));
@@ -1122,7 +1133,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                                                mempool_free(mbox,
                                                     phba->mbox_mem_pool);
                                                lpfc_disc_flush_list(phba);
-                                               psli->ring[(psli->ip_ring)].
+                                               psli->ring[(psli->extra_ring)].
                                                    flag &=
                                                    ~LPFC_STOP_IOCB_EVENT;
                                                psli->ring[(psli->fcp_ring)].
@@ -1851,6 +1862,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
        IOCB_t *irsp;
        struct lpfc_nodelist *ndlp;
        LPFC_MBOXQ_t *mbox = NULL;
+       struct lpfc_dmabuf *mp;
 
        irsp = &rspiocb->iocb;
 
@@ -1862,6 +1874,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
        /* Check to see if link went down during discovery */
        if ((lpfc_els_chk_latt(phba)) || !ndlp) {
                if (mbox) {
+                       mp = (struct lpfc_dmabuf *) mbox->context1;
+                       if (mp) {
+                               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                               kfree(mp);
+                       }
                        mempool_free( mbox, phba->mbox_mem_pool);
                }
                goto out;
@@ -1893,9 +1910,7 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                        }
                        /* NOTE: we should have messages for unsuccessful
                           reglogin */
-                       mempool_free( mbox, phba->mbox_mem_pool);
                } else {
-                       mempool_free( mbox, phba->mbox_mem_pool);
                        /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
                        if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
                              ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
@@ -1907,6 +1922,12 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                                }
                        }
                }
+               mp = (struct lpfc_dmabuf *) mbox->context1;
+               if (mp) {
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+               }
+               mempool_free(mbox, phba->mbox_mem_pool);
        }
 out:
        if (ndlp) {
@@ -2644,6 +2665,7 @@ lpfc_els_handle_rscn(struct lpfc_hba * phba)
                        ndlp->nlp_type |= NLP_FABRIC;
                        ndlp->nlp_prev_state = ndlp->nlp_state;
                        ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
+                       lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
                        lpfc_issue_els_plogi(phba, NameServer_DID, 0);
                        /* Wait for NameServer login cmpl before we can
                           continue */
@@ -3039,7 +3061,7 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
        /* FARP-REQ received from DID <did> */
        lpfc_printf_log(phba,
                         KERN_INFO,
-                        LOG_IP,
+                        LOG_ELS,
                         "%d:0601 FARP-REQ received from DID x%x\n",
                         phba->brd_no, did);
 
@@ -3101,7 +3123,7 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
        /* FARP-RSP received from DID <did> */
        lpfc_printf_log(phba,
                         KERN_INFO,
-                        LOG_IP,
+                        LOG_ELS,
                         "%d:0600 FARP-RSP received from DID x%x\n",
                         phba->brd_no, did);
 
index 19c79a0549a7718cbf4560d3818c23e81c059579..c39564e85e944f62ebed5d8b3d41a6f75daf4ff7 100644 (file)
@@ -525,7 +525,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
        psli = &phba->sli;
        mb = &pmb->mb;
        /* Since we don't do discovery right now, turn these off here */
-       psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+       psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
 
@@ -641,7 +641,7 @@ out:
        if (rc == MBX_NOT_FINISHED) {
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_disc_flush_list(phba);
-               psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+               psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                phba->hba_state = LPFC_HBA_READY;
@@ -672,6 +672,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 
        memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
               sizeof (struct serv_parm));
+       if (phba->cfg_soft_wwnn)
+               u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
        if (phba->cfg_soft_wwpn)
                u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
        memcpy((uint8_t *) & phba->fc_nodename,
@@ -696,7 +698,7 @@ out:
                    == MBX_NOT_FINISHED) {
                        mempool_free( pmb, phba->mbox_mem_pool);
                        lpfc_disc_flush_list(phba);
-                       psli->ring[(psli->ip_ring)].flag &=
+                       psli->ring[(psli->extra_ring)].flag &=
                            ~LPFC_STOP_IOCB_EVENT;
                        psli->ring[(psli->fcp_ring)].flag &=
                            ~LPFC_STOP_IOCB_EVENT;
@@ -715,6 +717,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 {
        int i;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+       struct lpfc_dmabuf *mp;
+       int rc;
+
        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 
@@ -793,16 +798,27 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
        if (sparam_mbox) {
                lpfc_read_sparam(phba, sparam_mbox);
                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
-               lpfc_sli_issue_mbox(phba, sparam_mbox,
+               rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
                                                (MBX_NOWAIT | MBX_STOP_IOCB));
+               if (rc == MBX_NOT_FINISHED) {
+                       mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+                       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+                       kfree(mp);
+                       mempool_free(sparam_mbox, phba->mbox_mem_pool);
+                       if (cfglink_mbox)
+                               mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+                       return;
+               }
        }
 
        if (cfglink_mbox) {
                phba->hba_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
-               lpfc_sli_issue_mbox(phba, cfglink_mbox,
+               rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
                                                (MBX_NOWAIT | MBX_STOP_IOCB));
+               if (rc == MBX_NOT_FINISHED)
+                       mempool_free(cfglink_mbox, phba->mbox_mem_pool);
        }
 }
 
@@ -1067,6 +1083,7 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
                lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
                lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
                lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
+               lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
        }
 
        phba->fc_ns_retry = 0;
@@ -1423,7 +1440,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
                        if (iocb->context1 == (uint8_t *) ndlp)
                                return 1;
                }
-       } else if (pring->ringno == psli->ip_ring) {
+       } else if (pring->ringno == psli->extra_ring) {
 
        } else if (pring->ringno == psli->fcp_ring) {
                /* Skip match check if waiting to relogin to FCP target */
@@ -1680,112 +1697,38 @@ lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
 struct lpfc_nodelist *
 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
 {
-       struct lpfc_nodelist *ndlp, *next_ndlp;
+       struct lpfc_nodelist *ndlp;
+       struct list_head *lists[]={&phba->fc_nlpunmap_list,
+                                  &phba->fc_nlpmap_list,
+                                  &phba->fc_plogi_list,
+                                  &phba->fc_adisc_list,
+                                  &phba->fc_reglogin_list,
+                                  &phba->fc_prli_list,
+                                  &phba->fc_npr_list,
+                                  &phba->fc_unused_list};
+       uint32_t search[]={NLP_SEARCH_UNMAPPED,
+                          NLP_SEARCH_MAPPED,
+                          NLP_SEARCH_PLOGI,
+                          NLP_SEARCH_ADISC,
+                          NLP_SEARCH_REGLOGIN,
+                          NLP_SEARCH_PRLI,
+                          NLP_SEARCH_NPR,
+                          NLP_SEARCH_UNUSED};
+       int i;
        uint32_t data1;
 
        spin_lock_irq(phba->host->host_lock);
-       if (order & NLP_SEARCH_UNMAPPED) {
-               list_for_each_entry_safe(ndlp, next_ndlp,
-                                        &phba->fc_nlpunmap_list, nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* FIND node DID unmapped */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0929 FIND node DID unmapped"
-                                               " Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
-       if (order & NLP_SEARCH_MAPPED) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
-                                       nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* FIND node DID mapped */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0930 FIND node DID mapped "
-                                               "Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
-       if (order & NLP_SEARCH_PLOGI) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
-                                       nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* LOG change to PLOGI */
-                               /* FIND node DID plogi */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0908 FIND node DID plogi "
-                                               "Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
-       if (order & NLP_SEARCH_ADISC) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-                                       nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* LOG change to ADISC */
-                               /* FIND node DID adisc */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0931 FIND node DID adisc "
-                                               "Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
-       if (order & NLP_SEARCH_REGLOGIN) {
-               list_for_each_entry_safe(ndlp, next_ndlp,
-                                        &phba->fc_reglogin_list, nlp_listp) {
+       for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
+               if (!(order & search[i]))
+                       continue;
+               list_for_each_entry(ndlp, lists[i], nlp_listp) {
                        if (lpfc_matchdid(phba, ndlp, did)) {
-
                                data1 = (((uint32_t) ndlp->nlp_state << 24) |
                                         ((uint32_t) ndlp->nlp_xri << 16) |
                                         ((uint32_t) ndlp->nlp_type << 8) |
                                         ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* LOG change to REGLOGIN */
-                               /* FIND node DID reglogin */
                                lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0901 FIND node DID reglogin"
+                                               "%d:0929 FIND node DID "
                                                " Data: x%p x%x x%x x%x\n",
                                                phba->brd_no,
                                                ndlp, ndlp->nlp_DID,
@@ -1795,86 +1738,12 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
                        }
                }
        }
-
-       if (order & NLP_SEARCH_PRLI) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
-                                       nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* LOG change to PRLI */
-                               /* FIND node DID prli */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0902 FIND node DID prli "
-                                               "Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
-       if (order & NLP_SEARCH_NPR) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
-                                       nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* LOG change to NPR */
-                               /* FIND node DID npr */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0903 FIND node DID npr "
-                                               "Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
-       if (order & NLP_SEARCH_UNUSED) {
-               list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
-                                       nlp_listp) {
-                       if (lpfc_matchdid(phba, ndlp, did)) {
-
-                               data1 = (((uint32_t) ndlp->nlp_state << 24) |
-                                        ((uint32_t) ndlp->nlp_xri << 16) |
-                                        ((uint32_t) ndlp->nlp_type << 8) |
-                                        ((uint32_t) ndlp->nlp_rpi & 0xff));
-                               /* LOG change to UNUSED */
-                               /* FIND node DID unused */
-                               lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
-                                               "%d:0905 FIND node DID unused "
-                                               "Data: x%p x%x x%x x%x\n",
-                                               phba->brd_no,
-                                               ndlp, ndlp->nlp_DID,
-                                               ndlp->nlp_flag, data1);
-                               spin_unlock_irq(phba->host->host_lock);
-                               return ndlp;
-                       }
-               }
-       }
-
        spin_unlock_irq(phba->host->host_lock);
 
        /* FIND node did <did> NOT FOUND */
-       lpfc_printf_log(phba,
-                       KERN_INFO,
-                       LOG_NODE,
+       lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
                        "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
                        phba->brd_no, did, order);
-
-       /* no match found */
        return NULL;
 }
 
@@ -2036,7 +1905,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
                        if (rc == MBX_NOT_FINISHED) {
                                mempool_free( mbox, phba->mbox_mem_pool);
                                lpfc_disc_flush_list(phba);
-                               psli->ring[(psli->ip_ring)].flag &=
+                               psli->ring[(psli->extra_ring)].flag &=
                                        ~LPFC_STOP_IOCB_EVENT;
                                psli->ring[(psli->fcp_ring)].flag &=
                                        ~LPFC_STOP_IOCB_EVENT;
@@ -2415,7 +2284,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
 
        if (clrlaerr) {
                lpfc_disc_flush_list(phba);
-               psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+               psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
                phba->hba_state = LPFC_HBA_READY;
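
Editor's note: the lpfc_findnode_did() rewrite above replaces eight near-identical search blocks with one loop over two parallel tables, the candidate node lists and the NLP_SEARCH_* bits that enable each of them. The same table-driven idea in miniature (standalone C over plain arrays instead of kernel list heads; all names are illustrative):

#include <stdio.h>

#define SEARCH_MAPPED    0x1
#define SEARCH_UNMAPPED  0x2
#define SEARCH_PLOGI     0x4

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct node { int id; };

static struct node mapped[]   = { {5}, {9} };
static struct node unmapped[] = { {7} };
static struct node plogi[]    = { {9}, {11} };

/* Parallel tables: which array to scan, and which bit enables that scan. */
static const struct { struct node *nodes; int n; unsigned bit; } tables[] = {
        { mapped,   ARRAY_SIZE(mapped),   SEARCH_MAPPED   },
        { unmapped, ARRAY_SIZE(unmapped), SEARCH_UNMAPPED },
        { plogi,    ARRAY_SIZE(plogi),    SEARCH_PLOGI    },
};

static struct node *find_node(unsigned order, int id)
{
        for (unsigned t = 0; t < ARRAY_SIZE(tables); t++) {
                if (!(order & tables[t].bit))
                        continue;       /* caller did not ask for this list */
                for (int i = 0; i < tables[t].n; i++)
                        if (tables[t].nodes[i].id == id)
                                return &tables[t].nodes[i];
        }
        return NULL;
}

int main(void)
{
        struct node *n = find_node(SEARCH_MAPPED | SEARCH_PLOGI, 11);
        printf("found: %s\n", n ? "yes" : "no");
        return 0;
}
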
index eedf98801366790ee7ff1e496754b00b8e593d5b..f79cb61369065007a88ca4dccb6e50c2bb1ec174 100644 (file)
 #define FCELSSIZE             1024     /* maximum ELS transfer size */
 
 #define LPFC_FCP_RING            0     /* ring 0 for FCP initiator commands */
-#define LPFC_IP_RING             1     /* ring 1 for IP commands */
+#define LPFC_EXTRA_RING          1     /* ring 1 for other protocols */
 #define LPFC_ELS_RING            2     /* ring 2 for ELS commands */
 #define LPFC_FCP_NEXT_RING       3
 
 #define SLI2_IOCB_CMD_R0_ENTRIES    172        /* SLI-2 FCP command ring entries */
 #define SLI2_IOCB_RSP_R0_ENTRIES    134        /* SLI-2 FCP response ring entries */
-#define SLI2_IOCB_CMD_R1_ENTRIES      4        /* SLI-2 IP command ring entries */
-#define SLI2_IOCB_RSP_R1_ENTRIES      4        /* SLI-2 IP response ring entries */
+#define SLI2_IOCB_CMD_R1_ENTRIES      4        /* SLI-2 extra command ring entries */
+#define SLI2_IOCB_RSP_R1_ENTRIES      4        /* SLI-2 extra response ring entries */
 #define SLI2_IOCB_CMD_R1XTRA_ENTRIES 36        /* SLI-2 extra FCP cmd ring entries */
 #define SLI2_IOCB_RSP_R1XTRA_ENTRIES 52        /* SLI-2 extra FCP rsp ring entries */
 #define SLI2_IOCB_CMD_R2_ENTRIES     20        /* SLI-2 ELS command ring entries */
@@ -121,6 +121,20 @@ struct lpfc_sli_ct_request {
 
                        uint32_t rsvd[7];
                } rft;
+               struct rff {
+                       uint32_t PortId;
+                       uint8_t reserved[2];
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint8_t feature_res:6;
+                       uint8_t feature_init:1;
+                       uint8_t feature_tgt:1;
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint8_t feature_tgt:1;
+                       uint8_t feature_init:1;
+                       uint8_t feature_res:6;
+#endif
+                       uint8_t type_code;     /* type=8 for FCP */
+               } rff;
                struct rnn {
                        uint32_t PortId;        /* For RNN_ID requests */
                        uint8_t wwnn[8];
@@ -136,6 +150,7 @@ struct lpfc_sli_ct_request {
 #define  SLI_CT_REVISION        1
 #define  GID_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 260)
 #define  RFT_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 228)
+#define  RFF_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 235)
 #define  RNN_REQUEST_SZ         (sizeof(struct lpfc_sli_ct_request) - 252)
 #define  RSNN_REQUEST_SZ        (sizeof(struct lpfc_sli_ct_request))
 
@@ -225,6 +240,7 @@ struct lpfc_sli_ct_request {
 #define  SLI_CTNS_RNN_ID      0x0213
 #define  SLI_CTNS_RCS_ID      0x0214
 #define  SLI_CTNS_RFT_ID      0x0217
+#define  SLI_CTNS_RFF_ID      0x021F
 #define  SLI_CTNS_RSPN_ID     0x0218
 #define  SLI_CTNS_RPT_ID      0x021A
 #define  SLI_CTNS_RIP_NN      0x0235
@@ -1089,12 +1105,6 @@ typedef struct {
 #define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
 
-#define PCI_SUBSYSTEM_ID_LP11000S      0xfc11
-#define PCI_SUBSYSTEM_ID_LP11002S      0xfc12
-#define PCI_SUBSYSTEM_ID_LPE11000S     0xfc21
-#define PCI_SUBSYSTEM_ID_LPE11002S     0xfc22
-#define PCI_SUBSYSTEM_ID_LPE11010S     0xfc2A
-
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
 #define SUPERFLY_JEDEC_ID           0x0020
@@ -1284,6 +1294,10 @@ typedef struct {         /* FireFly BIU registers */
 #define CMD_FCP_IREAD_CX        0x1B
 #define CMD_FCP_ICMND_CR        0x1C
 #define CMD_FCP_ICMND_CX        0x1D
+#define CMD_FCP_TSEND_CX        0x1F
+#define CMD_FCP_TRECEIVE_CX     0x21
+#define CMD_FCP_TRSP_CX                0x23
+#define CMD_FCP_AUTO_TRSP_CX    0x29
 
 #define CMD_ADAPTER_MSG         0x20
 #define CMD_ADAPTER_DUMP        0x22
@@ -1310,6 +1324,9 @@ typedef struct {          /* FireFly BIU registers */
 #define CMD_FCP_IREAD64_CX      0x9B
 #define CMD_FCP_ICMND64_CR      0x9C
 #define CMD_FCP_ICMND64_CX      0x9D
+#define CMD_FCP_TSEND64_CX      0x9F
+#define CMD_FCP_TRECEIVE64_CX   0xA1
+#define CMD_FCP_TRSP64_CX       0xA3
 
 #define CMD_GEN_REQUEST64_CR    0xC2
 #define CMD_GEN_REQUEST64_CX    0xC3
index a5723ad0a0992c9026e759c7a5899cac4f51ea96..afca45cdbcefa0b29356ec8267697c8c029be4ef 100644 (file)
@@ -268,6 +268,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
        kfree(mp);
        pmb->context1 = NULL;
 
+       if (phba->cfg_soft_wwnn)
+               u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
        if (phba->cfg_soft_wwpn)
                u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
        memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName,
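
Like the existing soft WWPN override, the new soft WWNN is a 64-bit value that has to be spread into the 8-byte node name of the service parameters, most significant byte first. A minimal sketch of that expansion (the driver calls its u64_to_wwn() helper for this; the function below is an illustrative equivalent, not the lpfc one):

    #include <stdint.h>

    /* Spread a 64-bit World Wide Name into its 8-byte wire representation,
     * most significant byte first (an illustrative stand-in for u64_to_wwn()). */
    static void wwn_from_u64(uint64_t wwn, uint8_t out[8])
    {
            int i;

            for (i = 0; i < 8; i++)
                    out[i] = (uint8_t)(wwn >> (56 - 8 * i));
    }
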
@@ -349,8 +351,8 @@ lpfc_config_port_post(struct lpfc_hba * phba)
        phba->hba_state = LPFC_LINK_DOWN;
 
        /* Only process IOCBs on ring 0 till hba_state is READY */
-       if (psli->ring[psli->ip_ring].cmdringaddr)
-               psli->ring[psli->ip_ring].flag |= LPFC_STOP_IOCB_EVENT;
+       if (psli->ring[psli->extra_ring].cmdringaddr)
+               psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->fcp_ring].cmdringaddr)
                psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
        if (psli->ring[psli->next_ring].cmdringaddr)
@@ -517,7 +519,8 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
        struct lpfc_sli_ring  *pring;
        uint32_t event_data;
 
-       if (phba->work_hs & HS_FFER6) {
+       if (phba->work_hs & HS_FFER6 ||
+           phba->work_hs & HS_FFER5) {
                /* Re-establishing Link */
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "%d:1301 Re-establishing Link "
@@ -611,7 +614,7 @@ lpfc_handle_latt(struct lpfc_hba * phba)
        pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
        rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
        if (rc == MBX_NOT_FINISHED)
-               goto lpfc_handle_latt_free_mp;
+               goto lpfc_handle_latt_free_mbuf;
 
        /* Clear Link Attention in HA REG */
        spin_lock_irq(phba->host->host_lock);
@@ -621,6 +624,8 @@ lpfc_handle_latt(struct lpfc_hba * phba)
 
        return;
 
+lpfc_handle_latt_free_mbuf:
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
 lpfc_handle_latt_free_mp:
        kfree(mp);
 lpfc_handle_latt_free_pmb:
@@ -802,19 +807,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 {
        lpfc_vpd_t *vp;
        uint16_t dev_id = phba->pcidev->device;
-       uint16_t dev_subid = phba->pcidev->subsystem_device;
-       uint8_t hdrtype;
        int max_speed;
-       char * ports;
        struct {
                char * name;
                int    max_speed;
-               char * ports;
                char * bus;
-       } m = {"<Unknown>", 0, "", ""};
+       } m = {"<Unknown>", 0, ""};
 
-       pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
-       ports = (hdrtype == 0x80) ? "2-port " : "";
        if (mdp && mdp[0] != '\0'
                && descp && descp[0] != '\0')
                return;
@@ -834,130 +833,93 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
 
        switch (dev_id) {
        case PCI_DEVICE_ID_FIREFLY:
-               m = (typeof(m)){"LP6000", max_speed, "", "PCI"};
+               m = (typeof(m)){"LP6000", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_SUPERFLY:
                if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
-                       m = (typeof(m)){"LP7000", max_speed, "", "PCI"};
+                       m = (typeof(m)){"LP7000", max_speed,  "PCI"};
                else
-                       m = (typeof(m)){"LP7000E", max_speed, "", "PCI"};
+                       m = (typeof(m)){"LP7000E", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_DRAGONFLY:
-               m = (typeof(m)){"LP8000", max_speed, "", "PCI"};
+               m = (typeof(m)){"LP8000", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_CENTAUR:
                if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
-                       m = (typeof(m)){"LP9002", max_speed, "", "PCI"};
+                       m = (typeof(m)){"LP9002", max_speed, "PCI"};
                else
-                       m = (typeof(m)){"LP9000", max_speed, "", "PCI"};
+                       m = (typeof(m)){"LP9000", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_RFLY:
-               m = (typeof(m)){"LP952", max_speed, "", "PCI"};
+               m = (typeof(m)){"LP952", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_PEGASUS:
-               m = (typeof(m)){"LP9802", max_speed, "", "PCI-X"};
+               m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_THOR:
-               if (hdrtype == 0x80)
-                       m = (typeof(m)){"LP10000DC",
-                                       max_speed, ports, "PCI-X"};
-               else
-                       m = (typeof(m)){"LP10000",
-                                       max_speed, ports, "PCI-X"};
+               m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_VIPER:
-               m = (typeof(m)){"LPX1000", max_speed, "", "PCI-X"};
+               m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
                break;
        case PCI_DEVICE_ID_PFLY:
-               m = (typeof(m)){"LP982", max_speed, "", "PCI-X"};
+               m = (typeof(m)){"LP982", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_TFLY:
-               if (hdrtype == 0x80)
-                       m = (typeof(m)){"LP1050DC", max_speed, ports, "PCI-X"};
-               else
-                       m = (typeof(m)){"LP1050", max_speed, ports, "PCI-X"};
+               m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_HELIOS:
-               if (hdrtype == 0x80)
-                       m = (typeof(m)){"LP11002", max_speed, ports, "PCI-X2"};
-               else
-                       m = (typeof(m)){"LP11000", max_speed, ports, "PCI-X2"};
+               m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_HELIOS_SCSP:
-               m = (typeof(m)){"LP11000-SP", max_speed, ports, "PCI-X2"};
+               m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_HELIOS_DCSP:
-               m = (typeof(m)){"LP11002-SP", max_speed, ports, "PCI-X2"};
+               m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_NEPTUNE:
-               if (hdrtype == 0x80)
-                       m = (typeof(m)){"LPe1002", max_speed, ports, "PCIe"};
-               else
-                       m = (typeof(m)){"LPe1000", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_SCSP:
-               m = (typeof(m)){"LPe1000-SP", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_NEPTUNE_DCSP:
-               m = (typeof(m)){"LPe1002-SP", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_BMID:
-               m = (typeof(m)){"LP1150", max_speed, ports, "PCI-X2"};
+               m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_BSMB:
-               m = (typeof(m)){"LP111", max_speed, ports, "PCI-X2"};
+               m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
                break;
        case PCI_DEVICE_ID_ZEPHYR:
-               if (hdrtype == 0x80)
-                       m = (typeof(m)){"LPe11002", max_speed, ports, "PCIe"};
-               else
-                       m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_SCSP:
-               m = (typeof(m)){"LPe11000", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZEPHYR_DCSP:
-               m = (typeof(m)){"LPe11002-SP", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZMID:
-               m = (typeof(m)){"LPe1150", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_ZSMB:
-               m = (typeof(m)){"LPe111", max_speed, ports, "PCIe"};
+               m = (typeof(m)){"LPe111", max_speed, "PCIe"};
                break;
        case PCI_DEVICE_ID_LP101:
-               m = (typeof(m)){"LP101", max_speed, ports, "PCI-X"};
+               m = (typeof(m)){"LP101", max_speed, "PCI-X"};
                break;
        case PCI_DEVICE_ID_LP10000S:
-               m = (typeof(m)){"LP10000-S", max_speed, ports, "PCI"};
+               m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
                break;
        case PCI_DEVICE_ID_LP11000S:
+               m = (typeof(m)){"LP11000-S", max_speed,
+                       "PCI-X2"};
+               break;
        case PCI_DEVICE_ID_LPE11000S:
-               switch (dev_subid) {
-               case PCI_SUBSYSTEM_ID_LP11000S:
-                       m = (typeof(m)){"LP11000-S", max_speed,
-                                       ports, "PCI-X2"};
-                       break;
-               case PCI_SUBSYSTEM_ID_LP11002S:
-                       m = (typeof(m)){"LP11002-S", max_speed,
-                                       ports, "PCI-X2"};
-                       break;
-               case PCI_SUBSYSTEM_ID_LPE11000S:
-                       m = (typeof(m)){"LPe11000-S", max_speed,
-                                       ports, "PCIe"};
-                       break;
-               case PCI_SUBSYSTEM_ID_LPE11002S:
-                       m = (typeof(m)){"LPe11002-S", max_speed,
-                                       ports, "PCIe"};
-                       break;
-               case PCI_SUBSYSTEM_ID_LPE11010S:
-                       m = (typeof(m)){"LPe11010-S", max_speed,
-                                       "10-port ", "PCIe"};
-                       break;
-               default:
-                       m = (typeof(m)){ NULL };
-                       break;
-               }
+               m = (typeof(m)){"LPe11000-S", max_speed,
+                       "PCIe"};
                break;
        default:
                m = (typeof(m)){ NULL };
@@ -968,8 +930,8 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
                snprintf(mdp, 79,"%s", m.name);
        if (descp && descp[0] == '\0')
                snprintf(descp, 255,
-                        "Emulex %s %dGb %s%s Fibre Channel Adapter",
-                        m.name, m.max_speed, m.ports, m.bus);
+                        "Emulex %s %dGb %s Fibre Channel Adapter",
+                        m.name, m.max_speed, m.bus);
 }
 
 /**************************************************/
@@ -1651,6 +1613,14 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
        if (error)
                goto out_remove_host;
 
+       if (phba->cfg_use_msi) {
+               error = pci_enable_msi(phba->pcidev);
+               if (error)
+                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "%d:0452 "
+                                       "Enable MSI failed, continuing with "
+                                       "IRQ\n", phba->brd_no);
+       }
+
        error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
                                                        LPFC_DRIVER_NAME, phba);
        if (error) {
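
MSI is opt-in via cfg_use_msi and failure is non-fatal: the driver logs it and carries on with the legacy INTx interrupt, and the matching pci_disable_msi() calls added below clean up on the error and remove paths. A generic sketch of that enable-or-fall-back pattern for a PCI driver (hypothetical example_* names; shared-IRQ flag assumed as in the lpfc code):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    /* Hypothetical driver: prefer MSI, but keep going on legacy INTx if the
     * platform or device cannot do it. */
    static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
                                 void *data)
    {
            if (pci_enable_msi(pdev))
                    dev_info(&pdev->dev, "MSI unavailable, using legacy IRQ\n");

            /* pdev->irq now refers to the MSI vector if it was enabled. */
            return request_irq(pdev->irq, handler, IRQF_SHARED, "example", data);
    }

    static void example_teardown_irq(struct pci_dev *pdev, void *data)
    {
            free_irq(pdev->irq, data);
            pci_disable_msi(pdev);          /* no-op if MSI was never enabled */
    }
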
@@ -1730,6 +1700,7 @@ out_free_irq:
        lpfc_stop_timer(phba);
        phba->work_hba_events = 0;
        free_irq(phba->pcidev->irq, phba);
+       pci_disable_msi(phba->pcidev);
 out_free_sysfs_attr:
        lpfc_free_sysfs_attr(phba);
 out_remove_host:
@@ -1796,6 +1767,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
 
        /* Release the irq reservation */
        free_irq(phba->pcidev->irq, phba);
+       pci_disable_msi(phba->pcidev);
 
        lpfc_cleanup(phba, 0);
        lpfc_stop_timer(phba);
index 62c8ca862e9ec2666decc907d26018a4f3dc83d8..438cbcd9eb136cf4a391ee49387ba3f8f38bea81 100644 (file)
@@ -28,7 +28,7 @@
 #define LOG_NODE                      0x80     /* Node table events */
 #define LOG_MISC                      0x400    /* Miscellaneous events */
 #define LOG_SLI                       0x800    /* SLI events */
-#define LOG_CHK_COND                  0x1000   /* FCP Check condition flag */
+#define LOG_FCP_ERROR                 0x1000   /* log errors, not underruns */
 #define LOG_LIBDFC                    0x2000   /* Libdfc events */
 #define LOG_ALL_MSG                   0xffff   /* LOG all messages */
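
LOG_CHK_COND becomes LOG_FCP_ERROR so the bit can be enabled to see genuine FCP failures without also logging routine underruns (see the lpfc_handle_fcp_err() change further down, which tags underruns with LOG_FCP only). A small stand-alone sketch of this kind of OR-able log-mask gating (illustrative mask values, with printf standing in for lpfc_printf_log):

    #include <stdio.h>

    #define LOG_FCP         0x0040          /* illustrative mask values */
    #define LOG_FCP_ERROR   0x1000

    static unsigned int log_verbose = LOG_FCP_ERROR;    /* errors only */

    #define log_printf(mask, fmt, ...)                           \
            do {                                                 \
                    if ((mask) & log_verbose)                    \
                            printf(fmt, ##__VA_ARGS__);          \
            } while (0)

    int main(void)
    {
            /* Underruns are tagged LOG_FCP only; hard failures carry both bits,
             * so this setting reports failures but stays quiet on underruns. */
            log_printf(LOG_FCP, "underrun\n");
            log_printf(LOG_FCP | LOG_FCP_ERROR, "command failed\n");
            return 0;
    }
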
 
index d5f415007db29dc8335983241bada27bdf2f656b..0c7e731dc45a6cdba715693d4c72ae0526920a7b 100644 (file)
@@ -739,7 +739,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
                            uint32_t evt)
 {
        struct lpfc_iocbq *cmdiocb, *rspiocb;
-       struct lpfc_dmabuf *pcmd, *prsp;
+       struct lpfc_dmabuf *pcmd, *prsp, *mp;
        uint32_t *lp;
        IOCB_t *irsp;
        struct serv_parm *sp;
@@ -829,6 +829,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
                                      NLP_REGLOGIN_LIST);
                        return ndlp->nlp_state;
                }
+               mp = (struct lpfc_dmabuf *)mbox->context1;
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
                mempool_free(mbox, phba->mbox_mem_pool);
        } else {
                mempool_free(mbox, phba->mbox_mem_pool);
@@ -1620,8 +1623,8 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
         * or discovery in progress for this node. Starting discovery
         * here will affect the counting of discovery threads.
         */
-       if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
-               (ndlp->nlp_flag & NLP_NPR_2B_DISC)){
+       if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+               !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
                if (ndlp->nlp_flag & NLP_NPR_ADISC) {
                        ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
                        ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
index 97ae98dc95d04f9d13d55a2da8bbc8c51447f564..c3e68e0d8f7445e426fa0ee28cc35ab5903c37cc 100644 (file)
@@ -297,8 +297,10 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
        uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
        uint32_t resp_info = fcprsp->rspStatus2;
        uint32_t scsi_status = fcprsp->rspStatus3;
+       uint32_t *lp;
        uint32_t host_status = DID_OK;
        uint32_t rsplen = 0;
+       uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
 
        /*
         *  If this is a task management command, there is no
@@ -310,10 +312,25 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
                goto out;
        }
 
-       lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
-                       "%d:0730 FCP command failed: RSP "
-                       "Data: x%x x%x x%x x%x x%x x%x\n",
-                       phba->brd_no, resp_info, scsi_status,
+       if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
+               uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
+               if (snslen > SCSI_SENSE_BUFFERSIZE)
+                       snslen = SCSI_SENSE_BUFFERSIZE;
+
+               if (resp_info & RSP_LEN_VALID)
+                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
+               memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
+       }
+       lp = (uint32_t *)cmnd->sense_buffer;
+
+       if (!scsi_status && (resp_info & RESID_UNDER))
+               logit = LOG_FCP;
+
+       lpfc_printf_log(phba, KERN_WARNING, logit,
+                       "%d:0730 FCP command x%x failed: x%x SNS x%x x%x "
+                       "Data: x%x x%x x%x x%x x%x\n",
+                       phba->brd_no, cmnd->cmnd[0], scsi_status,
+                       be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
                        be32_to_cpu(fcprsp->rspResId),
                        be32_to_cpu(fcprsp->rspSnsLen),
                        be32_to_cpu(fcprsp->rspRspLen),
@@ -328,14 +345,6 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
                }
        }
 
-       if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
-               uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
-               if (snslen > SCSI_SENSE_BUFFERSIZE)
-                       snslen = SCSI_SENSE_BUFFERSIZE;
-
-               memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
-       }
-
        cmnd->resid = 0;
        if (resp_info & RESID_UNDER) {
                cmnd->resid = be32_to_cpu(fcprsp->rspResId);
@@ -378,7 +387,7 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
         */
        } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
                        (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
                        "%d:0734 FCP Read Check Error Data: "
                        "x%x x%x x%x x%x\n", phba->brd_no,
                        be32_to_cpu(fcpcmd->fcpDl),
@@ -670,6 +679,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
        struct lpfc_iocbq *iocbqrsp;
        int ret;
 
+       if (!rdata->pnode)
+               return FAILED;
+
        lpfc_cmd->rdata = rdata;
        ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun,
                                           FCP_TARGET_RESET);
@@ -976,20 +988,34 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 
        lpfc_block_error_handler(cmnd);
        spin_lock_irq(shost->host_lock);
+       loopcnt = 0;
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or devloss timeout expires.
         */
        while ( 1 ) {
                if (!pnode)
-                       break;
+                       return FAILED;
 
                if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                        spin_unlock_irq(phba->host->host_lock);
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        spin_lock_irq(phba->host->host_lock);
+                       loopcnt++;
+                       rdata = cmnd->device->hostdata;
+                       if (!rdata ||
+                               (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
+                               lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+                                       "%d:0721 LUN Reset rport failure:"
+                                       " cnt x%x rdata x%p\n",
+                                       phba->brd_no, loopcnt, rdata);
+                               goto out;
+                       }
+                       pnode = rdata->pnode;
+                       if (!pnode)
+                               return FAILED;
                }
-               if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
+               if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        break;
        }
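
The rediscovery wait in the LUN reset handler is now bounded: each pass sleeps 500 ms, so allowing cfg_devloss_tmo * 2 + 1 passes caps the wait at roughly the devloss timeout (which the FC transport expresses in seconds) plus one extra half-second before the handler gives up. A trivial stand-alone check of that arithmetic (30 s timeout assumed purely for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int devloss_tmo = 30;                  /* assumed timeout, seconds  */
            unsigned int max_loops = devloss_tmo * 2 + 1;   /* one pass per 500 ms sleep */

            printf("at most %u passes = %u.%u s of waiting\n",
                   max_loops, max_loops / 2, (max_loops % 2) * 5);
            return 0;
    }
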
 
index 582f5ea4e84e3c7b9f00c59ae58f5fab2af41925..a4128e19338af4e74e843cfcc1588c74eadde0fe 100644 (file)
@@ -117,6 +117,10 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
+       case CMD_FCP_TSEND_CX:
+       case CMD_FCP_TRSP_CX:
+       case CMD_FCP_TRECEIVE_CX:
+       case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
@@ -131,6 +135,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
+       case CMD_FCP_TSEND64_CX:
+       case CMD_FCP_TRSP64_CX:
+       case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
@@ -1098,6 +1105,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
                                      (uint32_t *) &rspiocbq.iocb,
                                      sizeof (IOCB_t));
+               INIT_LIST_HEAD(&(rspiocbq.list));
                irsp = &rspiocbq.iocb;
 
                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
@@ -1149,6 +1157,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
                                }
                        }
                        break;
+               case LPFC_UNSOL_IOCB:
+                       spin_unlock_irqrestore(phba->host->host_lock, iflag);
+                       lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
+                       spin_lock_irqsave(phba->host->host_lock, iflag);
+                       break;
                default:
                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
                                char adaptermsg[LPFC_MAX_ADPTMSG];
@@ -2472,13 +2485,17 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
        psli = &phba->sli;
 
        /* Adjust cmd/rsp ring iocb entries more evenly */
+
+       /* Take some away from the FCP ring */
        pring = &psli->ring[psli->fcp_ring];
        pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
        pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
        pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
        pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
 
-       pring = &psli->ring[1];
+       /* and give them to the extra ring */
+       pring = &psli->ring[psli->extra_ring];
+
        pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
        pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
        pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
@@ -2488,8 +2505,8 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
        pring->iotag_max = 4096;
        pring->num_mask = 1;
        pring->prt[0].profile = 0;      /* Mask 0 */
-       pring->prt[0].rctl = FC_UNSOL_DATA;
-       pring->prt[0].type = 5;
+       pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
+       pring->prt[0].type = phba->cfg_multi_ring_type;
        pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
        return 0;
 }
@@ -2505,7 +2522,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
        psli->sli_flag = 0;
        psli->fcp_ring = LPFC_FCP_RING;
        psli->next_ring = LPFC_FCP_NEXT_RING;
-       psli->ip_ring = LPFC_IP_RING;
+       psli->extra_ring = LPFC_EXTRA_RING;
 
        psli->iocbq_lookup = NULL;
        psli->iocbq_lookup_len = 0;
@@ -2528,7 +2545,7 @@ lpfc_sli_setup(struct lpfc_hba *phba)
                        pring->fast_iotag = pring->iotag_max;
                        pring->num_mask = 0;
                        break;
-               case LPFC_IP_RING:      /* ring 1 - IP */
+               case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
                        /* numCiocb and numRiocb are used in config_port */
                        pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
                        pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
@@ -3238,6 +3255,21 @@ lpfc_intr_handler(int irq, void *dev_id)
                lpfc_sli_handle_fast_ring_event(phba,
                                                &phba->sli.ring[LPFC_FCP_RING],
                                                status);
+
+       if (phba->cfg_multi_ring_support == 2) {
+               /*
+                * Process all events on extra ring.  Take the optimized path
+                * for extra ring IO.  Any other IO is slow path and is handled
+                * by the worker thread.
+                */
+               status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
+               status >>= (4*LPFC_EXTRA_RING);
+               if (status & HA_RXATT) {
+                       lpfc_sli_handle_fast_ring_event(phba,
+                                       &phba->sli.ring[LPFC_EXTRA_RING],
+                                       status);
+               }
+       }
        return IRQ_HANDLED;
 
 } /* lpfc_intr_handler */
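
The host-attention word carries four bits per ring, so the new extra-ring path isolates its nibble with HA_RXMASK shifted up by 4*LPFC_EXTRA_RING, shifts it back down, and only takes the fast path when HA_RXATT is set. A stand-alone sketch of that per-ring nibble test (illustrative constants; the driver masks before shifting, which comes to the same thing):

    #include <stdint.h>
    #include <stdbool.h>

    #define RING_ATT_BITS   4       /* attention bits per ring                 */
    #define RXMASK          0xfU    /* one ring's worth of attention bits      */
    #define RXATT           0x8U    /* response-queue attention (illustrative) */

    /* True when the given ring has receive/response attention pending. */
    static bool ring_has_rx_attention(uint32_t ha_copy, unsigned int ring)
    {
            uint32_t status = ha_copy & (RXMASK << (RING_ATT_BITS * ring));

            status >>= RING_ATT_BITS * ring;
            return (status & RXATT) != 0;
    }
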
index e26de6809358a8529b5df6b2a3e88611be709cbd..a43549959dc7bb45bd9ebabcd8121afdcf201a00 100644 (file)
@@ -198,7 +198,7 @@ struct lpfc_sli {
        int fcp_ring;           /* ring used for FCP initiator commands */
        int next_ring;
 
-       int ip_ring;            /* ring used for IP network drv cmds */
+       int extra_ring;         /* extra ring used for other protocols */
 
        struct lpfc_sli_stat slistat;   /* SLI statistical info */
        struct list_head mboxq;
index ac417908b4071378c2b1d8497d2e2be88c34447b..a61ef3d1e7f1b08090403de95e4547246c65420f 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.1.10"
+#define LPFC_DRIVER_VERSION "8.1.11"
 
 #define LPFC_DRIVER_NAME "lpfc"
 
index 86099fde1b2a6bc13866835757861e74d1b41d07..77d9d3804ccfd02094a4aba9054081c233ed8427 100644 (file)
@@ -73,10 +73,10 @@ static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
 module_param(max_mbox_busy_wait, ushort, 0);
 MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
 
-#define RDINDOOR(adapter)              readl((adapter)->base + 0x20)
-#define RDOUTDOOR(adapter)             readl((adapter)->base + 0x2C)
-#define WRINDOOR(adapter,value)                writel(value, (adapter)->base + 0x20)
-#define WROUTDOOR(adapter,value)       writel(value, (adapter)->base + 0x2C)
+#define RDINDOOR(adapter)      readl((adapter)->mmio_base + 0x20)
+#define RDOUTDOOR(adapter)     readl((adapter)->mmio_base + 0x2C)
+#define WRINDOOR(adapter,value)         writel(value, (adapter)->mmio_base + 0x20)
+#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
 
 /*
  * Global variables
@@ -1386,7 +1386,8 @@ megaraid_isr_memmapped(int irq, void *devp)
 
                handled = 1;
 
-               while( RDINDOOR(adapter) & 0x02 ) cpu_relax();
+               while( RDINDOOR(adapter) & 0x02 )
+                       cpu_relax();
 
                mega_cmd_done(adapter, completed, nstatus, status);
 
@@ -4668,6 +4669,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                host->host_no, mega_baseport, irq);
 
        adapter->base = mega_baseport;
+       if (flag & BOARD_MEMMAP)
+               adapter->mmio_base = (void __iomem *) mega_baseport;
 
        INIT_LIST_HEAD(&adapter->free_list);
        INIT_LIST_HEAD(&adapter->pending_list);
index 66529f11d23cca784e73c0cf1738d62c0f43e4c2..c6e74643abe29be7f7d52ad29b7fd8634c548c4d 100644 (file)
@@ -801,7 +801,8 @@ typedef struct {
                                   clustering is available */
        u32     flag;
 
-       unsigned long   base;
+       unsigned long           base;
+       void __iomem            *mmio_base;
 
        /* mbox64 with mbox not aligned on 16-byte boundry */
        mbox64_t        *una_mbox64;
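
Splitting the raw bus address (base) from a void __iomem * cookie (mmio_base) lets the RDINDOOR/WRINDOOR macros above hand readl()/writel() a properly typed pointer, which is what sparse's address-space checking expects. A minimal sketch of the idiom (hypothetical example_* names; the BAR index and doorbell offsets are only illustrative):

    #include <linux/pci.h>
    #include <linux/io.h>

    struct example_adapter {
            unsigned long    base;          /* raw BAR address, kept for printing */
            void __iomem    *mmio_base;     /* cookie used for every MMIO access  */
    };

    static int example_map(struct example_adapter *a, struct pci_dev *pdev)
    {
            a->base = pci_resource_start(pdev, 0);
            a->mmio_base = ioremap(a->base, pci_resource_len(pdev, 0));
            if (!a->mmio_base)
                    return -ENOMEM;

            writel(0, a->mmio_base + 0x20);         /* inbound doorbell  */
            (void)readl(a->mmio_base + 0x2C);       /* outbound doorbell */
            return 0;
    }
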
index 7e4262f2af96dd000940ae1000a1d6319f826ef8..046223b4ae5747255082cc1747a3afaa5a02afc1 100644 (file)
@@ -517,7 +517,7 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
  * Returns the number of frames required for numnber of sge's (sge_count)
  */
 
-u32 megasas_get_frame_count(u8 sge_count)
+static u32 megasas_get_frame_count(u8 sge_count)
 {
        int num_cnt;
        int sge_bytes;
@@ -1733,7 +1733,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
  *
  * Tasklet to complete cmds
  */
-void megasas_complete_cmd_dpc(unsigned long instance_addr)
+static void megasas_complete_cmd_dpc(unsigned long instance_addr)
 {
        u32 producer;
        u32 consumer;
index adb8eb4f5fd1a566a484f63dcf2bcf1a9ed8cfd0..bbf521cbc55d135747965d5390d17d70c312343b 100644 (file)
@@ -589,10 +589,12 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
 static struct ncr_driver_setup
        driver_setup                    = SCSI_NCR_DRIVER_SETUP;
 
+#ifndef MODULE
 #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
 static struct ncr_driver_setup
        driver_safe_setup __initdata    = SCSI_NCR_DRIVER_SAFE_SETUP;
 #endif
+#endif /* !MODULE */
 
 #define initverbose (driver_setup.verbose)
 #define bootverbose (np->verbose)
@@ -641,6 +643,13 @@ static struct ncr_driver_setup
 #define OPT_IARB               26
 #endif
 
+#ifdef MODULE
+#define        ARG_SEP ' '
+#else
+#define        ARG_SEP ','
+#endif
+
+#ifndef MODULE
 static char setup_token[] __initdata = 
        "tags:"   "mpar:"
        "spar:"   "disc:"
@@ -660,12 +669,6 @@ static char setup_token[] __initdata =
 #endif
        ;       /* DONNOT REMOVE THIS ';' */
 
-#ifdef MODULE
-#define        ARG_SEP ' '
-#else
-#define        ARG_SEP ','
-#endif
-
 static int __init get_setup_token(char *p)
 {
        char *cur = setup_token;
@@ -682,7 +685,6 @@ static int __init get_setup_token(char *p)
        return 0;
 }
 
-
 static int __init sym53c8xx__setup(char *str)
 {
 #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT
@@ -804,6 +806,7 @@ static int __init sym53c8xx__setup(char *str)
 #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */
        return 1;
 }
+#endif /* !MODULE */
 
 /*===================================================================
 **
@@ -8321,12 +8324,12 @@ char *ncr53c8xx;        /* command line passed by insmod */
 module_param(ncr53c8xx, charp, 0);
 #endif
 
+#ifndef MODULE
 static int __init ncr53c8xx_setup(char *str)
 {
        return sym53c8xx__setup(str);
 }
 
-#ifndef MODULE
 __setup("ncr53c8xx=", ncr53c8xx_setup);
 #endif
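
The reshuffle gathers everything that exists solely for kernel-command-line parsing (setup_token, sym53c8xx__setup(), the __setup() hook) under #ifndef MODULE: a modular build takes its options through module_param() instead, so the __init parser would be dead code there. A condensed sketch of that dual option-passing pattern (hypothetical example_verbose parameter):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int example_verbose;

    #ifdef MODULE
    /* Modular build: the option arrives as "example_verbose=1" via modprobe. */
    module_param(example_verbose, int, 0);
    #else
    /* Built-in build: the option arrives on the kernel command line. */
    static int __init example_verbose_setup(char *str)
    {
            example_verbose = simple_strtoul(str, NULL, 0);
            return 1;
    }
    __setup("example_verbose=", example_verbose_setup);
    #endif
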
 
index dd67a68c5c23961e5617feda7c1686d0ada8227e..c116a6ae3c54f21caa66f3ca26b69cbdae92b3e1 100644 (file)
@@ -72,12 +72,12 @@ static void dma_advance_sg(Scsi_Cmnd *);
 static int  oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
 
 #ifdef USE_BOTTOM_HALF
-static void dma_commit(void *opaque);
+static void dma_commit(struct work_struct *unused);
 
 long oktag_to_io(long *paddr, long *addr, long len);
 long oktag_from_io(long *addr, long *paddr, long len);
 
-static DECLARE_WORK(tq_fake_dma, dma_commit, NULL);
+static DECLARE_WORK(tq_fake_dma, dma_commit);
 
 #define DMA_MAXTRANSFER 0x8000
 
@@ -266,7 +266,7 @@ oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
  */
  
  
-static void dma_commit(void *opaque)
+static void dma_commit(struct work_struct *unused)
 {
     long wait,len2,pos;
     struct NCR_ESP *esp;
index ee449b29fc82cb82cca0cbd0bcfa31ff7ad61dcd..aad362ba02e0d81f93978bf61a7801775c773181 100644 (file)
@@ -154,16 +154,11 @@ static int aha152x_config_cs(struct pcmcia_device *link)
     
     DEBUG(0, "aha152x_config(0x%p)\n", link);
 
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.TupleData = tuple_data;
     tuple.TupleDataMax = 64;
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
-
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+    tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     while (1) {
        if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
index 85f7ffac19a0ef589cbc9c4643c8ef9b6f7a0f71..a1c5f265069f0b7d6db2b8e6e7688a57f381a40d 100644 (file)
@@ -136,14 +136,9 @@ static int fdomain_config(struct pcmcia_device *link)
 
     DEBUG(0, "fdomain_config(0x%p)\n", link);
 
-    tuple.DesiredTuple = CISTPL_CONFIG;
     tuple.TupleData = tuple_data;
     tuple.TupleDataMax = 64;
     tuple.TupleOffset = 0;
-    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-    CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-    link->conf.ConfigBase = parse.config.base;
 
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
index f2d79c3f0b8eef641fbaf42187133782c8101fee..d72df5dae4ee02887cf83eb2d9ccde89572e0bba 100644 (file)
@@ -1685,16 +1685,10 @@ static int nsp_cs_config(struct pcmcia_device *link)
 
        nsp_dbg(NSP_DEBUG_INIT, "in");
 
-       tuple.DesiredTuple    = CISTPL_CONFIG;
        tuple.Attributes      = 0;
        tuple.TupleData       = tuple_data;
        tuple.TupleDataMax    = sizeof(tuple_data);
        tuple.TupleOffset     = 0;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData,  pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple,    pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present    = parse.config.rmask[0];
 
        /* Look up the current Vcc */
        CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link, &conf));
index 86c2ac6ae6239abfd374c29cb98595610fed28dd..9d431fe7f47ff97bf29757e88f608a8540384f08 100644 (file)
@@ -208,18 +208,11 @@ static int qlogic_config(struct pcmcia_device * link)
 
        DEBUG(0, "qlogic_config(0x%p)\n", link);
 
+       info->manf_id = link->manf_id;
+
        tuple.TupleData = (cisdata_t *) tuple_data;
        tuple.TupleDataMax = 64;
        tuple.TupleOffset = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-
-       tuple.DesiredTuple = CISTPL_MANFID;
-       if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) && (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
-               info->manf_id = le16_to_cpu(tuple.TupleData[0]);
 
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
index 72fe5d055de12875eaf8a5feb79a3bbaf058dbdd..fb7acea60286170e3b7c4cf0c44a41269b63c3ed 100644 (file)
@@ -722,19 +722,11 @@ SYM53C500_config(struct pcmcia_device *link)
 
        DEBUG(0, "SYM53C500_config(0x%p)\n", link);
 
+       info->manf_id = link->manf_id;
+
        tuple.TupleData = (cisdata_t *)tuple_data;
        tuple.TupleDataMax = 64;
        tuple.TupleOffset = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-
-       tuple.DesiredTuple = CISTPL_MANFID;
-       if ((pcmcia_get_first_tuple(link, &tuple) == CS_SUCCESS) &&
-           (pcmcia_get_tuple_data(link, &tuple) == CS_SUCCESS))
-               info->manf_id = le16_to_cpu(tuple.TupleData[0]);
 
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
index 89a2a9f11e41f6352f287ed976a0fc58e9abbbb5..584ba4d6e0389d277c10422bcfb3d3183ff695e2 100644 (file)
@@ -31,7 +31,7 @@ typedef struct {
        int base;               /* Actual port address          */
        int mode;               /* Transfer mode                */
        struct scsi_cmnd *cur_cmd;      /* Current queued command       */
-       struct work_struct ppa_tq;      /* Polling interrupt stuff       */
+       struct delayed_work ppa_tq;     /* Polling interrupt stuff       */
        unsigned long jstart;   /* Jiffies at start             */
        unsigned long recon_tmo;        /* How many usecs to wait for reconnection (6th bit) */
        unsigned int failed:1;  /* Failure flag                 */
@@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd)
  * the scheduler's task queue to generate a stream of call-backs and
  * complete the request when the drive is ready.
  */
-static void ppa_interrupt(void *data)
+static void ppa_interrupt(struct work_struct *work)
 {
-       ppa_struct *dev = (ppa_struct *) data;
+       ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
        struct scsi_cmnd *cmd = dev->cur_cmd;
 
        if (!cmd) {
@@ -637,7 +637,6 @@ static void ppa_interrupt(void *data)
                return;
        }
        if (ppa_engine(dev, cmd)) {
-               dev->ppa_tq.data = (void *) dev;
                schedule_delayed_work(&dev->ppa_tq, 1);
                return;
        }
@@ -822,8 +821,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
        cmd->result = DID_ERROR << 16;  /* default return code */
        cmd->SCp.phase = 0;     /* bus free */
 
-       dev->ppa_tq.data = dev;
-       schedule_work(&dev->ppa_tq);
+       schedule_delayed_work(&dev->ppa_tq, 0);
 
        ppa_pb_claim(dev);
 
@@ -1086,7 +1084,7 @@ static int __ppa_attach(struct parport *pb)
        else
                ports = 8;
 
-       INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev);
+       INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);
 
        err = -ENOMEM;
        host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
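
This is the 2.6.20 workqueue conversion: handlers now receive the struct work_struct * itself rather than an opaque void *data, per-device state comes back via container_of(), and the polling re-arm uses a struct delayed_work with schedule_delayed_work(). A trimmed sketch of the idiom (hypothetical example_* names):

    #include <linux/workqueue.h>

    struct example_dev {
            struct delayed_work     poll_work;
            int                     busy;
    };

    static void example_poll(struct work_struct *work)
    {
            struct example_dev *dev =
                    container_of(work, struct example_dev, poll_work.work);

            if (dev->busy)
                    schedule_delayed_work(&dev->poll_work, 1);  /* poll again next jiffy */
    }

    static void example_start(struct example_dev *dev)
    {
            INIT_DELAYED_WORK(&dev->poll_work, example_poll);
            schedule_delayed_work(&dev->poll_work, 0);          /* run as soon as possible */
    }
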
index 285c8e8ff1a09658ddeb65c07348899da9aa316f..7b18a6c7b7eb0de125e2cd7bfb2581c786378de5 100644 (file)
@@ -390,7 +390,7 @@ static struct sysfs_entry {
        { "optrom_ctl", &sysfs_optrom_ctl_attr, },
        { "vpd", &sysfs_vpd_attr, 1 },
        { "sfp", &sysfs_sfp_attr, 1 },
-       { 0 },
+       { NULL },
 };
 
 void
index 08cb5e3fb55319d7eba7965f5863872aac1193f1..a823f0bc519dbf8e7c921d2fb3924a94f1aebab8 100644 (file)
@@ -59,9 +59,6 @@ int
 qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 {
        int     rval;
-       uint8_t restart_risc = 0;
-       uint8_t retry;
-       uint32_t wait_time;
 
        /* Clear adapter flags. */
        ha->flags.online = 0;
@@ -104,87 +101,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
 
        qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
 
-       retry = 10;
-       /*
-        * Try to configure the loop.
-        */
-       do {
-               restart_risc = 0;
-
-               /* If firmware needs to be loaded */
-               if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
-                       if ((rval = ha->isp_ops.chip_diag(ha)) == QLA_SUCCESS) {
-                               rval = qla2x00_setup_chip(ha);
-                       }
-               }
-
-               if (rval == QLA_SUCCESS &&
-                   (rval = qla2x00_init_rings(ha)) == QLA_SUCCESS) {
-check_fw_ready_again:
-                       /*
-                        * Wait for a successful LIP up to a maximum
-                        * of (in seconds): RISC login timeout value,
-                        * RISC retry count value, and port down retry
-                        * value OR a minimum of 4 seconds OR If no
-                        * cable, only 5 seconds.
-                        */
-                       rval = qla2x00_fw_ready(ha);
-                       if (rval == QLA_SUCCESS) {
-                               clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-
-                               /* Issue a marker after FW becomes ready. */
-                               qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
-
-                               /*
-                                * Wait at most MAX_TARGET RSCNs for a stable
-                                * link.
-                                */
-                               wait_time = 256;
-                               do {
-                                       clear_bit(LOOP_RESYNC_NEEDED,
-                                           &ha->dpc_flags);
-                                       rval = qla2x00_configure_loop(ha);
-
-                                       if (test_and_clear_bit(ISP_ABORT_NEEDED,
-                                           &ha->dpc_flags)) {
-                                               restart_risc = 1;
-                                               break;
-                                       }
-
-                                       /*
-                                        * If loop state change while we were
-                                        * discoverying devices then wait for
-                                        * LIP to complete
-                                        */
-
-                                       if (atomic_read(&ha->loop_state) !=
-                                           LOOP_READY && retry--) {
-                                               goto check_fw_ready_again;
-                                       }
-                                       wait_time--;
-                               } while (!atomic_read(&ha->loop_down_timer) &&
-                                   retry &&
-                                   wait_time &&
-                                   (test_bit(LOOP_RESYNC_NEEDED,
-                                       &ha->dpc_flags)));
-
-                               if (wait_time == 0)
-                                       rval = QLA_FUNCTION_FAILED;
-                       } else if (ha->device_flags & DFLG_NO_CABLE)
-                               /* If no cable, then all is good. */
-                               rval = QLA_SUCCESS;
-               }
-       } while (restart_risc && retry--);
-
-       if (rval == QLA_SUCCESS) {
-               clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
-               qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
-               ha->marker_needed = 0;
-
-               ha->flags.online = 1;
-       } else {
-               DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
+       if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) {
+               rval = ha->isp_ops.chip_diag(ha);
+               if (rval)
+                       return (rval);
+               rval = qla2x00_setup_chip(ha);
+               if (rval)
+                       return (rval);
        }
+       rval = qla2x00_init_rings(ha);
 
        return (rval);
 }
@@ -2208,8 +2133,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
 
        atomic_set(&fcport->state, FCS_ONLINE);
 
-       if (ha->flags.init_done)
-               qla2x00_reg_remote_port(ha, fcport);
+       qla2x00_reg_remote_port(ha, fcport);
 }
 
 void
index 208607be78c7267a5770974b8d4be12d4b839b87..d03523d3bf38b73d173ce07f9a55cfc2d6067335 100644 (file)
@@ -24,7 +24,7 @@ char qla2x00_version_str[40];
 /*
  * SRB allocation cache
  */
-static kmem_cache_t *srb_cachep;
+static struct kmem_cache *srb_cachep;
 
 /*
  * Ioctl related information.
@@ -95,6 +95,8 @@ MODULE_PARM_DESC(ql2xqfullrampup,
  */
 static int qla2xxx_slave_configure(struct scsi_device * device);
 static int qla2xxx_slave_alloc(struct scsi_device *);
+static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time);
+static void qla2xxx_scan_start(struct Scsi_Host *);
 static void qla2xxx_slave_destroy(struct scsi_device *);
 static int qla2x00_queuecommand(struct scsi_cmnd *cmd,
                void (*fn)(struct scsi_cmnd *));
@@ -124,6 +126,8 @@ static struct scsi_host_template qla2x00_driver_template = {
 
        .slave_alloc            = qla2xxx_slave_alloc,
        .slave_destroy          = qla2xxx_slave_destroy,
+       .scan_finished          = qla2xxx_scan_finished,
+       .scan_start             = qla2xxx_scan_start,
        .change_queue_depth     = qla2x00_change_queue_depth,
        .change_queue_type      = qla2x00_change_queue_type,
        .this_id                = -1,
@@ -287,7 +291,7 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
        return str;
 }
 
-char *
+static char *
 qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
 {
        char un_str[10];
@@ -325,7 +329,7 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
        return (str);
 }
 
-char *
+static char *
 qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
 {
        sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
@@ -634,7 +638,7 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
 * Note:
 *    Only return FAILED if command not returned by firmware.
 **************************************************************************/
-int
+static int
 qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -771,7 +775,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
-int
+static int
 qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
 {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -902,7 +906,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha)
 *    SUCCESS/FAILURE (defined as macro in scsi.h).
 *
 **************************************************************************/
-int
+static int
 qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
 {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -963,7 +967,7 @@ eh_bus_reset_done:
 *
 * Note:
 **************************************************************************/
-int
+static int
 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
        scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
@@ -1366,6 +1370,29 @@ qla24xx_disable_intrs(scsi_qla_host_t *ha)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
+static void
+qla2xxx_scan_start(struct Scsi_Host *shost)
+{
+       scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
+
+       set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
+       set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
+       set_bit(RSCN_UPDATE, &ha->dpc_flags);
+}
+
+static int
+qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       scsi_qla_host_t *ha = (scsi_qla_host_t *)shost->hostdata;
+
+       if (!ha->host)
+               return 1;
+       if (time > ha->loop_reset_delay * HZ)
+               return 1;
+
+       return atomic_read(&ha->loop_state) == LOOP_READY;
+}
+
 /*
  * PCI driver interface
  */
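
Rather than holding up probe until the loop settles, the driver now publishes scan_start()/scan_finished() and lets scsi_scan_host() drive discovery: the midlayer calls scan_start() once and then polls scan_finished() with the jiffies elapsed until it returns nonzero. A bare template of the callback pair and its host-template wiring (hypothetical example_* names and an assumed 10 s budget):

    #include <linux/jiffies.h>
    #include <scsi/scsi_host.h>

    static int example_ready;       /* set from the driver's link-up path (assumed) */

    static void example_scan_start(struct Scsi_Host *shost)
    {
            /* Kick off asynchronous discovery here (DPC flags, mailbox, ...). */
    }

    static int example_scan_finished(struct Scsi_Host *shost, unsigned long time)
    {
            /* Polled by the midlayer with the jiffies elapsed since scan_start(). */
            if (time > 10 * HZ)     /* give up after an assumed 10 s budget */
                    return 1;
            return example_ready;
    }

    static struct scsi_host_template example_template = {
            .name           = "example",
            .scan_start     = example_scan_start,
            .scan_finished  = example_scan_finished,
            .this_id        = -1,
    };
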
@@ -1377,10 +1404,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        struct Scsi_Host *host;
        scsi_qla_host_t *ha;
        unsigned long   flags = 0;
-       unsigned long   wait_switch = 0;
        char pci_info[20];
        char fw_str[30];
-       fc_port_t *fcport;
        struct scsi_host_template *sht;
 
        if (pci_enable_device(pdev))
@@ -1631,30 +1656,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ha->isp_ops.enable_intrs(ha);
 
-       /* v2.19.5b6 */
-       /*
-        * Wait around max loop_reset_delay secs for the devices to come
-        * on-line. We don't want Linux scanning before we are ready.
-        *
-        */
-       for (wait_switch = jiffies + (ha->loop_reset_delay * HZ);
-           time_before(jiffies,wait_switch) &&
-            !(ha->device_flags & (DFLG_NO_CABLE | DFLG_FABRIC_DEVICES))
-            && (ha->device_flags & SWITCH_FOUND) ;) {
-
-               qla2x00_check_fabric_devices(ha);
-
-               msleep(10);
-       }
-
        pci_set_drvdata(pdev, ha);
+
        ha->flags.init_done = 1;
+       ha->flags.online = 1;
+
        num_hosts++;
 
        ret = scsi_add_host(host, &pdev->dev);
        if (ret)
                goto probe_failed;
 
+       scsi_scan_host(host);
+
        qla2x00_alloc_sysfs_attr(ha);
 
        qla2x00_init_host_attr(ha);
@@ -1669,10 +1683,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no,
            ha->isp_ops.fw_version_str(ha, fw_str));
 
-       /* Go with fc_rport registration. */
-       list_for_each_entry(fcport, &ha->fcports, list)
-               qla2x00_reg_remote_port(ha, fcport);
-
        return 0;
 
 probe_failed:
index c71dbd5bd5433e78cf8bc4fe2746253f89f4520a..15390ad87456399a774ac65e25a5f519216adf15 100644 (file)
@@ -449,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
        return FARX_ACCESS_NVRAM_DATA | naddr;
 }
 
-uint32_t
+static uint32_t
 qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
 {
        int rval;
@@ -490,7 +490,7 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
        return dwptr;
 }
 
-int
+static int
 qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
 {
        int rval;
@@ -512,7 +512,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
        return rval;
 }
 
-void
+static void
 qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
     uint8_t *flash_id)
 {
@@ -537,7 +537,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
        }
 }
 
-int
+static int
 qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
     uint32_t dwords)
 {
index 752031fadfef56c2ee5f0617af4ab07d30750df2..7b4e077a39c18c770bbd42a80c7696ea83066e8e 100644 (file)
@@ -71,7 +71,7 @@ void __dump_registers(struct scsi_qla_host *ha)
                       readw(&ha->reg->u1.isp4010.nvram));
        }
 
-       else if (is_qla4022(ha)) {
+       else if (is_qla4022(ha) | is_qla4032(ha)) {
                printk(KERN_INFO "0x%02X intr_mask       = 0x%08X\n",
                       (uint8_t) offsetof(struct isp_reg,
                                          u1.isp4022.intr_mask),
@@ -119,7 +119,7 @@ void __dump_registers(struct scsi_qla_host *ha)
                       readw(&ha->reg->u2.isp4010.port_err_status));
        }
 
-       else if (is_qla4022(ha)) {
+       else if (is_qla4022(ha) | is_qla4032(ha)) {
                printk(KERN_INFO "Page 0 Registers:\n");
                printk(KERN_INFO "0x%02X ext_hw_conf     = 0x%08X\n",
                       (uint8_t) offsetof(struct isp_reg,
index a7f6c7b1c59004835e953b6fbbee4f2034025af3..4249e52a559290a7713dc22af921933ef7b4d5f3 100644 (file)
 
 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
 #define PCI_DEVICE_ID_QLOGIC_ISP4022   0x4022
-#endif                         /*  */
+#endif
+
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP4032
+#define PCI_DEVICE_ID_QLOGIC_ISP4032   0x4032
+#endif
 
 #define QLA_SUCCESS                    0
 #define QLA_ERROR                      1
@@ -277,7 +281,6 @@ struct scsi_qla_host {
 #define AF_INTERRUPTS_ON             6 /* 0x00000040 Not Used */
 #define AF_GET_CRASH_RECORD          7 /* 0x00000080 */
 #define AF_LINK_UP                   8 /* 0x00000100 */
-#define AF_TOPCAT_CHIP_PRESENT       9 /* 0x00000200 */
 #define AF_IRQ_ATTACHED                     10 /* 0x00000400 */
 #define AF_ISNS_CMD_IN_PROCESS      12 /* 0x00001000 */
 #define AF_ISNS_CMD_DONE            13 /* 0x00002000 */
@@ -317,16 +320,17 @@ struct scsi_qla_host {
        /* NVRAM registers */
        struct eeprom_data *nvram;
        spinlock_t hardware_lock ____cacheline_aligned;
-       spinlock_t list_lock;
        uint32_t   eeprom_cmd_data;
 
        /* Counters for general statistics */
+       uint64_t isr_count;
        uint64_t adapter_error_count;
        uint64_t device_error_count;
        uint64_t total_io_count;
        uint64_t total_mbytes_xferred;
        uint64_t link_failure_count;
        uint64_t invalid_crc_count;
+       uint32_t bytes_xfered;
        uint32_t spurious_int_count;
        uint32_t aborted_io_count;
        uint32_t io_timeout_count;
@@ -438,6 +442,11 @@ static inline int is_qla4022(struct scsi_qla_host *ha)
        return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
 }
 
+static inline int is_qla4032(struct scsi_qla_host *ha)
+{
+       return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
+}
+
 static inline int adapter_up(struct scsi_qla_host *ha)
 {
        return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
@@ -451,58 +460,58 @@ static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
 
 static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u1.isp4022.semaphore :
-               &ha->reg->u1.isp4010.nvram);
+       return (is_qla4010(ha) ?
+               &ha->reg->u1.isp4010.nvram :
+               &ha->reg->u1.isp4022.semaphore);
 }
 
 static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u1.isp4022.nvram :
-               &ha->reg->u1.isp4010.nvram);
+       return (is_qla4010(ha) ?
+               &ha->reg->u1.isp4010.nvram :
+               &ha->reg->u1.isp4022.nvram);
 }
 
 static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u2.isp4022.p0.ext_hw_conf :
-               &ha->reg->u2.isp4010.ext_hw_conf);
+       return (is_qla4010(ha) ?
+               &ha->reg->u2.isp4010.ext_hw_conf :
+               &ha->reg->u2.isp4022.p0.ext_hw_conf);
 }
 
 static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u2.isp4022.p0.port_status :
-               &ha->reg->u2.isp4010.port_status);
+       return (is_qla4010(ha) ?
+               &ha->reg->u2.isp4010.port_status :
+               &ha->reg->u2.isp4022.p0.port_status);
 }
 
 static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u2.isp4022.p0.port_ctrl :
-               &ha->reg->u2.isp4010.port_ctrl);
+       return (is_qla4010(ha) ?
+               &ha->reg->u2.isp4010.port_ctrl :
+               &ha->reg->u2.isp4022.p0.port_ctrl);
 }
 
 static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u2.isp4022.p0.port_err_status :
-               &ha->reg->u2.isp4010.port_err_status);
+       return (is_qla4010(ha) ?
+               &ha->reg->u2.isp4010.port_err_status :
+               &ha->reg->u2.isp4022.p0.port_err_status);
 }
 
 static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               &ha->reg->u2.isp4022.p0.gp_out :
-               &ha->reg->u2.isp4010.gp_out);
+       return (is_qla4010(ha) ?
+               &ha->reg->u2.isp4010.gp_out :
+               &ha->reg->u2.isp4022.p0.gp_out);
 }
 
 static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
 {
-       return (is_qla4022(ha) ?
-               offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2 :
-               offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2);
+       return (is_qla4010(ha) ?
+               offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2 :
+               offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2);
 }
 
 int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
@@ -511,59 +520,59 @@ int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
 
 static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
 {
-       if (is_qla4022(a))
+       if (is_qla4010(a))
+               return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
+                                          QL4010_FLASH_SEM_BITS);
+       else
                return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
                                           (QL4022_RESOURCE_BITS_BASE_CODE |
                                            (a->mac_index)) << 13);
-       else
-               return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
-                                          QL4010_FLASH_SEM_BITS);
 }
 
 static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
 {
-       if (is_qla4022(a))
-               ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
-       else
+       if (is_qla4010(a))
                ql4xxx_sem_unlock(a, QL4010_FLASH_SEM_MASK);
+       else
+               ql4xxx_sem_unlock(a, QL4022_FLASH_SEM_MASK);
 }
 
 static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
 {
-       if (is_qla4022(a))
+       if (is_qla4010(a))
+               return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
+                                          QL4010_NVRAM_SEM_BITS);
+       else
                return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
                                           (QL4022_RESOURCE_BITS_BASE_CODE |
                                            (a->mac_index)) << 10);
-       else
-               return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
-                                          QL4010_NVRAM_SEM_BITS);
 }
 
 static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
 {
-       if (is_qla4022(a))
-               ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
-       else
+       if (is_qla4010(a))
                ql4xxx_sem_unlock(a, QL4010_NVRAM_SEM_MASK);
+       else
+               ql4xxx_sem_unlock(a, QL4022_NVRAM_SEM_MASK);
 }
 
 static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
 {
-       if (is_qla4022(a))
+       if (is_qla4010(a))
+               return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
+                                      QL4010_DRVR_SEM_BITS);
+       else
                return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
                                       (QL4022_RESOURCE_BITS_BASE_CODE |
                                        (a->mac_index)) << 1);
-       else
-               return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
-                                      QL4010_DRVR_SEM_BITS);
 }
 
 static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
 {
-       if (is_qla4022(a))
-               ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
-       else
+       if (is_qla4010(a))
                ql4xxx_sem_unlock(a, QL4010_DRVR_SEM_MASK);
+       else
+               ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
 }
 
 /*---------------------------------------------------------------------------*/
index 427489de64bcdcb6c05929f0781e1a26b13a8ac4..4eea8c571916f49293c7fb1205c5ac4137acf03b 100644 (file)
@@ -296,7 +296,6 @@ static inline uint32_t clr_rmask(uint32_t val)
 /*  ISP Semaphore definitions */
 
 /*  ISP General Purpose Output definitions */
-#define GPOR_TOPCAT_RESET                      0x00000004
 
 /*  shadow registers (DMA'd from HA to system memory.  read only) */
 struct shadow_regs {
@@ -339,10 +338,13 @@ union external_hw_config_reg {
 /*  Mailbox command definitions */
 #define MBOX_CMD_ABOUT_FW                      0x0009
 #define MBOX_CMD_LUN_RESET                     0x0016
+#define MBOX_CMD_GET_MANAGEMENT_DATA           0x001E
 #define MBOX_CMD_GET_FW_STATUS                 0x001F
 #define MBOX_CMD_SET_ISNS_SERVICE              0x0021
 #define ISNS_DISABLE                           0
 #define ISNS_ENABLE                            1
+#define MBOX_CMD_COPY_FLASH                    0x0024
+#define MBOX_CMD_WRITE_FLASH                   0x0025
 #define MBOX_CMD_READ_FLASH                    0x0026
 #define MBOX_CMD_CLEAR_DATABASE_ENTRY          0x0031
 #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT                0x0056
@@ -360,10 +362,13 @@ union external_hw_config_reg {
 #define DDB_DS_SESSION_FAILED                  0x06
 #define DDB_DS_LOGIN_IN_PROCESS                        0x07
 #define MBOX_CMD_GET_FW_STATE                  0x0069
+#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
+#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS      0x0087
 
 /* Mailbox 1 */
 #define FW_STATE_READY                         0x0000
 #define FW_STATE_CONFIG_WAIT                   0x0001
+#define FW_STATE_WAIT_LOGIN                    0x0002
 #define FW_STATE_ERROR                         0x0004
 #define FW_STATE_DHCP_IN_PROGRESS              0x0008
 
index 1b221ff0f6f7276ea57392953aa35e6ef30d86dd..2122967bbf0b1a56d9bd28819745bb99aea4882a 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __QLA4x_GBL_H
 #define        __QLA4x_GBL_H
 
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
 int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
@@ -75,4 +76,4 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
 extern int ql4xextended_error_logging;
 extern int ql4xdiscoverywait;
 extern int ql4xdontresethba;
-#endif                         /* _QLA4x_GBL_H */
+#endif /* __QLA4x_GBL_H */
index bb3a1c11f44c9089e9ab1d0fa5074d4f5294301b..cc210f297a78a22e6bf3269ab275964051d48b38 100644 (file)
@@ -259,10 +259,16 @@ static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
                              "seconds expired= %d\n", ha->host_no, __func__,
                              ha->firmware_state, ha->addl_fw_state,
                              timeout_count));
+               if (is_qla4032(ha) &&
+                       !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) &&
+                       (timeout_count < ADAPTER_INIT_TOV - 5)) {
+                       break;
+               }
+
                msleep(1000);
        }                       /* end of for */
 
-       if (timeout_count <= 0)
+       if (timeout_count == 0)
                DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n",
                              ha->host_no, __func__));
 
@@ -806,32 +812,6 @@ int qla4xxx_relogin_device(struct scsi_qla_host *ha,
        return QLA_SUCCESS;
 }
 
-/**
- * qla4010_get_topcat_presence - check if it is QLA4040 TopCat Chip
- * @ha: Pointer to host adapter structure.
- *
- **/
-static int qla4010_get_topcat_presence(struct scsi_qla_host *ha)
-{
-       unsigned long flags;
-       uint16_t topcat;
-
-       if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS)
-               return QLA_ERROR;
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       topcat = rd_nvram_word(ha, offsetof(struct eeprom_data,
-                                           isp4010.topcat));
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
-       if ((topcat & TOPCAT_MASK) == TOPCAT_PRESENT)
-               set_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
-       else
-               clear_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags);
-       ql4xxx_unlock_nvram(ha);
-       return QLA_SUCCESS;
-}
-
-
 static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
 {
        unsigned long flags;
@@ -866,7 +846,7 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
                /* set defaults */
                if (is_qla4010(ha))
                        extHwConfig.Asuint32_t = 0x1912;
-               else if (is_qla4022(ha))
+               else if (is_qla4022(ha) || is_qla4032(ha))
                        extHwConfig.Asuint32_t = 0x0023;
        }
        DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
@@ -927,7 +907,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        writel(jiffies, &ha->reg->mailbox[7]);
-       if (is_qla4022(ha))
+       if (is_qla4022(ha) || is_qla4032(ha))
                writel(set_rmask(NVR_WRITE_ENABLE),
                       &ha->reg->u1.isp4022.nvram);
 
@@ -978,7 +958,7 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
        return status;
 }
 
-static int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
+int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
 {
 #define QL4_LOCK_DRVR_WAIT     300
 #define QL4_LOCK_DRVR_SLEEP    100
@@ -1018,12 +998,7 @@ static int qla4xxx_start_firmware(struct scsi_qla_host *ha)
        int soft_reset = 1;
        int config_chip = 0;
 
-       if (is_qla4010(ha)){
-               if (qla4010_get_topcat_presence(ha) != QLA_SUCCESS)
-                       return QLA_ERROR;
-       }
-
-       if (is_qla4022(ha))
+       if (is_qla4022(ha) || is_qla4032(ha))
                ql4xxx_set_mac_number(ha);
 
        if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS)
index 0d61797af7dadbe2a9d55d032d9ae5c71988ee38..6375eb017dd3c115f9fd21d59747359a4edb9ab4 100644 (file)
@@ -38,7 +38,7 @@ qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
 static inline void
 __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
 {
-       if (is_qla4022(ha)) {
+       if (is_qla4022(ha) || is_qla4032(ha)) {
                writel(set_rmask(IMR_SCSI_INTR_ENABLE),
                       &ha->reg->u1.isp4022.intr_mask);
                readl(&ha->reg->u1.isp4022.intr_mask);
@@ -52,7 +52,7 @@ __qla4xxx_enable_intrs(struct scsi_qla_host *ha)
 static inline void
 __qla4xxx_disable_intrs(struct scsi_qla_host *ha)
 {
-       if (is_qla4022(ha)) {
+       if (is_qla4022(ha) || is_qla4032(ha)) {
                writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
                       &ha->reg->u1.isp4022.intr_mask);
                readl(&ha->reg->u1.isp4022.intr_mask);
index c0a254b89a3008e645b212ae848e06e982ccb3fc..d41ce380eedcbd298185a15db290cb15c946b00a 100644 (file)
@@ -294,6 +294,12 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
                        cmd_entry->control_flags = CF_WRITE;
                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                        cmd_entry->control_flags = CF_READ;
+
+               ha->bytes_xfered += cmd->request_bufflen;
+       if (ha->bytes_xfered & ~0xFFFFF) {
+                       ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
+                       ha->bytes_xfered &= 0xFFFFF;
+               }
        }
 
        /* Set tagged queueing control flags */
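Note on the accounting added above: ha->bytes_xfered is kept as a running remainder below one megabyte; once any bit above bit 19 is set, the whole megabytes (bytes >> 20) are folded into ha->total_mbytes_xferred and only the low 20 bits are retained. A stand-alone sketch of the same arithmetic (hypothetical helper, kernel integer types assumed, not driver code):

    /* Fold whole megabytes out of a running byte counter, keeping only
     * the sub-megabyte remainder (2^20 bytes per MB). */
    static void account_xfer(u64 *total_mbytes, u32 *bytes, u32 len)
    {
            *bytes += len;
            if (*bytes & ~0xFFFFF) {                /* >= 1 MB accumulated */
                    *total_mbytes += *bytes >> 20;  /* move whole MBs over */
                    *bytes &= 0xFFFFF;              /* keep the remainder */
            }
    }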
index 1e283321a59d2482fcf3b81d4b9c6672a83b46f5..ef975e0dc87fb9a5eef9082dbece9fab735710fa 100644 (file)
@@ -627,6 +627,7 @@ irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
+       ha->isr_count++;
        /*
         * Repeatedly service interrupts up to a maximum of
         * MAX_REQS_SERVICED_PER_INTR
index e3957ca5b645ba6d57cffaa93cab0ce0f0675a78..58afd135aa1de2a32bd565940e371d309200fe3f 100644 (file)
@@ -7,15 +7,22 @@
 
 #include "ql4_def.h"
 
+static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
+{
+       writel(cmd, isp_nvram(ha));
+       readl(isp_nvram(ha));
+       udelay(1);
+}
+
 static inline int eeprom_size(struct scsi_qla_host *ha)
 {
-       return is_qla4022(ha) ? FM93C86A_SIZE_16 : FM93C66A_SIZE_16;
+       return is_qla4010(ha) ? FM93C66A_SIZE_16 : FM93C86A_SIZE_16;
 }
 
 static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
 {
-       return is_qla4022(ha) ? FM93C86A_NO_ADDR_BITS_16 :
-               FM93C56A_NO_ADDR_BITS_16;
+       return is_qla4010(ha) ? FM93C56A_NO_ADDR_BITS_16 :
+               FM93C86A_NO_ADDR_BITS_16;
 }
 
 static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
@@ -28,8 +35,7 @@ static int fm93c56a_select(struct scsi_qla_host * ha)
        DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));
 
        ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
-       writel(ha->eeprom_cmd_data, isp_nvram(ha));
-       readl(isp_nvram(ha));
+       eeprom_cmd(ha->eeprom_cmd_data, ha);
        return 1;
 }
 
@@ -41,12 +47,13 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
        int previousBit;
 
        /* Clock in a zero, then do the start bit. */
-       writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
-       writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-              AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-       writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
-              AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-       readl(isp_nvram(ha));
+       eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, ha);
+
+       eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+              AUBURN_EEPROM_CLK_RISE, ha);
+       eeprom_cmd(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+              AUBURN_EEPROM_CLK_FALL, ha);
+
        mask = 1 << (FM93C56A_CMD_BITS - 1);
 
        /* Force the previous data bit to be different. */
@@ -60,14 +67,14 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
                         * If the bit changed, then change the DO state to
                         * match.
                         */
-                       writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
+                       eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
                        previousBit = dataBit;
                }
-               writel(ha->eeprom_cmd_data | dataBit |
-                      AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-               writel(ha->eeprom_cmd_data | dataBit |
-                      AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-               readl(isp_nvram(ha));
+               eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+                      AUBURN_EEPROM_CLK_RISE, ha);
+               eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+                      AUBURN_EEPROM_CLK_FALL, ha);
+
                cmd = cmd << 1;
        }
        mask = 1 << (eeprom_no_addr_bits(ha) - 1);
@@ -82,14 +89,15 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
                         * If the bit changed, then change the DO state to
                         * match.
                         */
-                       writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
+                       eeprom_cmd(ha->eeprom_cmd_data | dataBit, ha);
+
                        previousBit = dataBit;
                }
-               writel(ha->eeprom_cmd_data | dataBit |
-                      AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-               writel(ha->eeprom_cmd_data | dataBit |
-                      AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-               readl(isp_nvram(ha));
+               eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+                      AUBURN_EEPROM_CLK_RISE, ha);
+               eeprom_cmd(ha->eeprom_cmd_data | dataBit |
+                      AUBURN_EEPROM_CLK_FALL, ha);
+
                addr = addr << 1;
        }
        return 1;
@@ -98,8 +106,7 @@ static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
 static int fm93c56a_deselect(struct scsi_qla_host * ha)
 {
        ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
-       writel(ha->eeprom_cmd_data, isp_nvram(ha));
-       readl(isp_nvram(ha));
+       eeprom_cmd(ha->eeprom_cmd_data, ha);
        return 1;
 }
 
@@ -112,12 +119,13 @@ static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
        /* Read the data bits
         * The first bit is a dummy.  Clock right over it. */
        for (i = 0; i < eeprom_no_data_bits(ha); i++) {
-               writel(ha->eeprom_cmd_data |
-                      AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
-               writel(ha->eeprom_cmd_data |
-                      AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
-               dataBit =
-                       (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+               eeprom_cmd(ha->eeprom_cmd_data |
+                      AUBURN_EEPROM_CLK_RISE, ha);
+               eeprom_cmd(ha->eeprom_cmd_data |
+                      AUBURN_EEPROM_CLK_FALL, ha);
+
+               dataBit = (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
+
                data = (data << 1) | dataBit;
        }
 
index 08e2aed8c6cc20bdec0eea8fc219e5824081a40e..b47b4fc59d834e1492adf5642b005764cb9f95fc 100644 (file)
@@ -134,9 +134,7 @@ struct eeprom_data {
                        u16 phyConfig;  /* x36 */
 #define         PHY_CONFIG_PHY_ADDR_MASK             0x1f
 #define         PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
-                       u16 topcat;     /* x38 */
-#define TOPCAT_PRESENT         0x0100
-#define TOPCAT_MASK            0xFF00
+                       u16 reserved_56;        /* x38 */
 
 #define EEPROM_UNUSED_1_SIZE   2
                        u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */
index 5b8db6109536d26392603d3b635fd48b56ebf473..9ef693c8809aac59ef2d823798fd5592664c042c 100644 (file)
@@ -19,7 +19,7 @@ char qla4xxx_version_str[40];
 /*
  * SRB allocation cache
  */
-static kmem_cache_t *srb_cachep;
+static struct kmem_cache *srb_cachep;
 
 /*
  * Module parameter information and variables
@@ -708,10 +708,10 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
 }
 
 /**
- * qla4010_soft_reset - performs soft reset.
+ * qla4xxx_soft_reset - performs soft reset.
  * @ha: Pointer to host adapter structure.
  **/
-static int qla4010_soft_reset(struct scsi_qla_host *ha)
+int qla4xxx_soft_reset(struct scsi_qla_host *ha)
 {
        uint32_t max_wait_time;
        unsigned long flags = 0;
@@ -816,29 +816,6 @@ static int qla4010_soft_reset(struct scsi_qla_host *ha)
        return status;
 }
 
-/**
- * qla4xxx_topcat_reset - performs hard reset of TopCat Chip.
- * @ha: Pointer to host adapter structure.
- **/
-static int qla4xxx_topcat_reset(struct scsi_qla_host *ha)
-{
-       unsigned long flags;
-
-       ql4xxx_lock_nvram(ha);
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       writel(set_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
-       readl(isp_gp_out(ha));
-       mdelay(1);
-
-       writel(clr_rmask(GPOR_TOPCAT_RESET), isp_gp_out(ha));
-       readl(isp_gp_out(ha));
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       mdelay(2523);
-
-       ql4xxx_unlock_nvram(ha);
-       return QLA_SUCCESS;
-}
-
 /**
  * qla4xxx_flush_active_srbs - returns all outstanding i/o requests to O.S.
  * @ha: Pointer to host adapter structure.
@@ -866,26 +843,6 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
 
 }
 
-/**
- * qla4xxx_hard_reset - performs HBA Hard Reset
- * @ha: Pointer to host adapter structure.
- **/
-static int qla4xxx_hard_reset(struct scsi_qla_host *ha)
-{
-       /* The QLA4010 really doesn't have an equivalent to a hard reset */
-       qla4xxx_flush_active_srbs(ha);
-       if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
-               int status = QLA_ERROR;
-
-               if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
-                   (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
-                   (qla4010_soft_reset(ha) == QLA_SUCCESS))
-                       status = QLA_SUCCESS;
-               return status;
-       } else
-               return qla4010_soft_reset(ha);
-}
-
 /**
  * qla4xxx_recover_adapter - recovers adapter after a fatal error
  * @ha: Pointer to host adapter structure.
@@ -919,18 +876,11 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
        if (status == QLA_SUCCESS) {
                DEBUG2(printk("scsi%ld: %s - Performing soft reset..\n",
                              ha->host_no, __func__));
-               status = qla4xxx_soft_reset(ha);
-       }
-       /* FIXMEkaren: Do we want to keep interrupts enabled and process
-          AENs after soft reset */
-
-       /* If firmware (SOFT) reset failed, or if all outstanding
-        * commands have not returned, then do a HARD reset.
-        */
-       if (status == QLA_ERROR) {
-               DEBUG2(printk("scsi%ld: %s - Performing hard reset..\n",
-                             ha->host_no, __func__));
-               status = qla4xxx_hard_reset(ha);
+               qla4xxx_flush_active_srbs(ha);
+               if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
+                       status = qla4xxx_soft_reset(ha);
+               else
+                       status = QLA_ERROR;
        }
 
        /* Flush any pending ddb changed AENs */
@@ -1011,18 +961,15 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
  * the mid-level tries to sleep when it reaches the driver threshold
  * "host->can_queue". This can cause a panic if we were in our interrupt code.
  **/
-static void qla4xxx_do_dpc(void *data)
+static void qla4xxx_do_dpc(struct work_struct *work)
 {
-       struct scsi_qla_host *ha = (struct scsi_qla_host *) data;
+       struct scsi_qla_host *ha =
+               container_of(work, struct scsi_qla_host, dpc_work);
        struct ddb_entry *ddb_entry, *dtemp;
 
-       DEBUG2(printk("scsi%ld: %s: DPC handler waking up.\n",
-                     ha->host_no, __func__));
-
-       DEBUG2(printk("scsi%ld: %s: ha->flags = 0x%08lx\n",
-                     ha->host_no, __func__, ha->flags));
-       DEBUG2(printk("scsi%ld: %s: ha->dpc_flags = 0x%08lx\n",
-                     ha->host_no, __func__, ha->dpc_flags));
+       DEBUG2(printk("scsi%ld: %s: DPC handler waking up. "
+               "flags = 0x%08lx, dpc_flags = 0x%08lx\n",
+               ha->host_no, __func__, ha->flags, ha->dpc_flags));
 
        /* Initialization not yet finished. Don't do anything yet. */
        if (!test_bit(AF_INIT_DONE, &ha->flags))
@@ -1032,16 +979,8 @@ static void qla4xxx_do_dpc(void *data)
            test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
            test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
            test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags)) {
-               if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags))
-                       /*
-                        * dg 09/23 Never initialize ddb list
-                        * once we up and running
-                        * qla4xxx_recover_adapter(ha,
-                        *    REBUILD_DDB_LIST);
-                        */
-                       qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
-
-               if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
+               if (test_bit(DPC_RESET_HA_DESTROY_DDB_LIST, &ha->dpc_flags) ||
+                       test_bit(DPC_RESET_HA, &ha->dpc_flags))
                        qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST);
 
                if (test_and_clear_bit(DPC_RESET_HA_INTR, &ha->dpc_flags)) {
@@ -1122,7 +1061,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
                destroy_workqueue(ha->dpc_thread);
 
        /* Issue Soft Reset to put firmware in unknown state */
-       qla4xxx_soft_reset(ha);
+       if (ql4xxx_lock_drvr_wait(ha) == QLA_SUCCESS)
+               qla4xxx_soft_reset(ha);
 
        /* Remove timer thread, if present */
        if (ha->timer_active)
@@ -1261,7 +1201,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
        init_waitqueue_head(&ha->mailbox_wait_queue);
 
        spin_lock_init(&ha->hardware_lock);
-       spin_lock_init(&ha->list_lock);
 
        /* Allocate dma buffers */
        if (qla4xxx_mem_alloc(ha)) {
@@ -1315,7 +1254,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                ret = -ENODEV;
                goto probe_failed;
        }
-       INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha);
+       INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
 
        ret = request_irq(pdev->irq, qla4xxx_intr_handler,
                          SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha);
@@ -1467,27 +1406,6 @@ struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t in
        return srb;
 }
 
-/**
- * qla4xxx_soft_reset - performs a SOFT RESET of hba.
- * @ha: Pointer to host adapter structure.
- **/
-int qla4xxx_soft_reset(struct scsi_qla_host *ha)
-{
-
-       DEBUG2(printk(KERN_WARNING "scsi%ld: %s: chip reset!\n", ha->host_no,
-                     __func__));
-       if (test_bit(AF_TOPCAT_CHIP_PRESENT, &ha->flags)) {
-               int status = QLA_ERROR;
-
-               if ((qla4010_soft_reset(ha) == QLA_SUCCESS) &&
-                   (qla4xxx_topcat_reset(ha) == QLA_SUCCESS) &&
-                   (qla4010_soft_reset(ha) == QLA_SUCCESS) )
-                       status = QLA_SUCCESS;
-               return status;
-       } else
-               return qla4010_soft_reset(ha);
-}
-
 /**
  * qla4xxx_eh_wait_on_command - waits for command to be returned by firmware
  * @ha: actual ha whose done queue will contain the comd returned by firmware.
@@ -1686,6 +1604,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
                .subvendor      = PCI_ANY_ID,
                .subdevice      = PCI_ANY_ID,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_QLOGIC,
+               .device         = PCI_DEVICE_ID_QLOGIC_ISP4032,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+       },
        {0, 0},
 };
 MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
index b3fe7e68988e6378e3c033a9535dc49523353c01..454e19c8ad685f1e7d44543c2f0f419d75b1f5be 100644 (file)
@@ -5,9 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.00.05b9-k"
-
-#define QL4_DRIVER_MAJOR_VER   5
-#define QL4_DRIVER_MINOR_VER   0
-#define QL4_DRIVER_PATCH_VER   5
-#define QL4_DRIVER_BETA_VER    9
+#define QLA4XXX_DRIVER_VERSION "5.00.07-k"
index c59f31533ab4d2b0d072ead30c61a4a7ea6b199e..24cffd98ee63546ce716e0277812dda9bafd08d2 100644 (file)
@@ -136,7 +136,7 @@ const char * scsi_device_type(unsigned type)
 EXPORT_SYMBOL(scsi_device_type);
 
 struct scsi_host_cmd_pool {
-       kmem_cache_t    *slab;
+       struct kmem_cache       *slab;
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
@@ -156,8 +156,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
 
 static DEFINE_MUTEX(host_cmd_pool_mutex);
 
-static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
-                                           gfp_t gfp_mask)
+struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
 {
        struct scsi_cmnd *cmd;
 
@@ -178,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
 
        return cmd;
 }
+EXPORT_SYMBOL_GPL(__scsi_get_command);
 
 /*
  * Function:   scsi_get_command()
@@ -214,9 +214,29 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
                put_device(&dev->sdev_gendev);
 
        return cmd;
-}                              
+}
 EXPORT_SYMBOL(scsi_get_command);
 
+void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
+                       struct device *dev)
+{
+       unsigned long flags;
+
+       /* changing locks here, don't need to restore the irq state */
+       spin_lock_irqsave(&shost->free_list_lock, flags);
+       if (unlikely(list_empty(&shost->free_list))) {
+               list_add(&cmd->list, &shost->free_list);
+               cmd = NULL;
+       }
+       spin_unlock_irqrestore(&shost->free_list_lock, flags);
+
+       if (likely(cmd != NULL))
+               kmem_cache_free(shost->cmd_pool->slab, cmd);
+
+       put_device(dev);
+}
+EXPORT_SYMBOL(__scsi_put_command);
+
 /*
  * Function:   scsi_put_command()
  *
@@ -231,26 +251,15 @@ EXPORT_SYMBOL(scsi_get_command);
 void scsi_put_command(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
-       struct Scsi_Host *shost = sdev->host;
        unsigned long flags;
-       
+
        /* serious error if the command hasn't come from a device list */
        spin_lock_irqsave(&cmd->device->list_lock, flags);
        BUG_ON(list_empty(&cmd->list));
        list_del_init(&cmd->list);
-       spin_unlock(&cmd->device->list_lock);
-       /* changing locks here, don't need to restore the irq state */
-       spin_lock(&shost->free_list_lock);
-       if (unlikely(list_empty(&shost->free_list))) {
-               list_add(&cmd->list, &shost->free_list);
-               cmd = NULL;
-       }
-       spin_unlock_irqrestore(&shost->free_list_lock, flags);
-
-       if (likely(cmd != NULL))
-               kmem_cache_free(shost->cmd_pool->slab, cmd);
+       spin_unlock_irqrestore(&cmd->device->list_lock, flags);
 
-       put_device(&sdev->sdev_gendev);
+       __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_put_command);
 
@@ -871,9 +880,9 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
+#ifdef CONFIG_MODULE_UNLOAD
        struct module *module = sdev->host->hostt->module;
 
-#ifdef CONFIG_MODULE_UNLOAD
        /* The module refcount will be zero if scsi_device_get()
         * was called from a module removal routine */
        if (module && module_refcount(module) != 0)
index aff1b0cfd4b25bfcdf2ceda22e6d522969d7ab9b..2ecb6ff42444697d9aa866de4bb00c0d707880b5 100644 (file)
@@ -453,9 +453,18 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
 }
 
 /**
- * scsi_send_eh_cmnd  - send a cmd to a device as part of error recovery.
- * @scmd:      SCSI Cmd to send.
- * @timeout:   Timeout for cmd.
+ * scsi_send_eh_cmnd  - submit a scsi command as part of error recovery
+ * @scmd:       SCSI command structure to hijack
+ * @cmnd:       CDB to send
+ * @cmnd_size:  size in bytes of @cmnd
+ * @timeout:    timeout for this request
+ * @copy_sense: request sense data if set to 1
+ *
+ * This function is used to send a scsi command down to a target device
+ * as part of the error recovery process.  If @copy_sense is 0 the command
+ * sent must be one that does not transfer any data.  If @copy_sense is 1
+ * the command must be REQUEST_SENSE and this function copies out the
+ * sense buffer it got into @scmd->sense_buffer.
  *
  * Return value:
  *    SUCCESS or FAILED or NEEDS_RETRY
@@ -469,6 +478,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
        DECLARE_COMPLETION_ONSTACK(done);
        unsigned long timeleft;
        unsigned long flags;
+       struct scatterlist sgl;
        unsigned char old_cmnd[MAX_COMMAND_SIZE];
        enum dma_data_direction old_data_direction;
        unsigned short old_use_sg;
@@ -500,19 +510,24 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
                if (shost->hostt->unchecked_isa_dma)
                        gfp_mask |= __GFP_DMA;
 
-               scmd->sc_data_direction = DMA_FROM_DEVICE;
-               scmd->request_bufflen = 252;
-               scmd->request_buffer = kzalloc(scmd->request_bufflen, gfp_mask);
-               if (!scmd->request_buffer)
+               sgl.page = alloc_page(gfp_mask);
+               if (!sgl.page)
                        return FAILED;
+               sgl.offset = 0;
+               sgl.length = 252;
+
+               scmd->sc_data_direction = DMA_FROM_DEVICE;
+               scmd->request_bufflen = sgl.length;
+               scmd->request_buffer = &sgl;
+               scmd->use_sg = 1;
        } else {
                scmd->request_buffer = NULL;
                scmd->request_bufflen = 0;
                scmd->sc_data_direction = DMA_NONE;
+               scmd->use_sg = 0;
        }
 
        scmd->underflow = 0;
-       scmd->use_sg = 0;
        scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
 
        if (sdev->scsi_level <= SCSI_2)
@@ -583,7 +598,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
                        memcpy(scmd->sense_buffer, scmd->request_buffer,
                               sizeof(scmd->sense_buffer));
                }
-               kfree(scmd->request_buffer);
+               __free_page(sgl.page);
        }
 
 
index 3ac4890ce086cfab2ff6519c1f9858760b764de5..1748e27501cdee45da304d4cdfd97f1c0ff37372 100644 (file)
@@ -36,7 +36,7 @@
 struct scsi_host_sg_pool {
        size_t          size;
        char            *name; 
-       kmem_cache_t    *slab;
+       struct kmem_cache       *slab;
        mempool_t       *pool;
 };
 
@@ -241,7 +241,7 @@ struct scsi_io_context {
        char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
-static kmem_cache_t *scsi_io_context_cache;
+static struct kmem_cache *scsi_io_context_cache;
 
 static void scsi_end_async(struct request *req, int uptodate)
 {
@@ -704,7 +704,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
        return NULL;
 }
 
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
        struct scsi_host_sg_pool *sgp;
        struct scatterlist *sgl;
@@ -745,7 +745,9 @@ static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_m
        return sgl;
 }
 
-static void scsi_free_sgtable(struct scatterlist *sgl, int index)
+EXPORT_SYMBOL(scsi_alloc_sgtable);
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
 {
        struct scsi_host_sg_pool *sgp;
 
@@ -755,6 +757,8 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
        mempool_free(sgl, sgp->pool);
 }
 
+EXPORT_SYMBOL(scsi_free_sgtable);
+
 /*
  * Function:    scsi_release_buffers()
  *
@@ -996,25 +1000,14 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
        int                count;
 
        /*
-        * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
-        */
-       if (blk_pc_request(req) && !req->bio) {
-               cmd->request_bufflen = req->data_len;
-               cmd->request_buffer = req->data;
-               req->buffer = req->data;
-               cmd->use_sg = 0;
-               return 0;
-       }
-
-       /*
-        * we used to not use scatter-gather for single segment request,
+        * We used to not use scatter-gather for single segment request,
         * but now we do (it makes highmem I/O easier to support without
         * kmapping pages)
         */
        cmd->use_sg = req->nr_phys_segments;
 
        /*
-        * if sg table allocation fails, requeue request later.
+        * If sg table allocation fails, requeue request later.
         */
        sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
        if (unlikely(!sgpnt)) {
@@ -1022,24 +1015,21 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
                return BLKPREP_DEFER;
        }
 
+       req->buffer = NULL;
        cmd->request_buffer = (char *) sgpnt;
-       cmd->request_bufflen = req->nr_sectors << 9;
        if (blk_pc_request(req))
                cmd->request_bufflen = req->data_len;
-       req->buffer = NULL;
+       else
+               cmd->request_bufflen = req->nr_sectors << 9;
 
        /* 
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
-
-       /*
-        * mapped well, send it off
-        */
        if (likely(count <= cmd->use_sg)) {
                cmd->use_sg = count;
-               return 0;
+               return BLKPREP_OK;
        }
 
        printk(KERN_ERR "Incorrect number of segments after building list\n");
@@ -1069,6 +1059,27 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
        return -EOPNOTSUPP;
 }
 
+static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
+               struct request *req)
+{
+       struct scsi_cmnd *cmd;
+
+       if (!req->special) {
+               cmd = scsi_get_command(sdev, GFP_ATOMIC);
+               if (unlikely(!cmd))
+                       return NULL;
+               req->special = cmd;
+       } else {
+               cmd = req->special;
+       }
+
+       /* pull a tag out of the request if we have one */
+       cmd->tag = req->tag;
+       cmd->request = req;
+
+       return cmd;
+}
+
 static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
 {
        BUG_ON(!blk_pc_request(cmd->request));
@@ -1081,9 +1092,37 @@ static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
        scsi_io_completion(cmd, cmd->request_bufflen);
 }
 
-static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-       struct request *req = cmd->request;
+       struct scsi_cmnd *cmd;
+
+       cmd = scsi_get_cmd_from_req(sdev, req);
+       if (unlikely(!cmd))
+               return BLKPREP_DEFER;
+
+       /*
+        * BLOCK_PC requests may transfer data, in which case they must have
+        * a bio attached to them.  Or they might contain a SCSI command
+        * that does not transfer data, in which case they may optionally
+        * submit a request without an attached bio.
+        */
+       if (req->bio) {
+               int ret;
+
+               BUG_ON(!req->nr_phys_segments);
+
+               ret = scsi_init_io(cmd);
+               if (unlikely(ret))
+                       return ret;
+       } else {
+               BUG_ON(req->data_len);
+               BUG_ON(req->data);
+
+               cmd->request_bufflen = 0;
+               cmd->request_buffer = NULL;
+               cmd->use_sg = 0;
+               req->buffer = NULL;
+       }
 
        BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
        memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
@@ -1099,154 +1138,138 @@ static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
        cmd->allowed = req->retries;
        cmd->timeout_per_command = req->timeout;
        cmd->done = scsi_blk_pc_done;
+       return BLKPREP_OK;
 }
 
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
+/*
+ * Setup a REQ_TYPE_FS command.  These are simple read/write requests
+ * from filesystems that still need to be translated to SCSI CDBs from
+ * the ULD.
+ */
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-       struct scsi_device *sdev = q->queuedata;
        struct scsi_cmnd *cmd;
-       int specials_only = 0;
+       struct scsi_driver *drv;
+       int ret;
 
        /*
-        * Just check to see if the device is online.  If it isn't, we
-        * refuse to process any commands.  The device must be brought
-        * online before trying any recovery commands
+        * Filesystem requests must transfer data.
         */
-       if (unlikely(!scsi_device_online(sdev))) {
-               sdev_printk(KERN_ERR, sdev,
-                           "rejecting I/O to offline device\n");
-               goto kill;
-       }
-       if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
-               /* OK, we're not in a running state don't prep
-                * user commands */
-               if (sdev->sdev_state == SDEV_DEL) {
-                       /* Device is fully deleted, no commands
-                        * at all allowed down */
-                       sdev_printk(KERN_ERR, sdev,
-                                   "rejecting I/O to dead device\n");
-                       goto kill;
-               }
-               /* OK, we only allow special commands (i.e. not
-                * user initiated ones */
-               specials_only = sdev->sdev_state;
+       BUG_ON(!req->nr_phys_segments);
+
+       cmd = scsi_get_cmd_from_req(sdev, req);
+       if (unlikely(!cmd))
+               return BLKPREP_DEFER;
+
+       ret = scsi_init_io(cmd);
+       if (unlikely(ret))
+               return ret;
+
+       /*
+        * Initialize the actual SCSI command for this request.
+        */
+       drv = *(struct scsi_driver **)req->rq_disk->private_data;
+       if (unlikely(!drv->init_command(cmd))) {
+               scsi_release_buffers(cmd);
+               scsi_put_command(cmd);
+               return BLKPREP_KILL;
        }
 
+       return BLKPREP_OK;
+}
+
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
+{
+       struct scsi_device *sdev = q->queuedata;
+       int ret = BLKPREP_OK;
+
        /*
-        * Find the actual device driver associated with this command.
-        * The SPECIAL requests are things like character device or
-        * ioctls, which did not originate from ll_rw_blk.  Note that
-        * the special field is also used to indicate the cmd for
-        * the remainder of a partially fulfilled request that can 
-        * come up when there is a medium error.  We have to treat
-        * these two cases differently.  We differentiate by looking
-        * at request->cmd, as this tells us the real story.
+        * If the device is not in running state we will reject some
+        * or all commands.
         */
-       if (blk_special_request(req) && req->special)
-               cmd = req->special;
-       else if (blk_pc_request(req) || blk_fs_request(req)) {
-               if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){
-                       if (specials_only == SDEV_QUIESCE ||
-                           specials_only == SDEV_BLOCK)
-                               goto defer;
-                       
+       if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+               switch (sdev->sdev_state) {
+               case SDEV_OFFLINE:
+                       /*
+                        * If the device is offline we refuse to process any
+                        * commands.  The device must be brought online
+                        * before trying any recovery commands.
+                        */
                        sdev_printk(KERN_ERR, sdev,
-                                   "rejecting I/O to device being removed\n");
-                       goto kill;
+                                   "rejecting I/O to offline device\n");
+                       ret = BLKPREP_KILL;
+                       break;
+               case SDEV_DEL:
+                       /*
+                        * If the device is fully deleted, we refuse to
+                        * process any commands as well.
+                        */
+                       sdev_printk(KERN_ERR, sdev,
+                                   "rejecting I/O to dead device\n");
+                       ret = BLKPREP_KILL;
+                       break;
+               case SDEV_QUIESCE:
+               case SDEV_BLOCK:
+                       /*
+                        * If the device is blocked we defer normal commands.
+                        */
+                       if (!(req->cmd_flags & REQ_PREEMPT))
+                               ret = BLKPREP_DEFER;
+                       break;
+               default:
+                       /*
+                        * For any other state that is not fully online we
+                        * only allow special commands.  In particular, any
+                        * user-initiated command is not allowed.
+                        */
+                       if (!(req->cmd_flags & REQ_PREEMPT))
+                               ret = BLKPREP_KILL;
+                       break;
                }
-                       
-               /*
-                * Now try and find a command block that we can use.
-                */
-               if (!req->special) {
-                       cmd = scsi_get_command(sdev, GFP_ATOMIC);
-                       if (unlikely(!cmd))
-                               goto defer;
-               } else
-                       cmd = req->special;
-               
-               /* pull a tag out of the request if we have one */
-               cmd->tag = req->tag;
-       } else {
-               blk_dump_rq_flags(req, "SCSI bad req");
-               goto kill;
+
+               if (ret != BLKPREP_OK)
+                       goto out;
        }
-       
-       /* note the overloading of req->special.  When the tag
-        * is active it always means cmd.  If the tag goes
-        * back for re-queueing, it may be reset */
-       req->special = cmd;
-       cmd->request = req;
-       
-       /*
-        * FIXME: drop the lock here because the functions below
-        * expect to be called without the queue lock held.  Also,
-        * previously, we dequeued the request before dropping the
-        * lock.  We hope REQ_STARTED prevents anything untoward from
-        * happening now.
-        */
-       if (blk_fs_request(req) || blk_pc_request(req)) {
-               int ret;
 
+       switch (req->cmd_type) {
+       case REQ_TYPE_BLOCK_PC:
+               ret = scsi_setup_blk_pc_cmnd(sdev, req);
+               break;
+       case REQ_TYPE_FS:
+               ret = scsi_setup_fs_cmnd(sdev, req);
+               break;
+       default:
                /*
-                * This will do a couple of things:
-                *  1) Fill in the actual SCSI command.
-                *  2) Fill in any other upper-level specific fields
-                * (timeout).
+                * All other command types are not supported.
                 *
-                * If this returns 0, it means that the request failed
-                * (reading past end of disk, reading offline device,
-                * etc).   This won't actually talk to the device, but
-                * some kinds of consistency checking may cause the     
-                * request to be rejected immediately.
+                * Note that these days the SCSI subsystem does not use
+                * REQ_TYPE_SPECIAL requests anymore.  These are only used
+                * (directly or via blk_insert_request) by non-SCSI drivers.
                 */
+               blk_dump_rq_flags(req, "SCSI bad req");
+               ret = BLKPREP_KILL;
+               break;
+       }
 
-               /* 
-                * This sets up the scatter-gather table (allocating if
-                * required).
-                */
-               ret = scsi_init_io(cmd);
-               switch(ret) {
-                       /* For BLKPREP_KILL/DEFER the cmd was released */
-               case BLKPREP_KILL:
-                       goto kill;
-               case BLKPREP_DEFER:
-                       goto defer;
-               }
-               
+ out:
+       switch (ret) {
+       case BLKPREP_KILL:
+               req->errors = DID_NO_CONNECT << 16;
+               break;
+       case BLKPREP_DEFER:
                /*
-                * Initialize the actual SCSI command for this request.
+                * If we defer, the elv_next_request() returns NULL, but the
+                * queue must be restarted, so we plug here if no returning
+                * command will automatically do that.
                 */
-               if (blk_pc_request(req)) {
-                       scsi_setup_blk_pc_cmnd(cmd);
-               } else if (req->rq_disk) {
-                       struct scsi_driver *drv;
-
-                       drv = *(struct scsi_driver **)req->rq_disk->private_data;
-                       if (unlikely(!drv->init_command(cmd))) {
-                               scsi_release_buffers(cmd);
-                               scsi_put_command(cmd);
-                               goto kill;
-                       }
-               }
+               if (sdev->device_busy == 0)
+                       blk_plug_device(q);
+               break;
+       default:
+               req->cmd_flags |= REQ_DONTPREP;
        }
 
-       /*
-        * The request is now prepped, no need to come back here
-        */
-       req->cmd_flags |= REQ_DONTPREP;
-       return BLKPREP_OK;
-
- defer:
-       /* If we defer, the elv_next_request() returns NULL, but the
-        * queue must be restarted, so we plug here if no returning
-        * command will automatically do that. */
-       if (sdev->device_busy == 0)
-               blk_plug_device(q);
-       return BLKPREP_DEFER;
- kill:
-       req->errors = DID_NO_CONNECT << 16;
-       return BLKPREP_KILL;
+       return ret;
 }
 
 /*
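The rework above turns scsi_prep_fn() into a dispatch on req->cmd_type, with the BLKPREP_* handling collected at the out: label. For reference, a minimal prep_rq_fn obeying the same block-layer contract might look like this (illustrative skeleton; example_dev and example_alloc_resources are hypothetical):

    static int example_prep_fn(struct request_queue *q, struct request *req)
    {
            struct example_dev *dev = q->queuedata;   /* hypothetical per-queue data */

            if (!dev->online)
                    return BLKPREP_KILL;      /* fail the request back to the caller */

            if (!example_alloc_resources(dev, req))
                    return BLKPREP_DEFER;     /* out of resources, retry later */

            req->cmd_flags |= REQ_DONTPREP;   /* prepped; skip prep on requeue */
            return BLKPREP_OK;
    }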
@@ -1548,29 +1571,40 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
 
-struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+                                        request_fn_proc *request_fn)
 {
-       struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;
 
-       q = blk_init_queue(scsi_request_fn, NULL);
+       q = blk_init_queue(request_fn, NULL);
        if (!q)
                return NULL;
 
-       blk_queue_prep_rq(q, scsi_prep_fn);
-
        blk_queue_max_hw_segments(q, shost->sg_tablesize);
        blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
-       blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-       blk_queue_softirq_done(q, scsi_softirq_done);
 
        if (!shost->use_clustering)
                clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        return q;
 }
+EXPORT_SYMBOL(__scsi_alloc_queue);
+
+struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+{
+       struct request_queue *q;
+
+       q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
+       if (!q)
+               return NULL;
+
+       blk_queue_prep_rq(q, scsi_prep_fn);
+       blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+       blk_queue_softirq_done(q, scsi_softirq_done);
+       return q;
+}
 
 void scsi_free_queue(struct request_queue *q)
 {
index 5d023d44e5e7f1d026979b7bf63c96c4dce2b526..f458c2f686d2364d4387ea0bc8b4d0afec6f8d4c 100644 (file)
@@ -39,6 +39,9 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
        { };
 #endif
 
+/* scsi_scan.c */
+int scsi_complete_async_scans(void);
+
 /* scsi_devinfo.c */
 extern int scsi_get_device_flags(struct scsi_device *sdev,
                                 const unsigned char *vendor,
index 94a274645f6f36819c29aadb3aeff7b5290581e8..14e635aa44ce6963b0f206a4324c603a00503af0 100644 (file)
@@ -29,7 +29,9 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
-#include <asm/semaphore.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -87,6 +89,17 @@ module_param_named(max_luns, max_scsi_luns, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(max_luns,
                 "last scsi LUN (should be between 1 and 2^32-1)");
 
+#ifdef CONFIG_SCSI_SCAN_ASYNC
+#define SCSI_SCAN_TYPE_DEFAULT "async"
+#else
+#define SCSI_SCAN_TYPE_DEFAULT "sync"
+#endif
+
+static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+
+module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
+MODULE_PARM_DESC(scan, "sync, async or none");
+
 /*
  * max_scsi_report_luns: the maximum number of LUNS that will be
  * returned from the REPORT LUNS command. 8 times this value must
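The new "scan" parameter selects synchronous, asynchronous, or no host scanning. Since it is registered by the SCSI core with module_param_string(), it would presumably be set at boot or module load time, for example (illustrative, not part of this patch):

    scsi_mod.scan=sync               # kernel command line, built-in SCSI core
    modprobe scsi_mod scan=none      # modular SCSI core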
@@ -108,6 +121,68 @@ MODULE_PARM_DESC(inq_timeout,
                 "Timeout (in seconds) waiting for devices to answer INQUIRY."
                 " Default is 5. Some non-compliant devices need more.");
 
+static DEFINE_SPINLOCK(async_scan_lock);
+static LIST_HEAD(scanning_hosts);
+
+struct async_scan_data {
+       struct list_head list;
+       struct Scsi_Host *shost;
+       struct completion prev_finished;
+};
+
+/**
+ * scsi_complete_async_scans - Wait for asynchronous scans to complete
+ *
+ * Asynchronous scans add themselves to the scanning_hosts list.  Once
+ * that list is empty, we know that the scans are complete.  Rather than
+ * waking up periodically to check the state of the list, we pretend to be
+ * a scanning task by adding ourselves at the end of the list and going to
+ * sleep.  When the task before us wakes us up, we take ourselves off the
+ * list and return.
+ */
+int scsi_complete_async_scans(void)
+{
+       struct async_scan_data *data;
+
+       do {
+               if (list_empty(&scanning_hosts))
+                       return 0;
+               /* If we can't get memory immediately, that's OK.  Just
+                * sleep a little.  Even if we never get memory, the async
+                * scans will finish eventually.
+                */
+               data = kmalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       msleep(1);
+       } while (!data);
+
+       data->shost = NULL;
+       init_completion(&data->prev_finished);
+
+       spin_lock(&async_scan_lock);
+       /* Check that there's still somebody else on the list */
+       if (list_empty(&scanning_hosts))
+               goto done;
+       list_add_tail(&data->list, &scanning_hosts);
+       spin_unlock(&async_scan_lock);
+
+       printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
+       wait_for_completion(&data->prev_finished);
+
+       spin_lock(&async_scan_lock);
+       list_del(&data->list);
+ done:
+       spin_unlock(&async_scan_lock);
+
+       kfree(data);
+       return 0;
+}
+
+#ifdef MODULE
+/* Only exported for the benefit of scsi_wait_scan */
+EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
+#endif
+
 /**
  * scsi_unlock_floptical - unlock device via a special MODE SENSE command
  * @sdev:      scsi device to send command to
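The MODULE-only export above is noted as existing for the benefit of scsi_wait_scan. A hedged sketch of such a consumer, a trivial module whose init routine simply blocks until all asynchronous scans have completed (file layout and declarations assumed, not taken from this diff):

    /* Minimal "wait for SCSI scans" module; assumes only the
     * scsi_complete_async_scans() export added above. */
    #include <linux/module.h>

    extern int scsi_complete_async_scans(void);   /* declared in scsi_priv.h by this patch */

    static int __init wait_scan_init(void)
    {
            /* Returns once scanning_hosts is empty, i.e. all probes are done. */
            return scsi_complete_async_scans();
    }

    static void __exit wait_scan_exit(void)
    {
    }

    MODULE_DESCRIPTION("Wait for asynchronous SCSI host scans to complete");
    MODULE_LICENSE("GPL");

    module_init(wait_scan_init);
    module_exit(wait_scan_exit);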
@@ -362,9 +437,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
        goto retry;
 }
 
-static void scsi_target_reap_usercontext(void *data)
+static void scsi_target_reap_usercontext(struct work_struct *work)
 {
-       struct scsi_target *starget = data;
+       struct scsi_target *starget =
+               container_of(work, struct scsi_target, ew.work);
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;
 
@@ -400,7 +476,7 @@ void scsi_target_reap(struct scsi_target *starget)
                starget->state = STARGET_DEL;
                spin_unlock_irqrestore(shost->host_lock, flags);
                execute_in_process_context(scsi_target_reap_usercontext,
-                                          starget, &starget->ew);
+                                          &starget->ew);
                return;
 
        }
@@ -619,7 +695,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-               int *bflags)
+               int *bflags, int async)
 {
        /*
         * XXX do not save the inquiry, since it can change underneath us,
@@ -805,7 +881,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
         * register it and tell the rest of the kernel
         * about it.
         */
-       if (scsi_sysfs_add_sdev(sdev) != 0)
+       if (!async && scsi_sysfs_add_sdev(sdev) != 0)
                return SCSI_SCAN_NO_RESPONSE;
 
        return SCSI_SCAN_LUN_PRESENT;
@@ -974,7 +1050,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
                goto out_free_result;
        }
 
-       res = scsi_add_lun(sdev, result, &bflags);
+       res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
        if (res == SCSI_SCAN_LUN_PRESENT) {
                if (bflags & BLIST_KEY) {
                        sdev->lockable = 0;
@@ -1474,6 +1550,12 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
 {
        struct Scsi_Host *shost = dev_to_shost(parent);
 
+       if (strncmp(scsi_scan_type, "none", 4) == 0)
+               return;
+
+       if (!shost->async_scan)
+               scsi_complete_async_scans();
+
        mutex_lock(&shost->scan_mutex);
        if (scsi_host_scan_allowed(shost))
                __scsi_scan_target(parent, channel, id, lun, rescan);
@@ -1519,6 +1601,9 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
                "%s: <%u:%u:%u>\n",
                __FUNCTION__, channel, id, lun));
 
+       if (!shost->async_scan)
+               scsi_complete_async_scans();
+
        if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
            ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
            ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
@@ -1539,14 +1624,143 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
        return 0;
 }
 
+static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+{
+       struct scsi_device *sdev;
+       shost_for_each_device(sdev, shost) {
+               if (scsi_sysfs_add_sdev(sdev) != 0)
+                       scsi_destroy_sdev(sdev);
+       }
+}
+
+/**
+ * scsi_prep_async_scan - prepare for an async scan
+ * @shost: the host which will be scanned
+ * Returns: a cookie to be passed to scsi_finish_async_scan()
+ *
+ * Tells the midlayer this host is going to do an asynchronous scan.
+ * It reserves the host's position in the scanning list and ensures
+ * that other asynchronous scans started after this one won't affect the
+ * ordering of the discovered devices.
+ */
+static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
+{
+       struct async_scan_data *data;
+
+       if (strncmp(scsi_scan_type, "sync", 4) == 0)
+               return NULL;
+
+       if (shost->async_scan) {
+               printk("%s called twice for host %d", __FUNCTION__,
+                               shost->host_no);
+               dump_stack();
+               return NULL;
+       }
+
+       data = kmalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto err;
+       data->shost = scsi_host_get(shost);
+       if (!data->shost)
+               goto err;
+       init_completion(&data->prev_finished);
+
+       spin_lock(&async_scan_lock);
+       shost->async_scan = 1;
+       if (list_empty(&scanning_hosts))
+               complete(&data->prev_finished);
+       list_add_tail(&data->list, &scanning_hosts);
+       spin_unlock(&async_scan_lock);
+
+       return data;
+
+ err:
+       kfree(data);
+       return NULL;
+}
+
+/**
+ * scsi_finish_async_scan - asynchronous scan has finished
+ * @data: cookie returned from earlier call to scsi_prep_async_scan()
+ *
+ * All the devices currently attached to this host have been found.
+ * This function announces all the devices it has found to the rest
+ * of the system.
+ */
+static void scsi_finish_async_scan(struct async_scan_data *data)
+{
+       struct Scsi_Host *shost;
+
+       if (!data)
+               return;
+
+       shost = data->shost;
+       if (!shost->async_scan) {
+               printk("%s called twice for host %d", __FUNCTION__,
+                               shost->host_no);
+               dump_stack();
+               return;
+       }
+
+       wait_for_completion(&data->prev_finished);
+
+       scsi_sysfs_add_devices(shost);
+
+       spin_lock(&async_scan_lock);
+       shost->async_scan = 0;
+       list_del(&data->list);
+       if (!list_empty(&scanning_hosts)) {
+               struct async_scan_data *next = list_entry(scanning_hosts.next,
+                               struct async_scan_data, list);
+               complete(&next->prev_finished);
+       }
+       spin_unlock(&async_scan_lock);
+
+       scsi_host_put(shost);
+       kfree(data);
+}
+
+static void do_scsi_scan_host(struct Scsi_Host *shost)
+{
+       if (shost->hostt->scan_finished) {
+               unsigned long start = jiffies;
+               if (shost->hostt->scan_start)
+                       shost->hostt->scan_start(shost);
+
+               while (!shost->hostt->scan_finished(shost, jiffies - start))
+                       msleep(10);
+       } else {
+               scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
+                               SCAN_WILD_CARD, 0);
+       }
+}
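do_scsi_scan_host() prefers a driver's own discovery hooks when they exist: scan_start() kicks off discovery and scan_finished() is then polled, with the elapsed jiffies, until it reports completion. A hypothetical host template wiring up these hooks could look like the sketch below; my_hba, the 60-second cutoff and the discovery details are assumptions, not part of this patch:

#include <linux/jiffies.h>
#include <scsi/scsi_host.h>

/* Assumed driver-private state, stored in shost->hostdata. */
struct my_hba {
        int discovery_done;
};

static void my_scan_start(struct Scsi_Host *shost)
{
        struct my_hba *hba = (struct my_hba *)shost->hostdata;

        hba->discovery_done = 0;
        /* kick off asynchronous discovery here (firmware command, etc.) */
}

static int my_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
        struct my_hba *hba = (struct my_hba *)shost->hostdata;

        /* give up after 60 seconds even if discovery never completes */
        return hba->discovery_done || elapsed > 60 * HZ;
}

static struct scsi_host_template my_template = {
        .name           = "my_async_hba",
        .scan_start     = my_scan_start,
        .scan_finished  = my_scan_finished,
        /* ... queuecommand and the rest of the template ... */
};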
+
+static int do_scan_async(void *_data)
+{
+       struct async_scan_data *data = _data;
+       do_scsi_scan_host(data->shost);
+       scsi_finish_async_scan(data);
+       return 0;
+}
+
 /**
  * scsi_scan_host - scan the given adapter
  * @shost:     adapter to scan
  **/
 void scsi_scan_host(struct Scsi_Host *shost)
 {
-       scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
-                               SCAN_WILD_CARD, 0);
+       struct async_scan_data *data;
+
+       if (strncmp(scsi_scan_type, "none", 4) == 0)
+               return;
+
+       data = scsi_prep_async_scan(shost);
+       if (!data) {
+               do_scsi_scan_host(shost);
+               return;
+       }
+
+       kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
 }
 EXPORT_SYMBOL(scsi_scan_host);
 
index e1a91665d1c2ac9e00698073b7aea186953e0fd9..259c90cfa367027a29e812645fa5ab9e856ff28d 100644 (file)
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev)
        put_device(&sdev->sdev_gendev);
 }
 
-static void scsi_device_dev_release_usercontext(void *data)
+static void scsi_device_dev_release_usercontext(struct work_struct *work)
 {
-       struct device *dev = data;
        struct scsi_device *sdev;
        struct device *parent;
        struct scsi_target *starget;
        unsigned long flags;
 
-       parent = dev->parent;
-       sdev = to_scsi_device(dev);
+       sdev = container_of(work, struct scsi_device, ew.work);
+
+       parent = sdev->sdev_gendev.parent;
        starget = to_scsi_target(parent);
 
        spin_lock_irqsave(sdev->host->host_lock, flags);
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data)
 static void scsi_device_dev_release(struct device *dev)
 {
        struct scsi_device *sdp = to_scsi_device(dev);
-       execute_in_process_context(scsi_device_dev_release_usercontext, dev,
+       execute_in_process_context(scsi_device_dev_release_usercontext,
                                   &sdp->ew);
 }
 
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
new file mode 100644 (file)
index 0000000..37bbfbd
--- /dev/null
@@ -0,0 +1,352 @@
+/*
+ * SCSI target kernel/user interface functions
+ *
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/miscdevice.h>
+#include <linux/file.h>
+#include <net/tcp.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tgt.h>
+#include <scsi/scsi_tgt_if.h>
+
+#include <asm/cacheflush.h>
+
+#include "scsi_tgt_priv.h"
+
+struct tgt_ring {
+       u32 tr_idx;
+       unsigned long tr_pages[TGT_RING_PAGES];
+       spinlock_t tr_lock;
+};
+
+/* tx_ring : kernel->user, rx_ring : user->kernel */
+static struct tgt_ring tx_ring, rx_ring;
+static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
+
+static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
+{
+       if (ring->tr_idx == TGT_MAX_EVENTS - 1)
+               ring->tr_idx = 0;
+       else
+               ring->tr_idx++;
+}
+
+static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
+{
+       u32 pidx, off;
+
+       pidx = idx / TGT_EVENT_PER_PAGE;
+       off = idx % TGT_EVENT_PER_PAGE;
+
+       return (struct tgt_event *)
+               (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
+}
+
+static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
+{
+       struct tgt_event *ev;
+       struct tgt_ring *ring = &tx_ring;
+       unsigned long flags;
+       int err = 0;
+
+       spin_lock_irqsave(&ring->tr_lock, flags);
+
+       ev = tgt_head_event(ring, ring->tr_idx);
+       if (!ev->hdr.status)
+               tgt_ring_idx_inc(ring);
+       else
+               err = -BUSY;
+
+       spin_unlock_irqrestore(&ring->tr_lock, flags);
+
+       if (err)
+               return err;
+
+       memcpy(ev, p, sizeof(*ev));
+       ev->hdr.type = type;
+       mb();
+       ev->hdr.status = 1;
+
+       flush_dcache_page(virt_to_page(ev));
+
+       wake_up_interruptible(&tgt_poll_wait);
+
+       return 0;
+}
+
+int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
+{
+       struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
+       struct tgt_event ev;
+       int err;
+
+       memset(&ev, 0, sizeof(ev));
+       ev.p.cmd_req.host_no = shost->host_no;
+       ev.p.cmd_req.data_len = cmd->request_bufflen;
+       memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
+       memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
+       ev.p.cmd_req.attribute = cmd->tag;
+       ev.p.cmd_req.tag = tag;
+
+       dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
+               ev.p.cmd_req.data_len, cmd->tag,
+               (unsigned long long) ev.p.cmd_req.tag);
+
+       err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
+       if (err)
+               eprintk("tx buf is full, could not send\n");
+
+       return err;
+}
+
+int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
+{
+       struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
+       struct tgt_event ev;
+       int err;
+
+       memset(&ev, 0, sizeof(ev));
+       ev.p.cmd_done.host_no = shost->host_no;
+       ev.p.cmd_done.tag = tag;
+       ev.p.cmd_done.result = cmd->result;
+
+       dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
+               (unsigned long long) ev.p.cmd_req.tag,
+               ev.p.cmd_req.data_len, cmd->tag);
+
+       err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
+       if (err)
+               eprintk("tx buf is full, could not send\n");
+
+       return err;
+}
+
+int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
+                                 struct scsi_lun *scsilun, void *data)
+{
+       struct tgt_event ev;
+       int err;
+
+       memset(&ev, 0, sizeof(ev));
+       ev.p.tsk_mgmt_req.host_no = host_no;
+       ev.p.tsk_mgmt_req.function = function;
+       ev.p.tsk_mgmt_req.tag = tag;
+       memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
+       ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
+
+       dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
+               (unsigned long long) ev.p.tsk_mgmt_req.mid);
+
+       err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
+       if (err)
+               eprintk("tx buf is full, could not send\n");
+
+       return err;
+}
+
+static int event_recv_msg(struct tgt_event *ev)
+{
+       int err = 0;
+
+       switch (ev->hdr.type) {
+       case TGT_UEVENT_CMD_RSP:
+               err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
+                                          ev->p.cmd_rsp.tag,
+                                          ev->p.cmd_rsp.result,
+                                          ev->p.cmd_rsp.len,
+                                          ev->p.cmd_rsp.uaddr,
+                                          ev->p.cmd_rsp.rw);
+               break;
+       case TGT_UEVENT_TSK_MGMT_RSP:
+               err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
+                                              ev->p.tsk_mgmt_rsp.mid,
+                                              ev->p.tsk_mgmt_rsp.result);
+               break;
+       default:
+               eprintk("unknown type %d\n", ev->hdr.type);
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static ssize_t tgt_write(struct file *file, const char __user * buffer,
+                        size_t count, loff_t * ppos)
+{
+       struct tgt_event *ev;
+       struct tgt_ring *ring = &rx_ring;
+
+       while (1) {
+               ev = tgt_head_event(ring, ring->tr_idx);
+               /* do we need this? */
+               flush_dcache_page(virt_to_page(ev));
+
+               if (!ev->hdr.status)
+                       break;
+
+               tgt_ring_idx_inc(ring);
+               event_recv_msg(ev);
+               ev->hdr.status = 0;
+       };
+
+       return count;
+}
+
+static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
+{
+       struct tgt_event *ev;
+       struct tgt_ring *ring = &tx_ring;
+       unsigned long flags;
+       unsigned int mask = 0;
+       u32 idx;
+
+       poll_wait(file, &tgt_poll_wait, wait);
+
+       spin_lock_irqsave(&ring->tr_lock, flags);
+
+       idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
+       ev = tgt_head_event(ring, idx);
+       if (ev->hdr.status)
+               mask |= POLLIN | POLLRDNORM;
+
+       spin_unlock_irqrestore(&ring->tr_lock, flags);
+
+       return mask;
+}
+
+static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
+                          struct tgt_ring *ring)
+{
+       int i, err;
+
+       for (i = 0; i < TGT_RING_PAGES; i++) {
+               struct page *page = virt_to_page(ring->tr_pages[i]);
+               err = vm_insert_page(vma, addr, page);
+               if (err)
+                       return err;
+               addr += PAGE_SIZE;
+       }
+
+       return 0;
+}
+
+static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       unsigned long addr;
+       int err;
+
+       if (vma->vm_pgoff)
+               return -EINVAL;
+
+       if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
+               eprintk("mmap size must be %lu, not %lu \n",
+                       TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
+               return -EINVAL;
+       }
+
+       addr = vma->vm_start;
+       err = uspace_ring_map(vma, addr, &tx_ring);
+       if (err)
+               return err;
+       err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
+
+       return err;
+}
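Taken together, tgt_poll(), tgt_write() and tgt_mmap() define the userspace side of this interface: one mapping holds both rings, with the kernel-to-user (tx) ring in the first TGT_RING_SIZE bytes and the user-to-kernel (rx) ring in the second; poll() reports pending tx events and any write() makes the kernel walk the rx ring. A rough userspace sketch, assuming a /dev/tgt node for the misc device and deferring the real ring geometry and struct tgt_event layout to scsi_tgt_if.h:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define TGT_RING_SIZE (1UL << 16)  /* placeholder; use the scsi_tgt_if.h value */

int main(void)
{
        struct pollfd pfd;
        char kick = 0;
        void *rings;
        int fd;

        fd = open("/dev/tgt", O_RDWR);
        if (fd < 0) {
                perror("open /dev/tgt");
                return EXIT_FAILURE;
        }

        /* first half: kernel->user (tx) ring, second half: user->kernel (rx) */
        rings = mmap(NULL, TGT_RING_SIZE * 2, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, 0);
        if (rings == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        pfd.fd = fd;
        pfd.events = POLLIN;
        while (poll(&pfd, 1, -1) >= 0) {
                /* parse struct tgt_event entries out of the tx ring here,  */
                /* queue responses into the rx ring, then kick the kernel:  */
                write(fd, &kick, sizeof(kick));
        }
        return EXIT_SUCCESS;
}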
+
+static int tgt_open(struct inode *inode, struct file *file)
+{
+       tx_ring.tr_idx = rx_ring.tr_idx = 0;
+
+       return 0;
+}
+
+static struct file_operations tgt_fops = {
+       .owner          = THIS_MODULE,
+       .open           = tgt_open,
+       .poll           = tgt_poll,
+       .write          = tgt_write,
+       .mmap           = tgt_mmap,
+};
+
+static struct miscdevice tgt_miscdev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "tgt",
+       .fops = &tgt_fops,
+};
+
+static void tgt_ring_exit(struct tgt_ring *ring)
+{
+       int i;
+
+       for (i = 0; i < TGT_RING_PAGES; i++)
+               free_page(ring->tr_pages[i]);
+}
+
+static int tgt_ring_init(struct tgt_ring *ring)
+{
+       int i;
+
+       spin_lock_init(&ring->tr_lock);
+
+       for (i = 0; i < TGT_RING_PAGES; i++) {
+               ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
+               if (!ring->tr_pages[i]) {
+                       eprintk("out of memory\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+void scsi_tgt_if_exit(void)
+{
+       tgt_ring_exit(&tx_ring);
+       tgt_ring_exit(&rx_ring);
+       misc_deregister(&tgt_miscdev);
+}
+
+int scsi_tgt_if_init(void)
+{
+       int err;
+
+       err = tgt_ring_init(&tx_ring);
+       if (err)
+               return err;
+
+       err = tgt_ring_init(&rx_ring);
+       if (err)
+               goto free_tx_ring;
+
+       err = misc_register(&tgt_miscdev);
+       if (err)
+               goto free_rx_ring;
+
+       return 0;
+free_rx_ring:
+       tgt_ring_exit(&rx_ring);
+free_tx_ring:
+       tgt_ring_exit(&tx_ring);
+
+       return err;
+}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
new file mode 100644 (file)
index 0000000..d402aff
--- /dev/null
@@ -0,0 +1,745 @@
+/*
+ * SCSI target lib functions
+ *
+ * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tgt.h>
+#include <../drivers/md/dm-bio-list.h>
+
+#include "scsi_tgt_priv.h"
+
+static struct workqueue_struct *scsi_tgtd;
+static struct kmem_cache *scsi_tgt_cmd_cache;
+
+/*
+ * TODO: this struct will be killed when the block layer supports large bios
+ * and James's work struct code is in
+ */
+struct scsi_tgt_cmd {
+       /* TODO replace work with James b's code */
+       struct work_struct work;
+       /* TODO replace the lists with a large bio */
+       struct bio_list xfer_done_list;
+       struct bio_list xfer_list;
+
+       struct list_head hash_list;
+       struct request *rq;
+       u64 tag;
+
+       void *buffer;
+       unsigned bufflen;
+};
+
+#define TGT_HASH_ORDER 4
+#define cmd_hashfn(tag)        hash_long((unsigned long) (tag), TGT_HASH_ORDER)
+
+struct scsi_tgt_queuedata {
+       struct Scsi_Host *shost;
+       struct list_head cmd_hash[1 << TGT_HASH_ORDER];
+       spinlock_t cmd_hash_lock;
+};
+
+/*
+ * Function:   scsi_host_get_command()
+ *
+ * Purpose:    Allocate and setup a scsi command block and blk request
+ *
+ * Arguments:  shost   - scsi host
+ *             data_dir - dma data dir
+ *             gfp_mask- allocator flags
+ *
+ * Returns:    The allocated scsi command structure.
+ *
+ * This should be called by target LLDs to get a command.
+ */
+struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
+                                       enum dma_data_direction data_dir,
+                                       gfp_t gfp_mask)
+{
+       int write = (data_dir == DMA_TO_DEVICE);
+       struct request *rq;
+       struct scsi_cmnd *cmd;
+       struct scsi_tgt_cmd *tcmd;
+
+       /* Bail if we can't get a reference to the device */
+       if (!get_device(&shost->shost_gendev))
+               return NULL;
+
+       tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
+       if (!tcmd)
+               goto put_dev;
+
+       rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
+       if (!rq)
+               goto free_tcmd;
+
+       cmd = __scsi_get_command(shost, gfp_mask);
+       if (!cmd)
+               goto release_rq;
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->sc_data_direction = data_dir;
+       cmd->jiffies_at_alloc = jiffies;
+       cmd->request = rq;
+
+       rq->special = cmd;
+       rq->cmd_type = REQ_TYPE_SPECIAL;
+       rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
+       rq->end_io_data = tcmd;
+
+       bio_list_init(&tcmd->xfer_list);
+       bio_list_init(&tcmd->xfer_done_list);
+       tcmd->rq = rq;
+
+       return cmd;
+
+release_rq:
+       blk_put_request(rq);
+free_tcmd:
+       kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
+put_dev:
+       put_device(&shost->shost_gendev);
+       return NULL;
+
+}
+EXPORT_SYMBOL_GPL(scsi_host_get_command);
+
+/*
+ * Function:   scsi_host_put_command()
+ *
+ * Purpose:    Free a scsi command block
+ *
+ * Arguments:  shost   - scsi host
+ *             cmd     - command block to free
+ *
+ * Returns:    Nothing.
+ *
+ * Notes:      The command must not belong to any lists.
+ */
+void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+       struct request_queue *q = shost->uspace_req_q;
+       struct request *rq = cmd->request;
+       struct scsi_tgt_cmd *tcmd = rq->end_io_data;
+       unsigned long flags;
+
+       kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       __blk_put_request(q, rq);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       __scsi_put_command(shost, cmd, &shost->shost_gendev);
+}
+EXPORT_SYMBOL_GPL(scsi_host_put_command);
+
+static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
+{
+       struct bio *bio;
+
+       /* must call bio_endio in case bio was bounced */
+       while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
+               bio_endio(bio, bio->bi_size, 0);
+               bio_unmap_user(bio);
+       }
+
+       while ((bio = bio_list_pop(&tcmd->xfer_list))) {
+               bio_endio(bio, bio->bi_size, 0);
+               bio_unmap_user(bio);
+       }
+}
+
+static void cmd_hashlist_del(struct scsi_cmnd *cmd)
+{
+       struct request_queue *q = cmd->request->q;
+       struct scsi_tgt_queuedata *qdata = q->queuedata;
+       unsigned long flags;
+       struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+
+       spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+       list_del(&tcmd->hash_list);
+       spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+}
+
+static void scsi_tgt_cmd_destroy(struct work_struct *work)
+{
+       struct scsi_tgt_cmd *tcmd =
+               container_of(work, struct scsi_tgt_cmd, work);
+       struct scsi_cmnd *cmd = tcmd->rq->special;
+
+       dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
+               rq_data_dir(cmd->request));
+       /*
+        * We fix rq->cmd_flags here: for WRITE commands we told
+        * bio_map_user to write to the VM, so blk_rq_bio_prep set the
+        * flags (and hence rq_data_dir) to READ.
+        */
+       if (cmd->sc_data_direction == DMA_TO_DEVICE)
+               cmd->request->cmd_flags |= REQ_RW;
+       else
+               cmd->request->cmd_flags &= ~REQ_RW;
+
+       scsi_unmap_user_pages(tcmd);
+       scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
+}
+
+static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
+                             u64 tag)
+{
+       struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
+       unsigned long flags;
+       struct list_head *head;
+
+       tcmd->tag = tag;
+       INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
+       spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+       head = &qdata->cmd_hash[cmd_hashfn(tag)];
+       list_add(&tcmd->hash_list, head);
+       spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+}
+
+/*
+ * scsi_tgt_alloc_queue - setup queue used for message passing
+ * shost: scsi host
+ *
+ * This should be called by the LLD after host allocation.
+ * The queue is released when the host is released.
+ */
+int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
+{
+       struct scsi_tgt_queuedata *queuedata;
+       struct request_queue *q;
+       int err, i;
+
+       /*
+        * Do we need to send a netlink event or should uspace
+        * just respond to the hotplug event?
+        */
+       q = __scsi_alloc_queue(shost, NULL);
+       if (!q)
+               return -ENOMEM;
+
+       queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
+       if (!queuedata) {
+               err = -ENOMEM;
+               goto cleanup_queue;
+       }
+       queuedata->shost = shost;
+       q->queuedata = queuedata;
+
+       /*
+        * This is a silly hack. We should probably just queue as many
+        * commands to userspace as are received; userspace can then make
+        * sure we do not overload the HBA.
+        */
+       q->nr_requests = shost->hostt->can_queue;
+       /*
+        * We currently only support software LLDs so this does
+        * not matter for now. Do we need this for the cards we support?
+        * If so we should make it a host template value.
+        */
+       blk_queue_dma_alignment(q, 0);
+       shost->uspace_req_q = q;
+
+       for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
+               INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
+       spin_lock_init(&queuedata->cmd_hash_lock);
+
+       return 0;
+
+cleanup_queue:
+       blk_cleanup_queue(q);
+       return err;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
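Per the comment above, an LLD calls scsi_tgt_alloc_queue() once, after allocating its Scsi_Host and before putting it to use; the queue then lives as long as the host. A hypothetical probe path might look like the following sketch (the template values and PCI wiring are placeholders, not from this patch):

#include <linux/pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>

static struct scsi_host_template my_tgt_template = {
        .name           = "my_tgt",
        .can_queue      = 64,
        /* ... transfer_response, transfer_data, eh_abort_handler, ... */
};

static int my_tgt_probe(struct pci_dev *pdev)
{
        struct Scsi_Host *shost;
        int err;

        shost = scsi_host_alloc(&my_tgt_template, 0);
        if (!shost)
                return -ENOMEM;

        err = scsi_tgt_alloc_queue(shost);      /* uspace_req_q is now valid */
        if (err)
                goto put_host;

        err = scsi_add_host(shost, &pdev->dev);
        if (err)
                goto put_host;          /* queue goes away with the host */

        return 0;

put_host:
        scsi_host_put(shost);
        return err;
}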
+
+void scsi_tgt_free_queue(struct Scsi_Host *shost)
+{
+       int i;
+       unsigned long flags;
+       struct request_queue *q = shost->uspace_req_q;
+       struct scsi_cmnd *cmd;
+       struct scsi_tgt_queuedata *qdata = q->queuedata;
+       struct scsi_tgt_cmd *tcmd, *n;
+       LIST_HEAD(cmds);
+
+       spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+
+       for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
+               list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
+                                        hash_list) {
+                       list_del(&tcmd->hash_list);
+                       list_add(&tcmd->hash_list, &cmds);
+               }
+       }
+
+       spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+
+       while (!list_empty(&cmds)) {
+               tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
+               list_del(&tcmd->hash_list);
+               cmd = tcmd->rq->special;
+
+               shost->hostt->eh_abort_handler(cmd);
+               scsi_tgt_cmd_destroy(&tcmd->work);
+       }
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
+
+struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
+{
+       struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
+       return queue->shost;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
+
+/*
+ * scsi_tgt_queue_command - queue command for userspace processing
+ * @cmd:       scsi command
+ * @scsilun:   scsi lun
+ * @tag:       unique value to identify this command for tmf
+ */
+int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
+                          u64 tag)
+{
+       struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+       int err;
+
+       init_scsi_tgt_cmd(cmd->request, tcmd, tag);
+       err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
+       if (err)
+               cmd_hashlist_del(cmd);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
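From the LLD's side, the receive path under this library is: allocate a command with scsi_host_get_command(), fill in the CDB, LUN and expected data length from the transport frame, then hand it to userspace with scsi_tgt_queue_command() under a tag that task management can later use to find it. A sketch against a hypothetical wire format (struct my_frame and its fields are assumptions):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>

struct my_frame {                       /* hypothetical transport frame */
        int write;
        u8 cdb[MAX_COMMAND_SIZE];
        u32 data_len;
        struct scsi_lun lun;
        u64 tag;
};

static int my_recv_cmd(struct Scsi_Host *shost, struct my_frame *frame)
{
        struct scsi_cmnd *cmd;
        int err;

        cmd = scsi_host_get_command(shost,
                                    frame->write ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
                                    GFP_ATOMIC);
        if (!cmd)
                return -ENOMEM;

        memcpy(cmd->cmnd, frame->cdb, MAX_COMMAND_SIZE);
        cmd->request_bufflen = frame->data_len;

        /* the tag must uniquely identify the command for later TMF lookup */
        err = scsi_tgt_queue_command(cmd, &frame->lun, frame->tag);
        if (err)
                scsi_host_put_command(shost, cmd);
        return err;
}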
+
+/*
+ * This is normally run from an interrupt handler, and the unmap
+ * needs process context, so we must queue the work.
+ */
+static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
+{
+       struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+
+       dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+
+       scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+       queue_work(scsi_tgtd, &tcmd->work);
+}
+
+static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
+       int err;
+
+       dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+
+       err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
+       switch (err) {
+       case SCSI_MLQUEUE_HOST_BUSY:
+       case SCSI_MLQUEUE_DEVICE_BUSY:
+               return -EAGAIN;
+       }
+
+       return 0;
+}
+
+static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
+{
+       struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+       int err;
+
+       err = __scsi_tgt_transfer_response(cmd);
+       if (!err)
+               return;
+
+       cmd->result = DID_BUS_BUSY << 16;
+       err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+       if (err <= 0)
+               /* the eh will have to pick this up */
+               printk(KERN_ERR "Could not send cmd %p status\n", cmd);
+}
+
+static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+       struct request *rq = cmd->request;
+       struct scsi_tgt_cmd *tcmd = rq->end_io_data;
+       int count;
+
+       cmd->use_sg = rq->nr_phys_segments;
+       cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
+       if (!cmd->request_buffer)
+               return -ENOMEM;
+
+       cmd->request_bufflen = rq->data_len;
+
+       dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
+               rq_data_dir(rq));
+       count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
+       if (likely(count <= cmd->use_sg)) {
+               cmd->use_sg = count;
+               return 0;
+       }
+
+       eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
+       scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+       return -EINVAL;
+}
+
+/* TODO: test this crap and replace bio_map_user with new interface maybe */
+static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
+                              int rw)
+{
+       struct request_queue *q = cmd->request->q;
+       struct request *rq = cmd->request;
+       void *uaddr = tcmd->buffer;
+       unsigned int len = tcmd->bufflen;
+       struct bio *bio;
+       int err;
+
+       while (len > 0) {
+               dprintk("%lx %u\n", (unsigned long) uaddr, len);
+               bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
+               if (IS_ERR(bio)) {
+                       err = PTR_ERR(bio);
+                       dprintk("fail to map %lx %u %d %x\n",
+                               (unsigned long) uaddr, len, err, cmd->cmnd[0]);
+                       goto unmap_bios;
+               }
+
+               uaddr += bio->bi_size;
+               len -= bio->bi_size;
+
+               /*
+                * The first bio is added and merged. We could probably
+                * try to add others using scsi_merge_bio() but for now
+                * we keep it simple. The first bio should be pretty large
+                * (either hitting the 1 MB bio pages limit or a queue limit)
+                * already but for really large IO we may want to try and
+                * merge these.
+                */
+               if (!rq->bio) {
+                       blk_rq_bio_prep(q, rq, bio);
+                       rq->data_len = bio->bi_size;
+               } else
+                       /* put list of bios to transfer in next go around */
+                       bio_list_add(&tcmd->xfer_list, bio);
+       }
+
+       cmd->offset = 0;
+       err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
+       if (err)
+               goto unmap_bios;
+
+       return 0;
+
+unmap_bios:
+       if (rq->bio) {
+               bio_unmap_user(rq->bio);
+               while ((bio = bio_list_pop(&tcmd->xfer_list)))
+                       bio_unmap_user(bio);
+       }
+
+       return err;
+}
+
+static int scsi_tgt_transfer_data(struct scsi_cmnd *);
+
+static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
+{
+       struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+       struct bio *bio;
+       int err;
+
+       /* should we free resources here on error ? */
+       if (cmd->result) {
+send_uspace_err:
+               err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
+               if (err <= 0)
+                       /* the tgt uspace eh will have to pick this up */
+                       printk(KERN_ERR "Could not send cmd %p status\n", cmd);
+               return;
+       }
+
+       dprintk("cmd %p request_bufflen %u bufflen %u\n",
+               cmd, cmd->request_bufflen, tcmd->bufflen);
+
+       scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+       bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);
+
+       tcmd->buffer += cmd->request_bufflen;
+       cmd->offset += cmd->request_bufflen;
+
+       if (!tcmd->xfer_list.head) {
+               scsi_tgt_transfer_response(cmd);
+               return;
+       }
+
+       dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
+               cmd, cmd->request_bufflen, tcmd->bufflen);
+
+       bio = bio_list_pop(&tcmd->xfer_list);
+       BUG_ON(!bio);
+
+       blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
+       cmd->request->data_len = bio->bi_size;
+       err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
+       if (err) {
+               cmd->result = DID_ERROR << 16;
+               goto send_uspace_err;
+       }
+
+       if (scsi_tgt_transfer_data(cmd)) {
+               cmd->result = DID_NO_CONNECT << 16;
+               goto send_uspace_err;
+       }
+}
+
+static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
+{
+       int err;
+       struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);
+
+       err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
+       switch (err) {
+       case SCSI_MLQUEUE_HOST_BUSY:
+       case SCSI_MLQUEUE_DEVICE_BUSY:
+               return -EAGAIN;
+       default:
+               return 0;
+       }
+}
+
+static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
+                               unsigned len)
+{
+       char __user *p = (char __user *) uaddr;
+
+       if (copy_from_user(cmd->sense_buffer, p,
+                          min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
+               printk(KERN_ERR "Could not copy the sense buffer\n");
+               return -EIO;
+       }
+       return 0;
+}
+
+static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+       struct scsi_tgt_cmd *tcmd;
+       int err;
+
+       err = shost->hostt->eh_abort_handler(cmd);
+       if (err)
+               eprintk("fail to abort %p\n", cmd);
+
+       tcmd = cmd->request->end_io_data;
+       scsi_tgt_cmd_destroy(&tcmd->work);
+       return err;
+}
+
+static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
+{
+       struct scsi_tgt_queuedata *qdata = q->queuedata;
+       struct request *rq = NULL;
+       struct list_head *head;
+       struct scsi_tgt_cmd *tcmd;
+       unsigned long flags;
+
+       head = &qdata->cmd_hash[cmd_hashfn(tag)];
+       spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+       list_for_each_entry(tcmd, head, hash_list) {
+               if (tcmd->tag == tag) {
+                       rq = tcmd->rq;
+                       list_del(&tcmd->hash_list);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+
+       return rq;
+}
+
+int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
+                        unsigned long uaddr, u8 rw)
+{
+       struct Scsi_Host *shost;
+       struct scsi_cmnd *cmd;
+       struct request *rq;
+       struct scsi_tgt_cmd *tcmd;
+       int err = 0;
+
+       dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
+               result, len, uaddr, rw);
+
+       /* TODO: replace with an O(1) alg */
+       shost = scsi_host_lookup(host_no);
+       if (IS_ERR(shost)) {
+               printk(KERN_ERR "Could not find host no %d\n", host_no);
+               return -EINVAL;
+       }
+
+       if (!shost->uspace_req_q) {
+               printk(KERN_ERR "Not target scsi host %d\n", host_no);
+               goto done;
+       }
+
+       rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
+       if (!rq) {
+               printk(KERN_ERR "Could not find tag %llu\n",
+                      (unsigned long long) tag);
+               err = -EINVAL;
+               goto done;
+       }
+       cmd = rq->special;
+
+       dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
+               result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
+
+       if (result == TASK_ABORTED) {
+               scsi_tgt_abort_cmd(shost, cmd);
+               goto done;
+       }
+       /*
+        * store the userspace values here, the working values are
+        * in the request_* values
+        */
+       tcmd = cmd->request->end_io_data;
+       tcmd->buffer = (void *)uaddr;
+       tcmd->bufflen = len;
+       cmd->result = result;
+
+       if (!tcmd->bufflen || cmd->request_buffer) {
+               err = __scsi_tgt_transfer_response(cmd);
+               goto done;
+       }
+
+       /*
+        * TODO: Do we need to handle the case where the request does not
+        * align with the LLD?
+        */
+       err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
+       if (err) {
+               eprintk("%p %d\n", cmd, err);
+               err = -EAGAIN;
+               goto done;
+       }
+
+       /* userspace failure */
+       if (cmd->result) {
+               if (status_byte(cmd->result) == CHECK_CONDITION)
+                       scsi_tgt_copy_sense(cmd, uaddr, len);
+               err = __scsi_tgt_transfer_response(cmd);
+               goto done;
+       }
+       /* ask the target LLD to transfer the data to the buffer */
+       err = scsi_tgt_transfer_data(cmd);
+
+done:
+       scsi_host_put(shost);
+       return err;
+}
+
+int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
+                             struct scsi_lun *scsilun, void *data)
+{
+       int err;
+
+       /* TODO: need to retry if this fails. */
+       err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
+                                           tag, scsilun, data);
+       if (err < 0)
+               eprintk("The task management request lost!\n");
+       return err;
+}
+EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
+
+int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
+{
+       struct Scsi_Host *shost;
+       int err = -EINVAL;
+
+       dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
+
+       shost = scsi_host_lookup(host_no);
+       if (IS_ERR(shost)) {
+               printk(KERN_ERR "Could not find host no %d\n", host_no);
+               return err;
+       }
+
+       if (!shost->uspace_req_q) {
+               printk(KERN_ERR "Not target scsi host %d\n", host_no);
+               goto done;
+       }
+
+       err = shost->hostt->tsk_mgmt_response(mid, result);
+done:
+       scsi_host_put(shost);
+       return err;
+}
+
+static int __init scsi_tgt_init(void)
+{
+       int err;
+
+       scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
+                                              sizeof(struct scsi_tgt_cmd),
+                                              0, 0, NULL, NULL);
+       if (!scsi_tgt_cmd_cache)
+               return -ENOMEM;
+
+       scsi_tgtd = create_workqueue("scsi_tgtd");
+       if (!scsi_tgtd) {
+               err = -ENOMEM;
+               goto free_kmemcache;
+       }
+
+       err = scsi_tgt_if_init();
+       if (err)
+               goto destroy_wq;
+
+       return 0;
+
+destroy_wq:
+       destroy_workqueue(scsi_tgtd);
+free_kmemcache:
+       kmem_cache_destroy(scsi_tgt_cmd_cache);
+       return err;
+}
+
+static void __exit scsi_tgt_exit(void)
+{
+       destroy_workqueue(scsi_tgtd);
+       scsi_tgt_if_exit();
+       kmem_cache_destroy(scsi_tgt_cmd_cache);
+}
+
+module_init(scsi_tgt_init);
+module_exit(scsi_tgt_exit);
+
+MODULE_DESCRIPTION("SCSI target core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
new file mode 100644 (file)
index 0000000..84488c5
--- /dev/null
@@ -0,0 +1,25 @@
+struct scsi_cmnd;
+struct scsi_lun;
+struct Scsi_Host;
+struct task_struct;
+
+/* tmp - will replace with SCSI logging stuff */
+#define eprintk(fmt, args...)                                  \
+do {                                                           \
+       printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args);  \
+} while (0)
+
+#define dprintk(fmt, args...)
+/* #define dprintk eprintk */
+
+extern void scsi_tgt_if_exit(void);
+extern int scsi_tgt_if_init(void);
+
+extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun,
+                                   u64 tag);
+extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag);
+extern int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
+                               unsigned long uaddr, u8 rw);
+extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
+                                        struct scsi_lun *scsilun, void *data);
+extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result);
index 38c215a78f69f77c153b254e1fefb4f336600468..3571ce8934e7af15f8924a832c6be7047fbbecb4 100644 (file)
@@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names)
 #define FC_MGMTSRVR_PORTID             0x00000a
 
 
-static void fc_timeout_deleted_rport(void *data);
-static void fc_timeout_fail_rport_io(void *data);
-static void fc_scsi_scan_rport(void *data);
+static void fc_timeout_deleted_rport(struct work_struct *work);
+static void fc_timeout_fail_rport_io(struct work_struct *work);
+static void fc_scsi_scan_rport(struct work_struct *work);
 
 /*
  * Attribute counts per object type...
@@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost)
  *     1 on success / 0 already queued / < 0 for error
  **/
 static int
-fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
+fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
                                unsigned long delay)
 {
        if (unlikely(!fc_host_devloss_work_q(shost))) {
@@ -1625,9 +1625,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work,
                return -EINVAL;
        }
 
-       if (delay == 0)
-               return queue_work(fc_host_devloss_work_q(shost), work);
-
        return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
 }
 
@@ -1712,12 +1709,13 @@ EXPORT_SYMBOL(fc_remove_host);
  * fc_starget_delete - called to delete the scsi descendants of an rport
  *                  (target and all sdevs)
  *
- * @data:      remote port to be operated on.
+ * @work:      remote port to be operated on.
  **/
 static void
-fc_starget_delete(void *data)
+fc_starget_delete(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, stgt_delete_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        unsigned long flags;
        struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1751,12 +1749,13 @@ fc_starget_delete(void *data)
 /**
  * fc_rport_final_delete - finish rport termination and delete it.
  *
- * @data:      remote port to be deleted.
+ * @work:      remote port to be deleted.
  **/
 static void
-fc_rport_final_delete(void *data)
+fc_rport_final_delete(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, rport_delete_work);
        struct device *dev = &rport->dev;
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_internal *i = to_fc_internal(shost->transportt);
@@ -1770,7 +1769,7 @@ fc_rport_final_delete(void *data)
 
        /* Delete SCSI target and sdevs */
        if (rport->scsi_target_id != -1)
-               fc_starget_delete(data);
+               fc_starget_delete(&rport->stgt_delete_work);
        else if (i->f->dev_loss_tmo_callbk)
                i->f->dev_loss_tmo_callbk(rport);
        else if (i->f->terminate_rport_io)
@@ -1829,11 +1828,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
        rport->channel = channel;
        rport->fast_io_fail_tmo = -1;
 
-       INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport);
-       INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport);
-       INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport);
-       INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport);
-       INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport);
+       INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
+       INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
+       INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
+       INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
+       INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
 
        spin_lock_irqsave(shost->host_lock, flags);
 
@@ -1963,7 +1962,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
                        }
 
                        if (match) {
-                               struct work_struct *work = 
+                               struct delayed_work *work =
                                                        &rport->dev_loss_work;
 
                                memcpy(&rport->node_name, &ids->node_name,
@@ -2267,12 +2266,13 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
  *                       was a SCSI target (thus was blocked), and failed
  *                       to return in the allotted time.
  * 
- * @data:      rport target that failed to reappear in the alloted time.
+ * @work:      rport target that failed to reappear in the allotted time.
  **/
 static void
-fc_timeout_deleted_rport(void  *data)
+fc_timeout_deleted_rport(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, dev_loss_work.work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
        unsigned long flags;
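The rport timers become delayed_work items here, so the handler reaches the containing structure through the .work member embedded in the delayed_work. The same pattern in isolation, with illustrative names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_port {
        int id;
        struct delayed_work loss_work;
};

static void my_port_loss_timeout(struct work_struct *work)
{
        struct my_port *port =
                container_of(work, struct my_port, loss_work.work);

        printk(KERN_WARNING "port %d: device-loss timer fired\n", port->id);
}

static void my_port_init(struct my_port *port)
{
        INIT_DELAYED_WORK(&port->loss_work, my_port_loss_timeout);
        /* arm the timer to fire 30 seconds from now */
        schedule_delayed_work(&port->loss_work, 30 * HZ);
}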
@@ -2366,15 +2366,16 @@ fc_timeout_deleted_rport(void  *data)
  * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a
  *                       disconnected SCSI target.
  *
- * @data:      rport to terminate io on.
+ * @work:      rport to terminate io on.
  *
  * Notes: Only requests the failure of the io, not that all are flushed
  *    prior to returning.
  **/
 static void
-fc_timeout_fail_rport_io(void  *data)
+fc_timeout_fail_rport_io(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, fail_io_work.work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        struct fc_internal *i = to_fc_internal(shost->transportt);
 
@@ -2387,12 +2388,13 @@ fc_timeout_fail_rport_io(void  *data)
 /**
  * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
  *
- * @data:      remote port to be scanned.
+ * @work:      remote port to be scanned.
  **/
 static void
-fc_scsi_scan_rport(void *data)
+fc_scsi_scan_rport(struct work_struct *work)
 {
-       struct fc_rport *rport = (struct fc_rport *)data;
+       struct fc_rport *rport =
+               container_of(work, struct fc_rport, scan_work);
        struct Scsi_Host *shost = rport_to_shost(rport);
        unsigned long flags;
 
index 9b25124a989e8cf134721292a5231eb6421b25f7..9c22f1342715c7a1047f45bf1ee9e59941d26e88 100644 (file)
@@ -234,9 +234,11 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
        return 0;
 }
 
-static void session_recovery_timedout(void *data)
+static void session_recovery_timedout(struct work_struct *work)
 {
-       struct iscsi_cls_session *session = data;
+       struct iscsi_cls_session *session =
+               container_of(work, struct iscsi_cls_session,
+                            recovery_work.work);
 
        dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
                  "out after %d secs\n", session->recovery_tmo);
@@ -276,7 +278,7 @@ iscsi_alloc_session(struct Scsi_Host *shost,
 
        session->transport = transport;
        session->recovery_tmo = 120;
-       INIT_WORK(&session->recovery_work, session_recovery_timedout, session);
+       INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
        INIT_LIST_HEAD(&session->host_list);
        INIT_LIST_HEAD(&session->sess_list);
 
index 9f070f0d0f2bfcf1d829324492a220e284745d42..3fded4831460573b4c5996fdc0f1bc3c8cf513ca 100644 (file)
@@ -964,9 +964,10 @@ struct work_queue_wrapper {
 };
 
 static void
-spi_dv_device_work_wrapper(void *data)
+spi_dv_device_work_wrapper(struct work_struct *work)
 {
-       struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
+       struct work_queue_wrapper *wqw =
+               container_of(work, struct work_queue_wrapper, work);
        struct scsi_device *sdev = wqw->sdev;
 
        kfree(wqw);
@@ -1006,7 +1007,7 @@ spi_schedule_dv_device(struct scsi_device *sdev)
                return;
        }
 
-       INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
+       INIT_WORK(&wqw->work, spi_dv_device_work_wrapper);
        wqw->sdev = sdev;
 
        schedule_work(&wqw->work);
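spi_dv_device_work_wrapper() shows the other shape of the conversion: when the work item cannot be embedded in the object it acts on, a small wrapper carrying the work_struct and a pointer is allocated, and the handler frees the wrapper after recovering its target. The pattern reduced to a sketch, with illustrative names and reference counting:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <scsi/scsi_device.h>

struct sdev_work_wrapper {
        struct work_struct work;
        struct scsi_device *sdev;
};

static void sdev_deferred_work(struct work_struct *work)
{
        struct sdev_work_wrapper *w =
                container_of(work, struct sdev_work_wrapper, work);
        struct scsi_device *sdev = w->sdev;

        kfree(w);                       /* wrapper is no longer needed */
        /* ... operate on sdev in process context ... */
        scsi_device_put(sdev);
}

static int sdev_schedule_deferred(struct scsi_device *sdev)
{
        struct sdev_work_wrapper *w = kmalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return -ENOMEM;
        if (scsi_device_get(sdev)) {
                kfree(w);
                return -ENODEV;
        }
        INIT_WORK(&w->work, sdev_deferred_work);
        w->sdev = sdev;
        schedule_work(&w->work);
        return 0;
}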
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
new file mode 100644 (file)
index 0000000..8a63610
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * scsi_wait_scan.c
+ *
+ * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
+ *
+ * This is a simple module to wait until all the async scans are
+ * complete.  The idea is to use it in initrd/initramfs scripts.  You
+ * modprobe it after all the modprobes of the root SCSI drivers and it
+ * will wait until they have all finished scanning their busses before
+ * allowing the boot to proceed.
+ */
+
+#include <linux/module.h>
+#include "scsi_priv.h"
+
+static int __init wait_scan_init(void)
+{
+       scsi_complete_async_scans();
+       return 0;
+}
+
+static void __exit wait_scan_exit(void)
+{
+}
+
+MODULE_DESCRIPTION("SCSI wait for scans");
+MODULE_AUTHOR("James Bottomley");
+MODULE_LICENSE("GPL");
+
+late_initcall(wait_scan_init);
+module_exit(wait_scan_exit);
index 84ff203ffedd77749276358825e488fb6d8cfd9d..f6a452846fab7ba75a85c0242cdd4b4f3b881384 100644 (file)
@@ -1051,6 +1051,14 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
                                                      &sshdr, SD_TIMEOUT,
                                                      SD_MAX_RETRIES);
 
+                       /*
+                        * If the drive has indicated to us that it
+                        * doesn't have any media in it, don't bother
+                        * with any more polling.
+                        */
+                       if (media_not_present(sdkp, &sshdr))
+                               return;
+
                        if (the_result)
                                sense_valid = scsi_sense_valid(&sshdr);
                        retries++;
@@ -1059,14 +1067,6 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
                          ((driver_byte(the_result) & DRIVER_SENSE) &&
                          sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
 
-               /*
-                * If the drive has indicated to us that it doesn't have
-                * any media in it, don't bother with any of the rest of
-                * this crap.
-                */
-               if (media_not_present(sdkp, &sshdr))
-                       return;
-
                if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
                        /* no sense, TUR either succeeded or failed
                         * with a status error */
@@ -1467,7 +1467,6 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
        res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
 
        if (scsi_status_is_good(res)) {
-               int ct = 0;
                int offset = data.header_length + data.block_descriptor_length;
 
                if (offset >= SD_BUF_SIZE - 2) {
@@ -1496,11 +1495,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
                        sdkp->DPOFUA = 0;
                }
 
-               ct =  sdkp->RCD + 2*sdkp->WCE;
-
-               printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
-                      diskname, sd_cache_types[ct],
-                      sdkp->DPOFUA ? " w/ FUA" : "");
+               printk(KERN_NOTICE "SCSI device %s: "
+                      "write cache: %s, read cache: %s, %s\n",
+                      diskname,
+                      sdkp->WCE ? "enabled" : "disabled",
+                      sdkp->RCD ? "disabled" : "enabled",
+                      sdkp->DPOFUA ? "supports DPO and FUA"
+                      : "doesn't support DPO or FUA");
 
                return;
        }
index e1a52c525ed492155a8243722e4387428198548b..587274dd70596a18d7eda3f9326bd8220a9ec017 100644 (file)
@@ -9,7 +9,7 @@
    Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
    Michael Schaefer, J"org Weule, and Eric Youngdale.
 
-   Copyright 1992 - 2005 Kai Makisara
+   Copyright 1992 - 2006 Kai Makisara
    email Kai.Makisara@kolumbus.fi
 
    Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
    Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  */
 
-static const char *verstr = "20050830";
+static const char *verstr = "20061107";
 
 #include <linux/module.h>
 
@@ -999,7 +999,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
                        STp->min_block = ((STp->buffer)->b_data[4] << 8) |
                            (STp->buffer)->b_data[5];
                        if ( DEB( debugging || ) !STp->inited)
-                               printk(KERN_WARNING
+                               printk(KERN_INFO
                                        "%s: Block limits %d - %d bytes.\n", name,
                                        STp->min_block, STp->max_block);
                } else {
@@ -1224,7 +1224,7 @@ static int st_flush(struct file *filp, fl_owner_t id)
        }
 
        DEBC( if (STp->nbr_requests)
-               printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
+               printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
                       name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
 
        if (STps->rw == ST_WRITING && !STp->pos_unknown) {
@@ -4056,11 +4056,11 @@ static int st_probe(struct device *dev)
                        goto out_free_tape;
        }
 
-       sdev_printk(KERN_WARNING, SDp,
+       sdev_printk(KERN_NOTICE, SDp,
                    "Attached scsi tape %s\n", tape_name(tpnt));
-       printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
-              tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
-              queue_dma_alignment(SDp->request_queue) + 1);
+       sdev_printk(KERN_INFO, SDp, "%s: try direct i/o: %s (alignment %d B)\n",
+                   tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
+                   queue_dma_alignment(SDp->request_queue) + 1);
 
        return 0;
 
index 185c270bb0433f9dae1bb9427e61092d9c905796..ba6bcdaf2a6afdaf896da7c8a49ab982741d0ca6 100644 (file)
@@ -11,8 +11,6 @@
  *     Written By:
  *             Ed Lin <promise_linux@promise.com>
  *
- *     Version: 3.0.0.1
- *
  */
 
 #include <linux/init.h>
@@ -37,9 +35,9 @@
 #include <scsi/scsi_tcq.h>
 
 #define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "3.0.0.1"
+#define ST_DRIVER_VERSION "3.1.0.1"
 #define ST_VER_MAJOR           3
-#define ST_VER_MINOR           0
+#define ST_VER_MINOR           1
 #define ST_OEM                         0
 #define ST_BUILD_VER           1
 
@@ -76,8 +74,10 @@ enum {
        MU_STATE_STARTED                        = 4,
        MU_STATE_RESETTING                      = 5,
 
-       MU_MAX_DELAY_TIME                       = 240000,
+       MU_MAX_DELAY                            = 120,
        MU_HANDSHAKE_SIGNATURE                  = 0x55aaaa55,
+       MU_HANDSHAKE_SIGNATURE_HALF             = 0x5a5a0000,
+       MU_HARD_RESET_WAIT                      = 30000,
        HMU_PARTNER_TYPE                        = 2,
 
        /* firmware returned values */
@@ -120,7 +120,8 @@ enum {
 
        st_shasta                               = 0,
        st_vsc                                  = 1,
-       st_yosemite                             = 2,
+       st_vsc1                                 = 2,
+       st_yosemite                             = 3,
 
        PASSTHRU_REQ_TYPE                       = 0x00000001,
        PASSTHRU_REQ_NO_WAKEUP                  = 0x00000100,
@@ -150,6 +151,8 @@ enum {
        MGT_CMD_SIGNATURE                       = 0xba,
 
        INQUIRY_EVPD                            = 0x01,
+
+       ST_ADDITIONAL_MEM                       = 0x200000,
 };
 
 /* SCSI inquiry data */
@@ -211,7 +214,9 @@ struct handshake_frame {
        __le32 partner_ver_minor;
        __le32 partner_ver_oem;
        __le32 partner_ver_build;
-       u32 reserved1[4];
+       __le32 extra_offset;    /* NEW */
+       __le32 extra_size;      /* NEW */
+       u32 reserved1[2];
 };
 
 struct req_msg {
@@ -302,6 +307,7 @@ struct st_hba {
        void __iomem *mmio_base;        /* iomapped PCI memory space */
        void *dma_mem;
        dma_addr_t dma_handle;
+       size_t dma_size;
 
        struct Scsi_Host *host;
        struct pci_dev *pdev;
@@ -507,6 +513,7 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
        size_t count = sizeof(struct st_frame);
 
        p = hba->copy_buffer;
+       stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD);
        memset(p->base, 0, sizeof(u32)*6);
        *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
        p->rom_addr = 0;
@@ -901,27 +908,34 @@ static int stex_handshake(struct st_hba *hba)
        void __iomem *base = hba->mmio_base;
        struct handshake_frame *h;
        dma_addr_t status_phys;
-       int i;
+       u32 data;
+       unsigned long before;
 
        if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
                writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
                readl(base + IDBL);
-               for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
-                       && i < MU_MAX_DELAY_TIME; i++) {
+               before = jiffies;
+               while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+                       if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+                               printk(KERN_ERR DRV_NAME
+                                       "(%s): no handshake signature\n",
+                                       pci_name(hba->pdev));
+                               return -1;
+                       }
                        rmb();
                        msleep(1);
                }
-
-               if (i == MU_MAX_DELAY_TIME) {
-                       printk(KERN_ERR DRV_NAME
-                               "(%s): no handshake signature\n",
-                               pci_name(hba->pdev));
-                       return -1;
-               }
        }
 
        udelay(10);
 
+       data = readl(base + OMR1);
+       if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
+               data &= 0x0000ffff;
+               if (hba->host->can_queue > data)
+                       hba->host->can_queue = data;
+       }
+
        h = (struct handshake_frame *)(hba->dma_mem + MU_REQ_BUFFER_SIZE);
        h->rb_phy = cpu_to_le32(hba->dma_handle);
        h->rb_phy_hi = cpu_to_le32((hba->dma_handle >> 16) >> 16);
@@ -931,6 +945,11 @@ static int stex_handshake(struct st_hba *hba)
        h->status_cnt = cpu_to_le16(MU_STATUS_COUNT);
        stex_gettime(&h->hosttime);
        h->partner_type = HMU_PARTNER_TYPE;
+       if (hba->dma_size > STEX_BUFFER_SIZE) {
+               h->extra_offset = cpu_to_le32(STEX_BUFFER_SIZE);
+               h->extra_size = cpu_to_le32(ST_ADDITIONAL_MEM);
+       } else
+               h->extra_offset = h->extra_size = 0;
 
        status_phys = hba->dma_handle + MU_REQ_BUFFER_SIZE;
        writel(status_phys, base + IMR0);
@@ -944,19 +963,18 @@ static int stex_handshake(struct st_hba *hba)
        readl(base + IDBL); /* flush */
 
        udelay(10);
-       for (i = 0; readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE
-               && i < MU_MAX_DELAY_TIME; i++) {
+       before = jiffies;
+       while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
+               if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+                       printk(KERN_ERR DRV_NAME
+                               "(%s): no signature after handshake frame\n",
+                               pci_name(hba->pdev));
+                       return -1;
+               }
                rmb();
                msleep(1);
        }
 
-       if (i == MU_MAX_DELAY_TIME) {
-               printk(KERN_ERR DRV_NAME
-                       "(%s): no signature after handshake frame\n",
-                       pci_name(hba->pdev));
-               return -1;
-       }
-
        writel(0, base + IMR0);
        readl(base + IMR0);
        writel(0, base + OMR0);
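
The two handshake waits above move from counting msleep(1) iterations against MU_MAX_DELAY_TIME to bounding the wait by wall-clock time with jiffies and time_after().  A minimal, self-contained sketch of that pattern follows; ready() and timeout_secs are placeholders, not part of the driver (which returns -1 rather than -ETIMEDOUT):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll a condition, sleeping 1 ms per iteration, and give up once the
 * elapsed wall-clock time exceeds the requested number of seconds. */
static int wait_for_ready(int (*ready)(void), unsigned int timeout_secs)
{
	unsigned long before = jiffies;

	while (!ready()) {
		if (time_after(jiffies, before + timeout_secs * HZ))
			return -ETIMEDOUT;	/* time limit reached */
		msleep(1);			/* sleep, do not busy-spin */
	}
	return 0;
}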
@@ -1038,9 +1056,9 @@ static void stex_hard_reset(struct st_hba *hba)
        pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
 
-       for (i = 0; i < MU_MAX_DELAY_TIME; i++) {
+       for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
                pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
-               if (pci_cmd & PCI_COMMAND_MASTER)
+               if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
                        break;
                msleep(1);
        }
@@ -1100,18 +1118,18 @@ static int stex_reset(struct scsi_cmnd *cmd)
 static int stex_biosparam(struct scsi_device *sdev,
        struct block_device *bdev, sector_t capacity, int geom[])
 {
-       int heads = 255, sectors = 63, cylinders;
+       int heads = 255, sectors = 63;
 
        if (capacity < 0x200000) {
                heads = 64;
                sectors = 32;
        }
 
-       cylinders = sector_div(capacity, heads * sectors);
+       sector_div(capacity, heads * sectors);
 
        geom[0] = heads;
        geom[1] = sectors;
-       geom[2] = cylinders;
+       geom[2] = capacity;
 
        return 0;
 }
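
The stex_biosparam() hunk above also fixes the geometry math: sector_div() divides its first argument in place and returns the remainder, so the cylinder count is the quotient left behind in capacity, not the return value.  A hedged, illustrative sketch of the corrected computation (the exact header providing sector_div() in this kernel era is an assumption):

#include <linux/kernel.h>	/* sector_div(); assumed location */
#include <linux/types.h>

/* Derive a synthetic CHS geometry from a capacity given in 512-byte
 * sectors; below 1 GiB (0x200000 sectors) a smaller geometry is used. */
static void chs_from_capacity(sector_t capacity, int geom[3])
{
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {
		heads = 64;
		sectors = 32;
	}
	sector_div(capacity, heads * sectors);	/* capacity now holds cylinders */

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = capacity;
}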
@@ -1193,8 +1211,13 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_iounmap;
        }
 
+       hba->cardtype = (unsigned int) id->driver_data;
+       if (hba->cardtype == st_vsc && (pdev->subsystem_device & 0xf) == 0x1)
+               hba->cardtype = st_vsc1;
+       hba->dma_size = (hba->cardtype == st_vsc1) ?
+               (STEX_BUFFER_SIZE + ST_ADDITIONAL_MEM) : (STEX_BUFFER_SIZE);
        hba->dma_mem = dma_alloc_coherent(&pdev->dev,
-               STEX_BUFFER_SIZE, &hba->dma_handle, GFP_KERNEL);
+               hba->dma_size, &hba->dma_handle, GFP_KERNEL);
        if (!hba->dma_mem) {
                err = -ENOMEM;
                printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
@@ -1207,8 +1230,6 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        hba->copy_buffer = hba->dma_mem + MU_BUFFER_SIZE;
        hba->mu_status = MU_STATE_STARTING;
 
-       hba->cardtype = (unsigned int) id->driver_data;
-
        /* firmware uses id/lun pair for a logical drive, but lun would be
           always 0 if CONFIG_SCSI_MULTI_LUN not configured, so we use
           channel to map lun here */
@@ -1233,7 +1254,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (err)
                goto out_free_irq;
 
-       err = scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
+       err = scsi_init_shared_tag_map(host, host->can_queue);
        if (err) {
                printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
                        pci_name(pdev));
@@ -1256,7 +1277,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 out_free_irq:
        free_irq(pdev->irq, hba);
 out_pci_free:
-       dma_free_coherent(&pdev->dev, STEX_BUFFER_SIZE,
+       dma_free_coherent(&pdev->dev, hba->dma_size,
                          hba->dma_mem, hba->dma_handle);
 out_iounmap:
        iounmap(hba->mmio_base);
@@ -1317,7 +1338,7 @@ static void stex_hba_free(struct st_hba *hba)
 
        pci_release_regions(hba->pdev);
 
-       dma_free_coherent(&hba->pdev->dev, STEX_BUFFER_SIZE,
+       dma_free_coherent(&hba->pdev->dev, hba->dma_size,
                          hba->dma_mem, hba->dma_handle);
 }
 
@@ -1346,15 +1367,32 @@ static void stex_shutdown(struct pci_dev *pdev)
 }
 
 static struct pci_device_id stex_pci_tbl[] = {
-       { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x105a, 0xf350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x105a, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x105a, 0x8301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x105a, 0x8302, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_shasta },
-       { 0x1725, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
-       { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite },
+       /* st_shasta */
+       { 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
+       { 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               st_shasta }, /* SuperTrak EX12350 */
+       { 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               st_shasta }, /* SuperTrak EX4350 */
+       { 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               st_shasta }, /* SuperTrak EX24350 */
+
+       /* st_vsc */
+       { 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
+
+       /* st_yosemite */
+       { 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0,
+               st_yosemite }, /* SuperTrak EX4650 */
+       { 0x105a, 0x8650, PCI_ANY_ID, 0x4610, 0, 0,
+               st_yosemite }, /* SuperTrak EX4650o */
+       { 0x105a, 0x8650, PCI_ANY_ID, 0x8600, 0, 0,
+               st_yosemite }, /* SuperTrak EX8650EL */
+       { 0x105a, 0x8650, PCI_ANY_ID, 0x8601, 0, 0,
+               st_yosemite }, /* SuperTrak EX8650 */
+       { 0x105a, 0x8650, PCI_ANY_ID, 0x8602, 0, 0,
+               st_yosemite }, /* SuperTrak EX8654 */
+       { 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               st_yosemite }, /* generic st_yosemite */
        { }     /* terminate list */
 };
 MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
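
The reordered table relies on the PCI core comparing entries top to bottom and taking the first match, so the subsystem-specific st_yosemite rows must come before the PCI_ANY_ID catch-all for the same vendor/device pair.  A small illustrative table showing that ordering (the driver_data values here are arbitrary placeholders):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_pci_tbl[] = {
	/* specific subsystem ID first, so it can win the match */
	{ 0x105a, 0x8650, PCI_ANY_ID, 0x4600, 0, 0, 1 },
	/* wildcard catch-all for the same vendor/device pair last */
	{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);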
index 646e840266e29fb2b6a0fce71aef1906386b279f..76a069b7ac0bbd7ef2ae26a61a4c612b4f67b340 100644 (file)
@@ -8,20 +8,20 @@
  *     drew@colorado.edu
  *      +1 (303) 440-4894
  *
- * DISTRIBUTION RELEASE 3. 
+ * DISTRIBUTION RELEASE 3.
  *
- * For more information, please consult 
+ * For more information, please consult
  *
  * Trantor Systems, Ltd.
  * T128/T128F/T228 SCSI Host Adapter
  * Hardware Specifications
- * 
- * Trantor Systems, Ltd. 
+ *
+ * Trantor Systems, Ltd.
  * 5415 Randall Place
  * Fremont, CA 94538
  * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
- * 
- * and 
+ *
+ * and
  *
  * NCR 5380 Family
  * SCSI Protocol Controller
 #define TDEBUG_TRANSFER 0x2
 
 /*
- * The trantor boards are memory mapped. They use an NCR5380 or 
+ * The trantor boards are memory mapped. They use an NCR5380 or
  * equivalent (my sample board had part second sourced from ZILOG).
- * NCR's recommended "Pseudo-DMA" architecture is used, where 
+ * NCR's recommended "Pseudo-DMA" architecture is used, where
  * a PAL drives the DMA signals on the 5380 allowing fast, blind
- * transfers with proper handshaking. 
+ * transfers with proper handshaking.
  */
 
 /*
- * Note : a boot switch is provided for the purpose of informing the 
+ * Note : a boot switch is provided for the purpose of informing the
  * firmware to boot or not boot from attached SCSI devices.  So, I imagine
  * there are fewer people who've yanked the ROM like they do on the Seagate
  * to make bootup faster, and I'll probably use this for autodetection.
 #define T_DATA_REG_OFFSET      0x1e00  /* rw 512 bytes long */
 
 #ifndef ASM
-static int t128_abort(Scsi_Cmnd *);
+static int t128_abort(struct scsi_cmnd *);
 static int t128_biosparam(struct scsi_device *, struct block_device *,
                          sector_t, int*);
 static int t128_detect(struct scsi_host_template *);
-static int t128_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-static int t128_bus_reset(Scsi_Cmnd *);
+static int t128_queue_command(struct scsi_cmnd *,
+                             void (*done)(struct scsi_cmnd *));
+static int t128_bus_reset(struct scsi_cmnd *);
 
 #ifndef CMD_PER_LUN
 #define CMD_PER_LUN 2
 #endif
 
 #ifndef CAN_QUEUE
-#define CAN_QUEUE 32 
+#define CAN_QUEUE 32
 #endif
 
 #ifndef HOSTS_C
@@ -120,7 +121,7 @@ static int t128_bus_reset(Scsi_Cmnd *);
 
 #define T128_address(reg) (base + T_5380_OFFSET + ((reg) * 0x20))
 
-#if !(TDEBUG & TDEBUG_TRANSFER) 
+#if !(TDEBUG & TDEBUG_TRANSFER)
 #define NCR5380_read(reg) readb(T128_address(reg))
 #define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
 #else
@@ -129,7 +130,7 @@ static int t128_bus_reset(Scsi_Cmnd *);
     , instance->hostno, (reg), T128_address(reg))), readb(T128_address(reg)))
 
 #define NCR5380_write(reg, value) {                                    \
-    printk("scsi%d : write %02x to register %d at address %08x\n",     \
+    printk("scsi%d : write %02x to register %d at address %08x\n",     \
            instance->hostno, (value), (reg), T128_address(reg));       \
     writeb((value), (T128_address(reg)));                              \
 }
@@ -142,10 +143,10 @@ static int t128_bus_reset(Scsi_Cmnd *);
 #define NCR5380_bus_reset t128_bus_reset
 #define NCR5380_proc_info t128_proc_info
 
-/* 15 14 12 10 7 5 3 
+/* 15 14 12 10 7 5 3
    1101 0100 1010 1000 */
-   
-#define T128_IRQS 0xc4a8 
+
+#define T128_IRQS 0xc4a8
 
 #endif /* else def HOSTS_C */
 #endif /* ndef ASM */
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/serial/8250_exar_st16c554.c
new file mode 100644 (file)
index 0000000..567143a
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ *  linux/drivers/serial/8250_exar.c
+ *
+ *  Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com >
+ *  Based on 8250_boca.
+ *
+ *  Copyright (C) 2005 Russell King.
+ *  Data taken from include/asm-i386/serial.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+
+#define PORT(_base,_irq)                               \
+       {                                               \
+               .iobase         = _base,                \
+               .irq            = _irq,                 \
+               .uartclk        = 1843200,              \
+               .iotype         = UPIO_PORT,            \
+               .flags          = UPF_BOOT_AUTOCONF,    \
+       }
+
+static struct plat_serial8250_port exar_data[] = {
+       PORT(0x100, 5),
+       PORT(0x108, 5),
+       PORT(0x110, 5),
+       PORT(0x118, 5),
+       { },
+};
+
+static struct platform_device exar_device = {
+       .name                   = "serial8250",
+       .id                     = PLAT8250_DEV_EXAR_ST16C554,
+       .dev                    = {
+               .platform_data  = exar_data,
+       },
+};
+
+static int __init exar_init(void)
+{
+       return platform_device_register(&exar_device);
+}
+
+module_init(exar_init);
+
+MODULE_AUTHOR("Paul B Schroeder");
+MODULE_DESCRIPTION("8250 serial probe module for Exar cards");
+MODULE_LICENSE("GPL");
index 71d907c8288b15a69214dc47f19578495da8a33c..d3d6b82706b5edbb548211187e96e28956859a4a 100644 (file)
@@ -464,11 +464,38 @@ static void __devexit serial_pnp_remove(struct pnp_dev *dev)
                serial8250_unregister_port(line - 1);
 }
 
+#ifdef CONFIG_PM
+static int serial_pnp_suspend(struct pnp_dev *dev, pm_message_t state)
+{
+       long line = (long)pnp_get_drvdata(dev);
+
+       if (!line)
+               return -ENODEV;
+       serial8250_suspend_port(line - 1);
+       return 0;
+}
+
+static int serial_pnp_resume(struct pnp_dev *dev)
+{
+       long line = (long)pnp_get_drvdata(dev);
+
+       if (!line)
+               return -ENODEV;
+       serial8250_resume_port(line - 1);
+       return 0;
+}
+#else
+#define serial_pnp_suspend NULL
+#define serial_pnp_resume NULL
+#endif /* CONFIG_PM */
+
 static struct pnp_driver serial_pnp_driver = {
        .name           = "serial",
-       .id_table       = pnp_dev_table,
        .probe          = serial_pnp_probe,
        .remove         = __devexit_p(serial_pnp_remove),
+       .suspend        = serial_pnp_suspend,
+       .resume         = serial_pnp_resume,
+       .id_table       = pnp_dev_table,
 };
 
 static int __init serial8250_pnp_init(void)
index 0b71e7d18903ddd6d36e1d859cde23beee02c891..fc12d5df10e2175f3c6346c7a371ca79da8c6780 100644 (file)
@@ -210,6 +210,17 @@ config SERIAL_8250_BOCA
          To compile this driver as a module, choose M here: the module
          will be called 8250_boca.
 
+config SERIAL_8250_EXAR_ST16C554
+       tristate "Support Exar ST16C554/554D Quad UART"
+       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+       help
+         The Uplogix Envoy TU301 uses this Exar Quad UART.  If you are
+         tinkering with your Envoy TU301, or have a machine with this UART,
+         say Y here.
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_exar_st16c554.
+
 config SERIAL_8250_HUB6
        tristate "Support Hub6 cards"
        depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
@@ -511,6 +522,25 @@ config SERIAL_IMX_CONSOLE
          your boot loader (lilo or loadlin) about how to pass options to the
          kernel at boot time.)
 
+config SERIAL_UARTLITE
+       tristate "Xilinx uartlite serial port support"
+       depends on PPC32
+       select SERIAL_CORE
+       help
+         Say Y here if you want to use the Xilinx uartlite serial controller.
+
+         To compile this driver as a module, choose M here: the
+         module will be called uartlite.ko.
+
+config SERIAL_UARTLITE_CONSOLE
+       bool "Support for console on Xilinx uartlite serial port"
+       depends on SERIAL_UARTLITE=y
+       select SERIAL_CORE_CONSOLE
+       help
+         Say Y here if you wish to use a Xilinx uartlite as the system
+         console (the system console is the device which receives all kernel
+         messages and warnings and which allows logins in single user mode).
+
 config SERIAL_SUNCORE
        bool
        depends on SPARC
index b4d8a7c182e3a7b780cf2a9dfe8c87e4298055f2..df3632cd7df92e449cdc90bc61dcddc06d692209 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
 obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
 obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
 obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
 obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
 obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
 obj-$(CONFIG_SERIAL_8250_AU1X00) += 8250_au1x00.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
 obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
 obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
 obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
+obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
index 4213fabc62bf864a033aca5ff1c5b120b830cf57..4d3626ef46438c324a569dd980a6e6b85e51e481 100644 (file)
@@ -129,6 +129,8 @@ static void pl010_rx_chars(struct uart_port *port)
                 */
                rsr = readb(port->membase + UART01x_RSR) | UART_DUMMY_RSR_RX;
                if (unlikely(rsr & UART01x_RSR_ANY)) {
+                       writel(0, port->membase + UART01x_ECR);
+
                        if (rsr & UART01x_RSR_BE) {
                                rsr &= ~(UART01x_RSR_FE | UART01x_RSR_PE);
                                port->icount.brk++;
index b691d3e14754fa94a40ff0eac22889b2e2afda33..787a8f134677c0a1b7f0f8ab17778bb267dd247a 100644 (file)
@@ -282,7 +282,7 @@ void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
 }
 
 /* Setup any dynamic params in the uart desc */
-int cpm_uart_init_portdesc(void)
+int __init cpm_uart_init_portdesc(void)
 {
 #if defined(CONFIG_SERIAL_CPM_SMC1) || defined(CONFIG_SERIAL_CPM_SMC2)
        u32 addr;
index 53662b33b84164c9f023033a3cdbb7d4dcd5b429..af1544f3356f7656376a01786436fde357f74c99 100644 (file)
@@ -1,11 +1,13 @@
 /*
- * dz.c: Serial port driver for DECStations equiped
+ * dz.c: Serial port driver for DECstations equipped
  *       with the DZ chipset.
  *
  * Copyright (C) 1998 Olivier A. D. Lebaillif
  *
  * Email: olivier.lebaillif@ifrsys.com
  *
+ * Copyright (C) 2004, 2006  Maciej W. Rozycki
+ *
  * [31-AUG-98] triemer
  * Changed IRQ to use Harald's dec internals interrupts.h
  * removed base_addr code - moving address assignment to setup.c
 
 #undef DEBUG_DZ
 
+#if defined(CONFIG_SERIAL_DZ_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/console.h>
+#include <linux/sysrq.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-#define CONSOLE_LINE (3)       /* for definition of struct console */
-
 #include "dz.h"
 
-#define DZ_INTR_DEBUG 1
-
 static char *dz_name = "DECstation DZ serial driver version ";
-static char *dz_version = "1.02";
+static char *dz_version = "1.03";
 
 struct dz_port {
        struct uart_port        port;
@@ -61,22 +65,6 @@ struct dz_port {
 
 static struct dz_port dz_ports[DZ_NB_PORT];
 
-#ifdef DEBUG_DZ
-/*
- * debugging code to send out chars via prom
- */
-static void debug_console(const char *s, int count)
-{
-       unsigned i;
-
-       for (i = 0; i < count; i++) {
-               if (*s == 10)
-                       prom_printf("%c", 13);
-               prom_printf("%c", *s++);
-       }
-}
-#endif
-
 /*
  * ------------------------------------------------------------
  * dz_in () and dz_out ()
@@ -90,6 +78,7 @@ static inline unsigned short dz_in(struct dz_port *dport, unsigned offset)
 {
        volatile unsigned short *addr =
                (volatile unsigned short *) (dport->port.membase + offset);
+
        return *addr;
 }
 
@@ -98,6 +87,7 @@ static inline void dz_out(struct dz_port *dport, unsigned offset,
 {
        volatile unsigned short *addr =
                (volatile unsigned short *) (dport->port.membase + offset);
+
        *addr = value;
 }
 
@@ -144,7 +134,7 @@ static void dz_stop_rx(struct uart_port *uport)
 
        spin_lock_irqsave(&dport->port.lock, flags);
        dport->cflag &= ~DZ_CREAD;
-       dz_out(dport, DZ_LPR, dport->cflag);
+       dz_out(dport, DZ_LPR, dport->cflag | dport->port.line);
        spin_unlock_irqrestore(&dport->port.lock, flags);
 }
 
@@ -155,14 +145,14 @@ static void dz_enable_ms(struct uart_port *port)
 
 /*
  * ------------------------------------------------------------
- * Here starts the interrupt handling routines.  All of the
- * following subroutines are declared as inline and are folded
- * into dz_interrupt.  They were separated out for readability's
- * sake.
  *
- * Note: rs_interrupt() is a "fast" interrupt, which means that it
+ * Here start the interrupt handling routines.  All of the following
+ * subroutines are declared as inline and are folded into
+ * dz_interrupt.  They were separated out for readability's sake.
+ *
+ * Note: dz_interrupt() is a "fast" interrupt, which means that it
  * runs with interrupts turned off.  People who may want to modify
- * rs_interrupt() should try to keep the interrupt handler as fast as
+ * dz_interrupt() should try to keep the interrupt handler as fast as
  * possible.  After you are done making modifications, it is not a bad
  * idea to do:
  *
@@ -180,92 +170,74 @@ static void dz_enable_ms(struct uart_port *port)
  * This routine deals with inputs from any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_receive_chars(struct dz_port *dport)
+static inline void dz_receive_chars(struct dz_port *dport_in,
+                                   struct pt_regs *regs)
 {
+       struct dz_port *dport;
        struct tty_struct *tty = NULL;
        struct uart_icount *icount;
-       int ignore = 0;
-       unsigned short status, tmp;
+       int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
+       unsigned short status;
        unsigned char ch, flag;
+       int i;
 
-       /* this code is going to be a problem...
-          the call to tty_flip_buffer is going to need
-          to be rethought...
-        */
-       do {
-               status = dz_in(dport, DZ_RBUF);
-
-               /* punt so we don't get duplicate characters */
-               if (!(status & DZ_DVAL))
-                       goto ignore_char;
-
-
-               ch = UCHAR(status);     /* grab the char */
-               flag = TTY_NORMAL;
+       while ((status = dz_in(dport_in, DZ_RBUF)) & DZ_DVAL) {
+               dport = &dz_ports[LINE(status)];
+               tty = dport->port.info->tty;    /* point to the proper dev */
 
-#if 0
-               if (info->is_console) {
-                       if (ch == 0)
-                               return;         /* it's a break ... */
-               }
-#endif
+               ch = UCHAR(status);             /* grab the char */
 
-               tty = dport->port.info->tty;/* now tty points to the proper dev */
                icount = &dport->port.icount;
-
-               if (!tty)
-                       break;
-
                icount->rx++;
 
-               /* keep track of the statistics */
-               if (status & (DZ_OERR | DZ_FERR | DZ_PERR)) {
-                       if (status & DZ_PERR)   /* parity error */
-                               icount->parity++;
-                       else if (status & DZ_FERR)      /* frame error */
-                               icount->frame++;
-                       if (status & DZ_OERR)   /* overrun error */
-                               icount->overrun++;
-
-                       /*  check to see if we should ignore the character
-                          and mask off conditions that should be ignored
+               flag = TTY_NORMAL;
+               if (status & DZ_FERR) {         /* frame error */
+                       /*
+                        * There is no separate BREAK status bit, so
+                        * treat framing errors as BREAKs for Magic SysRq
+                        * and SAK; normally, otherwise.
                         */
-
-                       if (status & dport->port.ignore_status_mask) {
-                               if (++ignore > 100)
-                                       break;
-                               goto ignore_char;
-                       }
-                       /* mask off the error conditions we want to ignore */
-                       tmp = status & dport->port.read_status_mask;
-
-                       if (tmp & DZ_PERR) {
-                               flag = TTY_PARITY;
-#ifdef DEBUG_DZ
-                               debug_console("PERR\n", 5);
-#endif
-                       } else if (tmp & DZ_FERR) {
+                       if (uart_handle_break(&dport->port))
+                               continue;
+                       if (dport->port.flags & UPF_SAK)
+                               flag = TTY_BREAK;
+                       else
                                flag = TTY_FRAME;
-#ifdef DEBUG_DZ
-                               debug_console("FERR\n", 5);
-#endif
-                       }
-                       if (tmp & DZ_OERR) {
-#ifdef DEBUG_DZ
-                               debug_console("OERR\n", 5);
-#endif
-                               tty_insert_flip_char(tty, ch, flag);
-                               ch = 0;
-                               flag = TTY_OVERRUN;
-                       }
+               } else if (status & DZ_OERR)    /* overrun error */
+                       flag = TTY_OVERRUN;
+               else if (status & DZ_PERR)      /* parity error */
+                       flag = TTY_PARITY;
+
+               /* keep track of the statistics */
+               switch (flag) {
+               case TTY_FRAME:
+                       icount->frame++;
+                       break;
+               case TTY_PARITY:
+                       icount->parity++;
+                       break;
+               case TTY_OVERRUN:
+                       icount->overrun++;
+                       break;
+               case TTY_BREAK:
+                       icount->brk++;
+                       break;
+               default:
+                       break;
                }
-               tty_insert_flip_char(tty, ch, flag);
-             ignore_char:
-                       ;
-       } while (status & DZ_DVAL);
 
-       if (tty)
-               tty_flip_buffer_push(tty);
+               if (uart_handle_sysrq_char(&dport->port, ch, regs))
+                       continue;
+
+               if ((status & dport->port.ignore_status_mask) == 0) {
+                       uart_insert_char(&dport->port,
+                                        status, DZ_OERR, ch, flag);
+                       lines_rx[LINE(status)] = 1;
+               }
+       }
+       for (i = 0; i < DZ_NB_PORT; i++)
+               if (lines_rx[i])
+                       tty_flip_buffer_push(dz_ports[i].port.info->tty);
 }
 
 /*
@@ -275,26 +247,32 @@ static inline void dz_receive_chars(struct dz_port *dport)
  * This routine deals with outputs to any lines.
  * ------------------------------------------------------------
  */
-static inline void dz_transmit_chars(struct dz_port *dport)
+static inline void dz_transmit_chars(struct dz_port *dport_in)
 {
-       struct circ_buf *xmit = &dport->port.info->xmit;
+       struct dz_port *dport;
+       struct circ_buf *xmit;
+       unsigned short status;
        unsigned char tmp;
 
-       if (dport->port.x_char) {       /* XON/XOFF chars */
+       status = dz_in(dport_in, DZ_CSR);
+       dport = &dz_ports[LINE(status)];
+       xmit = &dport->port.info->xmit;
+
+       if (dport->port.x_char) {               /* XON/XOFF chars */
                dz_out(dport, DZ_TDR, dport->port.x_char);
                dport->port.icount.tx++;
                dport->port.x_char = 0;
                return;
        }
-       /* if nothing to do or stopped or hardware stopped */
+       /* If nothing to do or stopped or hardware stopped. */
        if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
                dz_stop_tx(&dport->port);
                return;
        }
 
        /*
-        * if something to do ... (rember the dz has no output fifo so we go
-        * one char at a time :-<
+        * If something to do... (remember the dz has no output fifo,
+        * so we go one char at a time) :-<
         */
        tmp = xmit->buf[xmit->tail];
        xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1);
@@ -304,23 +282,29 @@ static inline void dz_transmit_chars(struct dz_port *dport)
        if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS)
                uart_write_wakeup(&dport->port);
 
-       /* Are we done */
+       /* Are we done? */
        if (uart_circ_empty(xmit))
                dz_stop_tx(&dport->port);
 }
 
 /*
  * ------------------------------------------------------------
- * check_modem_status ()
+ * check_modem_status()
  *
- * Only valid for the MODEM line duh !
+ * DS 3100 & 5100: Only valid for the MODEM line, duh!
+ * DS 5000/200: Valid for the MODEM and PRINTER line.
  * ------------------------------------------------------------
  */
 static inline void check_modem_status(struct dz_port *dport)
 {
+       /*
+        * FIXME:
+        * 1. No status change interrupt; use a timer.
+        * 2. Handle the 3100/5000 as appropriate. --macro
+        */
        unsigned short status;
 
-       /* if not ne modem line just return */
+       /* If not the modem line just return.  */
        if (dport->port.line != DZ_MODEM)
                return;
 
@@ -341,21 +325,18 @@ static inline void check_modem_status(struct dz_port *dport)
  */
 static irqreturn_t dz_interrupt(int irq, void *dev)
 {
-       struct dz_port *dport;
+       struct dz_port *dport = (struct dz_port *)dev;
        unsigned short status;
 
        /* get the reason why we just got an irq */
-       status = dz_in((struct dz_port *)dev, DZ_CSR);
-       dport = &dz_ports[LINE(status)];
+       status = dz_in(dport, DZ_CSR);
 
-       if (status & DZ_RDONE)
-               dz_receive_chars(dport);
+       if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
+               dz_receive_chars(dport, regs);
 
-       if (status & DZ_TRDY)
+       if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
                dz_transmit_chars(dport);
 
-       /* FIXME: what about check modem status??? --rmk */
-
        return IRQ_HANDLED;
 }
 
@@ -367,13 +348,13 @@ static irqreturn_t dz_interrupt(int irq, void *dev)
 
 static unsigned int dz_get_mctrl(struct uart_port *uport)
 {
+       /*
+        * FIXME: Handle the 3100/5000 as appropriate. --macro
+        */
        struct dz_port *dport = (struct dz_port *)uport;
        unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
 
        if (dport->port.line == DZ_MODEM) {
-               /*
-                * CHECKME: This is a guess from the other code... --rmk
-                */
                if (dz_in(dport, DZ_MSR) & DZ_MODEM_DSR)
                        mctrl &= ~TIOCM_DSR;
        }
@@ -383,6 +364,9 @@ static unsigned int dz_get_mctrl(struct uart_port *uport)
 
 static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
 {
+       /*
+        * FIXME: Handle the 3100/5000 as appropriate. --macro
+        */
        struct dz_port *dport = (struct dz_port *)uport;
        unsigned short tmp;
 
@@ -409,13 +393,6 @@ static int dz_startup(struct uart_port *uport)
        unsigned long flags;
        unsigned short tmp;
 
-       /* The dz lines for the mouse/keyboard must be
-        * opened using their respective drivers.
-        */
-       if ((dport->port.line == DZ_KEYBOARD) ||
-           (dport->port.line == DZ_MOUSE))
-               return -ENODEV;
-
        spin_lock_irqsave(&dport->port.lock, flags);
 
        /* enable the interrupt and the scanning */
@@ -442,7 +419,8 @@ static void dz_shutdown(struct uart_port *uport)
 }
 
 /*
- * get_lsr_info - get line status register info
+ * -------------------------------------------------------------------
+ * dz_tx_empty() -- get the transmitter empty status
  *
  * Purpose: Let user call ioctl() to get info when the UART physically
  *          is emptied.  On bus types like RS485, the transmitter must
@@ -450,21 +428,28 @@ static void dz_shutdown(struct uart_port *uport)
  *          the transmit shift register is empty, not be done when the
  *          transmit holding register is empty.  This functionality
  *          allows an RS485 driver to be written in user space.
+ * -------------------------------------------------------------------
  */
 static unsigned int dz_tx_empty(struct uart_port *uport)
 {
        struct dz_port *dport = (struct dz_port *)uport;
-       unsigned short status = dz_in(dport, DZ_LPR);
+       unsigned short tmp, mask = 1 << dport->port.line;
 
-       /* FIXME: this appears to be obviously broken --rmk. */
-       return status ? TIOCSER_TEMT : 0;
+       tmp = dz_in(dport, DZ_TCR);
+       tmp &= mask;
+
+       return tmp ? 0 : TIOCSER_TEMT;
 }
 
 static void dz_break_ctl(struct uart_port *uport, int break_state)
 {
+       /*
+        * FIXME: Can't access BREAK bits in TDR easily;
+        * reuse the code for polled TX. --macro
+        */
        struct dz_port *dport = (struct dz_port *)uport;
        unsigned long flags;
-       unsigned short tmp, mask = 1 << uport->line;
+       unsigned short tmp, mask = 1 << dport->port.line;
 
        spin_lock_irqsave(&uport->lock, flags);
        tmp = dz_in(dport, DZ_TCR);
@@ -561,7 +546,7 @@ static void dz_set_termios(struct uart_port *uport, struct termios *termios,
 
        spin_lock_irqsave(&dport->port.lock, flags);
 
-       dz_out(dport, DZ_LPR, cflag);
+       dz_out(dport, DZ_LPR, cflag | dport->port.line);
        dport->cflag = cflag;
 
        /* setup accept flag */
@@ -650,7 +635,7 @@ static void __init dz_init_ports(void)
        for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) {
                spin_lock_init(&dport->port.lock);
                dport->port.membase     = (char *) base;
-               dport->port.iotype      = UPIO_PORT;
+               dport->port.iotype      = UPIO_MEM;
                dport->port.irq         = dec_interrupt[DEC_IRQ_DZ11];
                dport->port.line        = i;
                dport->port.fifosize    = 1;
@@ -662,10 +647,7 @@ static void __init dz_init_ports(void)
 static void dz_reset(struct dz_port *dport)
 {
        dz_out(dport, DZ_CSR, DZ_CLR);
-
        while (dz_in(dport, DZ_CSR) & DZ_CLR);
-               /* FIXME: cpu_relax? */
-
        iob();
 
        /* enable scanning */
@@ -673,26 +655,55 @@ static void dz_reset(struct dz_port *dport)
 }
 
 #ifdef CONFIG_SERIAL_DZ_CONSOLE
+/*
+ * -------------------------------------------------------------------
+ * dz_console_putchar() -- transmit a character
+ *
+ * Polled transmission.  This is tricky.  We need to mask transmit
+ * interrupts so that they do not interfere, enable the transmitter
+ * for the line requested and then wait till the transmit scanner
+ * requests data for this line.  But it may request data for another
+ * line first, in which case we have to disable its transmitter and
+ * repeat waiting till our line pops up.  Only then the character may
+ * be transmitted.  Finally, the state of the transmitter mask is
+ * restored.  Welcome to the world of PDP-11!
+ * -------------------------------------------------------------------
+ */
 static void dz_console_putchar(struct uart_port *uport, int ch)
 {
        struct dz_port *dport = (struct dz_port *)uport;
        unsigned long flags;
-       int loops = 2500;
-       unsigned short tmp = (unsigned char)ch;
-       /* this code sends stuff out to serial device - spinning its
-          wheels and waiting. */
+       unsigned short csr, tcr, trdy, mask;
+       int loops = 10000;
 
        spin_lock_irqsave(&dport->port.lock, flags);
+       csr = dz_in(dport, DZ_CSR);
+       dz_out(dport, DZ_CSR, csr & ~DZ_TIE);
+       tcr = dz_in(dport, DZ_TCR);
+       tcr |= 1 << dport->port.line;
+       mask = tcr;
+       dz_out(dport, DZ_TCR, mask);
+       iob();
+       spin_unlock_irqrestore(&dport->port.lock, flags);
 
-       /* spin our wheels */
-       while (((dz_in(dport, DZ_CSR) & DZ_TRDY) != DZ_TRDY) && loops--)
-               /* FIXME: cpu_relax, udelay? --rmk */
-               ;
+       while (loops--) {
+               trdy = dz_in(dport, DZ_CSR);
+               if (!(trdy & DZ_TRDY))
+                       continue;
+               trdy = (trdy & DZ_TLINE) >> 8;
+               if (trdy == dport->port.line)
+                       break;
+               mask &= ~(1 << trdy);
+               dz_out(dport, DZ_TCR, mask);
+               iob();
+               udelay(2);
+       }
 
-       /* Actually transmit the character. */
-       dz_out(dport, DZ_TDR, tmp);
+       if (loops)                              /* Cannot send otherwise. */
+               dz_out(dport, DZ_TDR, ch);
 
-       spin_unlock_irqrestore(&dport->port.lock, flags);
+       dz_out(dport, DZ_TCR, tcr);
+       dz_out(dport, DZ_CSR, csr);
 }
 
 /*
@@ -703,11 +714,11 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
  * The console must be locked when we get here.
  * -------------------------------------------------------------------
  */
-static void dz_console_print(struct console *cons,
+static void dz_console_print(struct console *co,
                             const char *str,
                             unsigned int count)
 {
-       struct dz_port *dport = &dz_ports[CONSOLE_LINE];
+       struct dz_port *dport = &dz_ports[co->index];
 #ifdef DEBUG_DZ
        prom_printf((char *) str);
 #endif
@@ -716,49 +727,43 @@ static void dz_console_print(struct console *cons,
 
 static int __init dz_console_setup(struct console *co, char *options)
 {
-       struct dz_port *dport = &dz_ports[CONSOLE_LINE];
+       struct dz_port *dport = &dz_ports[co->index];
        int baud = 9600;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
-       int ret;
-       unsigned short mask, tmp;
 
        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);
 
        dz_reset(dport);
 
-       ret = uart_set_options(&dport->port, co, baud, parity, bits, flow);
-       if (ret == 0) {
-               mask = 1 << dport->port.line;
-               tmp = dz_in(dport, DZ_TCR);     /* read the TX flag */
-               if (!(tmp & mask)) {
-                       tmp |= mask;            /* set the TX flag */
-                       dz_out(dport, DZ_TCR, tmp);
-               }
-       }
-
-       return ret;
+       return uart_set_options(&dport->port, co, baud, parity, bits, flow);
 }
 
-static struct console dz_sercons =
-{
+static struct uart_driver dz_reg;
+static struct console dz_sercons = {
        .name   = "ttyS",
        .write  = dz_console_print,
        .device = uart_console_device,
        .setup  = dz_console_setup,
-       .flags  = CON_CONSDEV | CON_PRINTBUFFER,
-       .index  = CONSOLE_LINE,
+       .flags  = CON_PRINTBUFFER,
+       .index  = -1,
+       .data   = &dz_reg,
 };
 
-void __init dz_serial_console_init(void)
+static int __init dz_serial_console_init(void)
 {
-       dz_init_ports();
-
-       register_console(&dz_sercons);
+       if (!IOASIC) {
+               dz_init_ports();
+               register_console(&dz_sercons);
+               return 0;
+       } else
+               return -ENXIO;
 }
 
+console_initcall(dz_serial_console_init);
+
 #define SERIAL_DZ_CONSOLE      &dz_sercons
 #else
 #define SERIAL_DZ_CONSOLE      NULL
@@ -767,35 +772,29 @@ void __init dz_serial_console_init(void)
 static struct uart_driver dz_reg = {
        .owner                  = THIS_MODULE,
        .driver_name            = "serial",
-       .dev_name               = "ttyS%d",
+       .dev_name               = "ttyS",
        .major                  = TTY_MAJOR,
        .minor                  = 64,
        .nr                     = DZ_NB_PORT,
        .cons                   = SERIAL_DZ_CONSOLE,
 };
 
-int __init dz_init(void)
+static int __init dz_init(void)
 {
-       unsigned long flags;
        int ret, i;
 
+       if (IOASIC)
+               return -ENXIO;
+
        printk("%s%s\n", dz_name, dz_version);
 
        dz_init_ports();
 
-       save_flags(flags);
-       cli();
-
 #ifndef CONFIG_SERIAL_DZ_CONSOLE
        /* reset the chip */
        dz_reset(&dz_ports[0]);
 #endif
 
-       /* order matters here... the trick is that flags
-          is updated... in request_irq - to immediatedly obliterate
-          it is unwise. */
-       restore_flags(flags);
-
        if (request_irq(dz_ports[0].port.irq, dz_interrupt,
                        IRQF_DISABLED, "DZ", &dz_ports[0]))
                panic("Unable to register DZ interrupt");
@@ -810,5 +809,7 @@ int __init dz_init(void)
        return ret;
 }
 
+module_init(dz_init);
+
 MODULE_DESCRIPTION("DECstation DZ serial driver");
 MODULE_LICENSE("GPL");
index 86ef417382bbbe938b54ece6cdc83ca03b69cbb6..9674d4e4987251ac72e1e7c45e88e0e70228b638 100644 (file)
@@ -1,20 +1,22 @@
 /*
- * dz.h: Serial port driver for DECStations equiped 
+ * dz.h: Serial port driver for DECstations equipped
  *       with the DZ chipset.
  *
  * Copyright (C) 1998 Olivier A. D. Lebaillif 
  *             
  * Email: olivier.lebaillif@ifrsys.com
  *
+ * Copyright (C) 2004, 2006  Maciej W. Rozycki
  */
 #ifndef DZ_SERIAL_H
 #define DZ_SERIAL_H
 
 /*
- * Definitions for the Control and Status Received.
+ * Definitions for the Control and Status Register.
  */
 #define DZ_TRDY        0x8000                 /* Transmitter empty */
-#define DZ_TIE         0x4000                 /* Transmitter Interrupt Enable */
+#define DZ_TIE         0x4000                 /* Transmitter Interrupt Enbl */
+#define DZ_TLINE       0x0300                 /* Transmitter Line Number */
 #define DZ_RDONE       0x0080                 /* Receiver data ready */
 #define DZ_RIE         0x0040                 /* Receive Interrupt Enable */
 #define DZ_MSE         0x0020                 /* Master Scan Enable */
 #define DZ_MAINT       0x0008                 /* Loop Back Mode */
 
 /*
- * Definitions for the Received buffer. 
+ * Definitions for the Receiver Buffer Register.
  */
-#define DZ_RBUF_MASK   0x00FF                 /* Data Mask in the Receive Buffer */
-#define DZ_LINE_MASK   0x0300                 /* Line Mask in the Receive Buffer */
+#define DZ_RBUF_MASK   0x00FF                 /* Data Mask */
+#define DZ_LINE_MASK   0x0300                 /* Line Mask */
 #define DZ_DVAL        0x8000                 /* Valid Data indicator */
 #define DZ_OERR        0x4000                 /* Overrun error indicator */
 #define DZ_FERR        0x2000                 /* Frame error indicator */
 #define DZ_PERR        0x1000                 /* Parity error indicator */
 
-#define LINE(x) (x & DZ_LINE_MASK) >> 8       /* Get the line number from the input buffer */
-#define UCHAR(x) (unsigned char)(x & DZ_RBUF_MASK)
+#define LINE(x) ((x & DZ_LINE_MASK) >> 8)     /* Get the line number
+                                                 from the input buffer */
+#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK))
 
 /*
- * Definitions for the Transmit Register.
+ * Definitions for the Transmit Control Register.
  */
 #define DZ_LINE_KEYBOARD 0x0001
 #define DZ_LINE_MOUSE    0x0002
 #define DZ_LINE_MODEM    0x0004
 #define DZ_LINE_PRINTER  0x0008
 
+#define DZ_MODEM_RTS     0x0800               /* RTS for the modem line (2) */
 #define DZ_MODEM_DTR     0x0400               /* DTR for the modem line (2) */
+#define DZ_PRINT_RTS     0x0200               /* RTS for the prntr line (3) */
+#define DZ_PRINT_DTR     0x0100               /* DTR for the prntr line (3) */
+#define DZ_LNENB         0x000f               /* Transmitter Line Enable */
 
 /*
  * Definitions for the Modem Status Register.
  */
+#define DZ_MODEM_RI      0x0800               /* RI for the modem line (2) */
+#define DZ_MODEM_CD      0x0400               /* CD for the modem line (2) */
 #define DZ_MODEM_DSR     0x0200               /* DSR for the modem line (2) */
+#define DZ_MODEM_CTS     0x0100               /* CTS for the modem line (2) */
+#define DZ_PRINT_RI      0x0008               /* RI for the printer line (3) */
+#define DZ_PRINT_CD      0x0004               /* CD for the printer line (3) */
+#define DZ_PRINT_DSR     0x0002               /* DSR for the prntr line (3) */
+#define DZ_PRINT_CTS     0x0001               /* CTS for the prntr line (3) */
 
 /*
  * Definitions for the Transmit Data Register.
index aee1b31f1a1c4c6f3456694ccf7a8272b80744a8..3db206d29b338b36df60bd6920872fcdb6d4702b 100644 (file)
@@ -60,7 +60,8 @@ struct timer_list mcfrs_timer_struct;
 #if defined(CONFIG_HW_FEITH)
 #define        CONSOLE_BAUD_RATE       38400
 #define        DEFAULT_CBAUD           B38400
-#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB)
+#elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || \
+      defined(CONFIG_M5329EVB) || defined(CONFIG_GILBARCO)
 #define CONSOLE_BAUD_RATE      115200
 #define DEFAULT_CBAUD          B115200
 #elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \
@@ -109,12 +110,30 @@ static struct mcf_serial mcfrs_table[] = {
                .irq = IRQBASE,
                .flags = ASYNC_BOOT_AUTOCONF,
        },
+#ifdef MCFUART_BASE2
        {  /* ttyS1 */
                .magic = 0,
                .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2),
                .irq = IRQBASE+1,
                .flags = ASYNC_BOOT_AUTOCONF,
        },
+#endif
+#ifdef MCFUART_BASE3
+       {  /* ttyS2 */
+               .magic = 0,
+               .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE3),
+               .irq = IRQBASE+2,
+               .flags = ASYNC_BOOT_AUTOCONF,
+       },
+#endif
+#ifdef MCFUART_BASE4
+       {  /* ttyS3 */
+               .magic = 0,
+               .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE4),
+               .irq = IRQBASE+3,
+               .flags = ASYNC_BOOT_AUTOCONF,
+       },
+#endif
 };
 
 
@@ -1516,6 +1535,22 @@ static void mcfrs_irqinit(struct mcf_serial *info)
        imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 +
                MCFINTC_IMRL);
        *imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1);
+#if defined(CONFIG_M527x)
+       {
+               /*
+                * External Pin Mask Setting & Enable External Pin for Interface
+                * mrcbis@aliceposta.it
+                */
+               unsigned short *serpin_enable_mask;
+               serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART);
+               if (info->line == 0)
+                       *serpin_enable_mask |= UART0_ENABLE_MASK;
+               else if (info->line == 1)
+                       *serpin_enable_mask |= UART1_ENABLE_MASK;
+               else if (info->line == 2)
+                       *serpin_enable_mask |= UART2_ENABLE_MASK;
+       }
+#endif
 #elif defined(CONFIG_M520x)
        volatile unsigned char *icrp, *uartp;
        volatile unsigned long *imrp;
@@ -1713,7 +1748,7 @@ mcfrs_init(void)
        /* Initialize the tty_driver structure */
        mcfrs_serial_driver->owner = THIS_MODULE;
        mcfrs_serial_driver->name = "ttyS";
-       mcfrs_serial_driver->driver_name = "serial";
+       mcfrs_serial_driver->driver_name = "mcfserial";
        mcfrs_serial_driver->major = TTY_MAJOR;
        mcfrs_serial_driver->minor_start = 64;
        mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1797,10 +1832,23 @@ void mcfrs_init_console(void)
        uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8;
        uartp[MCFUART_UMR] = MCFUART_MR2_STOP1;
 
+#ifdef CONFIG_M5272
+{
+       /*
+        * For the MCF5272, also compute the baudrate fraction.
+        */
+       int fraction = MCF_BUSCLK - (clk * 32 * mcfrs_console_baud);
+       fraction *= 16;
+       fraction /= (32 * mcfrs_console_baud);
+       uartp[MCFUART_UFPD] = (fraction & 0xf);         /* set fraction */
+       clk = (MCF_BUSCLK / mcfrs_console_baud) / 32;
+}
+#else
        clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */
+#endif
+
        uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8;  /* set msb baud */
        uartp[MCFUART_UBG2] = (clk & 0xff);  /* set lsb baud */
-
        uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER;
        uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE;
 
index 4f80c5b4a7533590ad6a51a22708d868fb43fdec..6dd579ed977782d66e26465806afe5ee97f9701d 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * drivers/serial/mpc52xx_uart.c
- *
  * Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
  *
  * FIXME According to the usermanual the status bits in the status register
  *
  *
  * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- * 
+ *
  * Some of the code has been inspired/copied from the 2.4 code written
  * by Dale Farnsworth <dfarnsworth@mvista.com>.
- * 
- * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2006 Secret Lab Technologies Ltd.
+ *                    Grant Likely <grant.likely@secretlab.ca>
+ * Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
  * Copyright (C) 2003 MontaVista, Software, Inc.
- * 
+ *
  * This file is licensed under the terms of the GNU General Public License
  * version 2. This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
  */
+
 /* Platform device Usage :
  *
  * Since PSCs can have multiple function, the correct driver for each one
  * will be mapped to.
  */
 
-#include <linux/platform_device.h>
+/* OF Platform device Usage :
+ *
+ * This driver is only used for PSCs configured in uart mode.  The device
+ * tree will have a node for each PSC in uart mode w/ device_type = "serial"
+ * and "mpc52xx-psc-uart" in the compatible string
+ *
+ * By default, PSC devices are enumerated in the order they are found.  However
+ * a particular PSC number can be forced by adding 'device_no = <port#>'
+ * to the device node.
+ *
+ * The driver inits all necessary registers to place the PSC in uart mode without
+ * DCD. However, the pin multiplexing isn't changed and should be set either
+ * by the bootloader or in the platform init code.
+ */
+
+#undef DEBUG
+
+#include <linux/device.h>
 #include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/serial.h>
 #include <asm/delay.h>
 #include <asm/io.h>
 
+#if defined(CONFIG_PPC_MERGE)
+#include <asm/of_platform.h>
+#else
+#include <linux/platform_device.h>
+#endif
+
 #include <asm/mpc52xx.h>
 #include <asm/mpc52xx_psc.h>
 
@@ -80,6 +103,12 @@ static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM];
         *        it's cleared, then a memset(...,0,...) should be added to
         *        the console_init
         */
+#if defined(CONFIG_PPC_MERGE)
+/* lookup table for matching device nodes to index numbers */
+static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
+
+static void mpc52xx_uart_of_enumerate(void);
+#endif
 
 #define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
 
@@ -96,32 +125,40 @@ static irqreturn_t mpc52xx_uart_int(int irq,void *dev_id);
 #define uart_console(port)     (0)
 #endif
 
+#if defined(CONFIG_PPC_MERGE)
+static struct of_device_id mpc52xx_uart_of_match[] = {
+       { .type = "serial", .compatible = "mpc52xx-psc-uart", },
+       { .type = "serial", .compatible = "mpc5200-psc", }, /* Efika only! */
+       {},
+};
+#endif
+
 
 /* ======================================================================== */
 /* UART operations                                                          */
 /* ======================================================================== */
 
-static unsigned int 
+static unsigned int
 mpc52xx_uart_tx_empty(struct uart_port *port)
 {
        int status = in_be16(&PSC(port)->mpc52xx_psc_status);
        return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
 }
 
-static void 
+static void
 mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
        /* Not implemented */
 }
 
-static unsigned int 
+static unsigned int
 mpc52xx_uart_get_mctrl(struct uart_port *port)
 {
        /* Not implemented */
        return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
 }
 
-static void 
+static void
 mpc52xx_uart_stop_tx(struct uart_port *port)
 {
        /* port->lock taken by caller */
@@ -129,7 +166,7 @@ mpc52xx_uart_stop_tx(struct uart_port *port)
        out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
 }
 
-static void 
+static void
 mpc52xx_uart_start_tx(struct uart_port *port)
 {
        /* port->lock taken by caller */
@@ -137,12 +174,12 @@ mpc52xx_uart_start_tx(struct uart_port *port)
        out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
 }
 
-static void 
+static void
 mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
 {
        unsigned long flags;
        spin_lock_irqsave(&port->lock, flags);
-       
+
        port->x_char = ch;
        if (ch) {
                /* Make sure tx interrupts are on */
@@ -150,7 +187,7 @@ mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
                port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
                out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask);
        }
-       
+
        spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -178,7 +215,7 @@ mpc52xx_uart_break_ctl(struct uart_port *port, int ctl)
                out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK);
        else
                out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK);
-       
+
        spin_unlock_irqrestore(&port->lock, flags);
 }
 
@@ -197,11 +234,11 @@ mpc52xx_uart_startup(struct uart_port *port)
        /* Reset/activate the port, clear and enable interrupts */
        out_8(&psc->command,MPC52xx_PSC_RST_RX);
        out_8(&psc->command,MPC52xx_PSC_RST_TX);
-       
+
        out_be32(&psc->sicr,0); /* UART mode DCD ignored */
 
        out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
-       
+
        out_8(&psc->rfcntl, 0x00);
        out_be16(&psc->rfalarm, 0x1ff);
        out_8(&psc->tfcntl, 0x07);
@@ -209,10 +246,10 @@ mpc52xx_uart_startup(struct uart_port *port)
 
        port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
        out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
-       
+
        out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
        out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
-               
+
        return 0;
 }
 
@@ -220,19 +257,19 @@ static void
 mpc52xx_uart_shutdown(struct uart_port *port)
 {
        struct mpc52xx_psc __iomem *psc = PSC(port);
-       
+
        /* Shut down the port, interrupt and all */
        out_8(&psc->command,MPC52xx_PSC_RST_RX);
        out_8(&psc->command,MPC52xx_PSC_RST_TX);
-       
-       port->read_status_mask = 0; 
+
+       port->read_status_mask = 0;
        out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask);
 
        /* Release interrupt */
        free_irq(port->irq, port);
 }
 
-static void 
+static void
 mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
                          struct termios *old)
 {
@@ -241,10 +278,10 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
        unsigned char mr1, mr2;
        unsigned short ctr;
        unsigned int j, baud, quot;
-       
+
        /* Prepare what we're gonna write */
        mr1 = 0;
-       
+
        switch (new->c_cflag & CSIZE) {
                case CS5:       mr1 |= MPC52xx_PSC_MODE_5_BITS;
                                break;
@@ -261,8 +298,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
                        MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN;
        } else
                mr1 |= MPC52xx_PSC_MODE_PARNONE;
-       
-       
+
+
        mr2 = 0;
 
        if (new->c_cflag & CSTOPB)
@@ -276,7 +313,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
        baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
        quot = uart_get_divisor(port, baud);
        ctr = quot & 0xffff;
-       
+
        /* Get the lock */
        spin_lock_irqsave(&port->lock, flags);
 
@@ -290,14 +327,14 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
         * boot for the console, all stuff is not yet ready to receive at that
         * time and that just makes the kernel oops */
        /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
-       while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && 
+       while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
               --j)
                udelay(1);
 
        if (!j)
                printk( KERN_ERR "mpc52xx_uart.c: "
                        "Unable to flush RX & TX fifos in-time in set_termios."
-                       "Some chars may have been lost.\n" ); 
+                       "Some chars may have been lost.\n" );
 
        /* Reset the TX & RX */
        out_8(&psc->command,MPC52xx_PSC_RST_RX);
@@ -309,7 +346,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new,
        out_8(&psc->mode,mr2);
        out_8(&psc->ctur,ctr >> 8);
        out_8(&psc->ctlr,ctr & 0xff);
-       
+
        /* Reenable TX & RX */
        out_8(&psc->command,MPC52xx_PSC_TX_ENABLE);
        out_8(&psc->command,MPC52xx_PSC_RX_ENABLE);
@@ -332,7 +369,7 @@ mpc52xx_uart_release_port(struct uart_port *port)
                port->membase = NULL;
        }
 
-       release_mem_region(port->mapbase, MPC52xx_PSC_SIZE);
+       release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc));
 }
 
 static int
@@ -341,12 +378,13 @@ mpc52xx_uart_request_port(struct uart_port *port)
        int err;
 
        if (port->flags & UPF_IOREMAP) /* Need to remap ? */
-               port->membase = ioremap(port->mapbase, MPC52xx_PSC_SIZE);
+               port->membase = ioremap(port->mapbase,
+                                       sizeof(struct mpc52xx_psc));
 
        if (!port->membase)
                return -EINVAL;
 
-       err = request_mem_region(port->mapbase, MPC52xx_PSC_SIZE,
+       err = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc),
                        "mpc52xx_psc_uart") != NULL ? 0 : -EBUSY;
 
        if (err && (port->flags & UPF_IOREMAP)) {
@@ -373,7 +411,7 @@ mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
 
        if ( (ser->irq != port->irq) ||
             (ser->io_type != SERIAL_IO_MEM) ||
-            (ser->baud_base != port->uartclk)  || 
+            (ser->baud_base != port->uartclk)  ||
             (ser->iomem_base != (void*)port->mapbase) ||
             (ser->hub6 != 0 ) )
                return -EINVAL;
@@ -404,11 +442,11 @@ static struct uart_ops mpc52xx_uart_ops = {
        .verify_port    = mpc52xx_uart_verify_port
 };
 
-       
+
 /* ======================================================================== */
 /* Interrupt handling                                                       */
 /* ======================================================================== */
-       
+
 static inline int
 mpc52xx_uart_int_rx_chars(struct uart_port *port)
 {
@@ -435,11 +473,11 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
 
                flag = TTY_NORMAL;
                port->icount.rx++;
-       
+
                if ( status & (MPC52xx_PSC_SR_PE |
                               MPC52xx_PSC_SR_FE |
                               MPC52xx_PSC_SR_RB) ) {
-                       
+
                        if (status & MPC52xx_PSC_SR_RB) {
                                flag = TTY_BREAK;
                                uart_handle_break(port);
@@ -464,7 +502,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
        }
 
        tty_flip_buffer_push(tty);
-       
+
        return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
 }
 
@@ -509,25 +547,25 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
        return 1;
 }
 
-static irqreturn_t 
+static irqreturn_t
 mpc52xx_uart_int(int irq, void *dev_id)
 {
        struct uart_port *port = dev_id;
        unsigned long pass = ISR_PASS_LIMIT;
        unsigned int keepgoing;
        unsigned short status;
-       
+
        spin_lock(&port->lock);
-       
+
        /* While we have stuff to do, we continue */
        do {
                /* If we don't find anything to do, we stop */
-               keepgoing = 0; 
-               
+               keepgoing = 0;
+
                /* Read status */
                status = in_be16(&PSC(port)->mpc52xx_psc_isr);
                status &= port->read_status_mask;
-                       
+
                /* Do we need to receive chars ? */
                /* For this RX interrupts must be on and some chars waiting */
                if ( status & MPC52xx_PSC_IMR_RXRDY )
@@ -537,15 +575,15 @@ mpc52xx_uart_int(int irq, void *dev_id)
                /* For this, TX must be ready and TX interrupt enabled */
                if ( status & MPC52xx_PSC_IMR_TXRDY )
                        keepgoing |= mpc52xx_uart_int_tx_chars(port);
-               
+
                /* Limit number of iteration */
                if ( !(--pass) )
                        keepgoing = 0;
 
        } while (keepgoing);
-       
+
        spin_unlock(&port->lock);
-       
+
        return IRQ_HANDLED;
 }
 
@@ -563,13 +601,18 @@ mpc52xx_console_get_options(struct uart_port *port,
        struct mpc52xx_psc __iomem *psc = PSC(port);
        unsigned char mr1;
 
+       pr_debug("mpc52xx_console_get_options(port=%p)\n", port);
+
        /* Read the mode registers */
        out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1);
        mr1 = in_8(&psc->mode);
-       
+
        /* CT{U,L}R are write-only ! */
-       *baud = __res.bi_baudrate ?
-               __res.bi_baudrate : CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+       *baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+#if !defined(CONFIG_PPC_MERGE)
+       if (__res.bi_baudrate)
+               *baud = __res.bi_baudrate;
+#endif
 
        /* Parse them */
        switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) {
@@ -579,26 +622,26 @@ mpc52xx_console_get_options(struct uart_port *port,
                case MPC52xx_PSC_MODE_8_BITS:
                default:                        *bits = 8;
        }
-       
+
        if (mr1 & MPC52xx_PSC_MODE_PARNONE)
                *parity = 'n';
        else
                *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e';
 }
 
-static void  
+static void
 mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
 {
        struct uart_port *port = &mpc52xx_uart_ports[co->index];
        struct mpc52xx_psc __iomem *psc = PSC(port);
        unsigned int i, j;
-       
+
        /* Disable interrupts */
        out_be16(&psc->mpc52xx_psc_imr, 0);
 
        /* Wait the TX buffer to be empty */
-       j = 5000000;    /* Maximum wait */      
-       while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && 
+       j = 5000000;    /* Maximum wait */
+       while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
               --j)
                udelay(1);
 
@@ -607,13 +650,13 @@ mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
                /* Line return handling */
                if (*s == '\n')
                        out_8(&psc->mpc52xx_psc_buffer_8, '\r');
-               
+
                /* Send the char */
                out_8(&psc->mpc52xx_psc_buffer_8, *s);
 
                /* Wait the TX buffer to be empty */
-               j = 20000;      /* Maximum wait */      
-               while (!(in_be16(&psc->mpc52xx_psc_status) & 
+               j = 20000;      /* Maximum wait */
+               while (!(in_be16(&psc->mpc52xx_psc_status) &
                         MPC52xx_PSC_SR_TXEMP) && --j)
                        udelay(1);
        }
@@ -622,6 +665,7 @@ mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
        out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
 }
 
+#if !defined(CONFIG_PPC_MERGE)
 static int __init
 mpc52xx_console_setup(struct console *co, char *options)
 {
@@ -634,7 +678,7 @@ mpc52xx_console_setup(struct console *co, char *options)
 
        if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM)
                return -EINVAL;
-       
+
        /* Basic port init. Needed since we use some uart_??? func before
         * real init for early access */
        spin_lock_init(&port->lock);
@@ -656,6 +700,78 @@ mpc52xx_console_setup(struct console *co, char *options)
        return uart_set_options(port, co, baud, parity, bits, flow);
 }
 
+#else
+
+static int __init
+mpc52xx_console_setup(struct console *co, char *options)
+{
+       struct uart_port *port = &mpc52xx_uart_ports[co->index];
+       struct device_node *np = mpc52xx_uart_nodes[co->index];
+       unsigned int ipb_freq;
+       struct resource res;
+       int ret;
+
+       int baud = CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+
+       pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
+                co, co->index, options);
+
+       if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
+               pr_debug("PSC%x out of range\n", co->index);
+               return -EINVAL;
+       }
+
+       if (!np) {
+               pr_debug("PSC%x not found in device tree\n", co->index);
+               return -EINVAL;
+       }
+
+       pr_debug("Console on ttyPSC%x is %s\n",
+                co->index, mpc52xx_uart_nodes[co->index]->full_name);
+
+       /* Fetch register locations */
+       if ((ret = of_address_to_resource(np, 0, &res)) != 0) {
+               pr_debug("Could not get resources for PSC%x\n", co->index);
+               return ret;
+       }
+
+       /* Search for bus-frequency property in this node or a parent */
+       if ((ipb_freq = mpc52xx_find_ipb_freq(np)) == 0) {
+               pr_debug("Could not find IPB bus frequency!\n");
+               return -EINVAL;
+       }
+
+       /* Basic port init. Needed since we use some uart_??? func before
+        * real init for early access */
+       spin_lock_init(&port->lock);
+       port->uartclk   = ipb_freq / 2;
+       port->ops       = &mpc52xx_uart_ops;
+       port->mapbase = res.start;
+       port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
+       port->irq = irq_of_parse_and_map(np, 0);
+
+       if (port->membase == NULL)
+               return -EINVAL;
+
+       pr_debug("mpc52xx-psc uart at %lx, mapped to %p, irq=%x, freq=%i\n",
+                port->mapbase, port->membase, port->irq, port->uartclk);
+
+       /* Set up the port parameters according to options */
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+       else
+               mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow);
+
+       pr_debug("Setting console parameters: %i %i%c1 flow=%c\n",
+                baud, bits, parity, flow);
+
+       return uart_set_options(port, co, baud, parity, bits, flow);
+}
+#endif /* defined(CONFIG_PPC_MERGE) */
+
 
 static struct uart_driver mpc52xx_uart_driver;
 
@@ -669,10 +785,11 @@ static struct console mpc52xx_console = {
        .data   = &mpc52xx_uart_driver,
 };
 
-       
-static int __init 
+
+static int __init
 mpc52xx_console_init(void)
 {
+       mpc52xx_uart_of_enumerate();
        register_console(&mpc52xx_console);
        return 0;
 }
@@ -700,6 +817,7 @@ static struct uart_driver mpc52xx_uart_driver = {
 };
 
 
+#if !defined(CONFIG_PPC_MERGE)
 /* ======================================================================== */
 /* Platform Driver                                                          */
 /* ======================================================================== */
@@ -723,8 +841,6 @@ mpc52xx_uart_probe(struct platform_device *dev)
        /* Init the port structure */
        port = &mpc52xx_uart_ports[idx];
 
-       memset(port, 0x00, sizeof(struct uart_port));
-
        spin_lock_init(&port->lock);
        port->uartclk   = __res.bi_ipbfreq / 2; /* Look at CTLR doc */
        port->fifosize  = 512;
@@ -733,6 +849,7 @@ mpc52xx_uart_probe(struct platform_device *dev)
                          ( uart_console(port) ? 0 : UPF_IOREMAP );
        port->line      = idx;
        port->ops       = &mpc52xx_uart_ops;
+       port->dev       = &dev->dev;
 
        /* Search for IRQ and mapbase */
        for (i=0 ; i<dev->num_resources ; i++, res++) {
@@ -771,7 +888,7 @@ mpc52xx_uart_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct uart_port *port = (struct uart_port *) platform_get_drvdata(dev);
 
-       if (sport)
+       if (port)
                uart_suspend_port(&mpc52xx_uart_driver, port);
 
        return 0;
@@ -789,6 +906,7 @@ mpc52xx_uart_resume(struct platform_device *dev)
 }
 #endif
 
+
 static struct platform_driver mpc52xx_uart_platform_driver = {
        .probe          = mpc52xx_uart_probe,
        .remove         = mpc52xx_uart_remove,
@@ -800,6 +918,184 @@ static struct platform_driver mpc52xx_uart_platform_driver = {
                .name   = "mpc52xx-psc",
        },
 };
+#endif /* !defined(CONFIG_PPC_MERGE) */
+
+
+#if defined(CONFIG_PPC_MERGE)
+/* ======================================================================== */
+/* OF Platform Driver                                                       */
+/* ======================================================================== */
+
+static int __devinit
+mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+       int idx = -1;
+       unsigned int ipb_freq;
+       struct uart_port *port = NULL;
+       struct resource res;
+       int ret;
+
+       dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p, match=%p)\n", op, match);
+
+       /* Check validity & presence */
+       for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
+               if (mpc52xx_uart_nodes[idx] == op->node)
+                       break;
+       if (idx >= MPC52xx_PSC_MAXNUM)
+               return -EINVAL;
+       pr_debug("Found %s assigned to ttyPSC%x\n",
+                mpc52xx_uart_nodes[idx]->full_name, idx);
+
+       /* Search for bus-frequency property in this node or a parent */
+       if ((ipb_freq = mpc52xx_find_ipb_freq(op->node)) == 0) {
+               dev_dbg(&op->dev, "Could not find IPB bus frequency!\n");
+               return -EINVAL;
+       }
+
+       /* Init the port structure */
+       port = &mpc52xx_uart_ports[idx];
+
+       spin_lock_init(&port->lock);
+       port->uartclk   = ipb_freq / 2;
+       port->fifosize  = 512;
+       port->iotype    = UPIO_MEM;
+       port->flags     = UPF_BOOT_AUTOCONF |
+                         ( uart_console(port) ? 0 : UPF_IOREMAP );
+       port->line      = idx;
+       port->ops       = &mpc52xx_uart_ops;
+       port->dev       = &op->dev;
+
+       /* Search for IRQ and mapbase */
+       if ((ret = of_address_to_resource(op->node, 0, &res)) != 0)
+               return ret;
+
+       port->mapbase = res.start;
+       port->irq = irq_of_parse_and_map(op->node, 0);
+
+       dev_dbg(&op->dev, "mpc52xx-psc uart at %lx, irq=%x, freq=%i\n",
+               port->mapbase, port->irq, port->uartclk);
+
+       if ((port->irq == NO_IRQ) || !port->mapbase) {
+               printk(KERN_ERR "Could not allocate resources for PSC\n");
+               return -EINVAL;
+       }
+
+       /* Add the port to the uart sub-system */
+       ret = uart_add_one_port(&mpc52xx_uart_driver, port);
+       if (!ret)
+               dev_set_drvdata(&op->dev, (void*)port);
+
+       return ret;
+}
+
+static int
+mpc52xx_uart_of_remove(struct of_device *op)
+{
+       struct uart_port *port = dev_get_drvdata(&op->dev);
+       dev_set_drvdata(&op->dev, NULL);
+
+       if (port)
+               uart_remove_one_port(&mpc52xx_uart_driver, port);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int
+mpc52xx_uart_of_suspend(struct of_device *op, pm_message_t state)
+{
+       struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+
+       if (port)
+               uart_suspend_port(&mpc52xx_uart_driver, port);
+
+       return 0;
+}
+
+static int
+mpc52xx_uart_of_resume(struct of_device *op)
+{
+       struct uart_port *port = (struct uart_port *) dev_get_drvdata(&op->dev);
+
+       if (port)
+               uart_resume_port(&mpc52xx_uart_driver, port);
+
+       return 0;
+}
+#endif
+
+static void
+mpc52xx_uart_of_assign(struct device_node *np, int idx)
+{
+       int free_idx = -1;
+       int i;
+
+       /* Find the first free node */
+       for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+               if (mpc52xx_uart_nodes[i] == NULL) {
+                       free_idx = i;
+                       break;
+               }
+       }
+
+       if ((idx < 0) || (idx >= MPC52xx_PSC_MAXNUM))
+               idx = free_idx;
+
+       if (idx < 0)
+               return; /* No free slot; abort */
+
+       /* If the slot is already occupied, then swap slots */
+       if (mpc52xx_uart_nodes[idx] && (free_idx != -1))
+               mpc52xx_uart_nodes[free_idx] = mpc52xx_uart_nodes[idx];
+       mpc52xx_uart_nodes[idx] = np;
+}
+
+static void
+mpc52xx_uart_of_enumerate(void)
+{
+       static int enum_done = 0;
+       struct device_node *np;
+       const unsigned int *devno;
+       int i;
+
+       if (enum_done)
+               return;
+
+       for_each_node_by_type(np, "serial") {
+               if (!of_match_node(mpc52xx_uart_of_match, np))
+                       continue;
+
+               /* Is a particular device number requested? */
+               devno = get_property(np, "device_no", NULL);
+               mpc52xx_uart_of_assign(of_node_get(np), devno ? *devno : -1);
+       }
+
+       enum_done = 1;
+
+       for (i = 0; i < MPC52xx_PSC_MAXNUM; i++) {
+               if (mpc52xx_uart_nodes[i])
+                       pr_debug("%s assigned to ttyPSC%x\n",
+                                mpc52xx_uart_nodes[i]->full_name, i);
+       }
+}
+
+MODULE_DEVICE_TABLE(of, mpc52xx_uart_of_match);
+
+static struct of_platform_driver mpc52xx_uart_of_driver = {
+       .owner          = THIS_MODULE,
+       .name           = "mpc52xx-psc-uart",
+       .match_table    = mpc52xx_uart_of_match,
+       .probe          = mpc52xx_uart_of_probe,
+       .remove         = mpc52xx_uart_of_remove,
+#ifdef CONFIG_PM
+       .suspend        = mpc52xx_uart_of_suspend,
+       .resume         = mpc52xx_uart_of_resume,
+#endif
+       .driver         = {
+               .name   = "mpc52xx-psc-uart",
+       },
+};
+#endif /* defined(CONFIG_PPC_MERGE) */
 
 
 /* ======================================================================== */
@@ -811,22 +1107,45 @@ mpc52xx_uart_init(void)
 {
        int ret;
 
-       printk(KERN_INFO "Serial: MPC52xx PSC driver\n");
+       printk(KERN_INFO "Serial: MPC52xx PSC UART driver\n");
 
-       ret = uart_register_driver(&mpc52xx_uart_driver);
-       if (ret == 0) {
-               ret = platform_driver_register(&mpc52xx_uart_platform_driver);
-               if (ret)
-                       uart_unregister_driver(&mpc52xx_uart_driver);
+       if ((ret = uart_register_driver(&mpc52xx_uart_driver)) != 0) {
+               printk(KERN_ERR "%s: uart_register_driver failed (%i)\n",
+                      __FILE__, ret);
+               return ret;
        }
 
-       return ret;
+#if defined(CONFIG_PPC_MERGE)
+       mpc52xx_uart_of_enumerate();
+
+       ret = of_register_platform_driver(&mpc52xx_uart_of_driver);
+       if (ret) {
+               printk(KERN_ERR "%s: of_register_platform_driver failed (%i)\n",
+                      __FILE__, ret);
+               uart_unregister_driver(&mpc52xx_uart_driver);
+               return ret;
+       }
+#else
+       ret = platform_driver_register(&mpc52xx_uart_platform_driver);
+       if (ret) {
+               printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
+                      __FILE__, ret);
+               uart_unregister_driver(&mpc52xx_uart_driver);
+               return ret;
+       }
+#endif
+
+       return 0;
 }
 
 static void __exit
 mpc52xx_uart_exit(void)
 {
+#if defined(CONFIG_PPC_MERGE)
+       of_unregister_platform_driver(&mpc52xx_uart_of_driver);
+#else
        platform_driver_unregister(&mpc52xx_uart_platform_driver);
+#endif
        uart_unregister_driver(&mpc52xx_uart_driver);
 }
 
index 8eea69f29989947d3c8ca0f16c08a0ee08f965bc..29823bd60fb0f69a60dd6deaaff4249c6fcadb60 100644 (file)
@@ -555,7 +555,7 @@ mpsc_sdma_start_tx(struct mpsc_port_info *pi)
        if (!mpsc_sdma_tx_active(pi)) {
                txre = (struct mpsc_tx_desc *)(pi->txr +
                        (pi->txr_tail * MPSC_TXRE_SIZE));
-               dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)txre,
@@ -931,7 +931,7 @@ mpsc_init_rings(struct mpsc_port_info *pi)
        }
        txre->link = cpu_to_be32(pi->txr_p);    /* Wrap last back to first */
 
-       dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+       dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
                DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1005,7 +1005,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 
        rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-       dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+       dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                invalidate_dcache_range((ulong)rxre,
@@ -1029,7 +1029,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
                }
 
                bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-               dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)bp,
@@ -1098,7 +1098,7 @@ next_frame:
                                            SDMA_DESC_CMDSTAT_F |
                                            SDMA_DESC_CMDSTAT_L);
                wmb();
-               dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)rxre,
@@ -1109,7 +1109,7 @@ next_frame:
                pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
                rxre = (struct mpsc_rx_desc *)(pi->rxr +
                        (pi->rxr_posn * MPSC_RXRE_SIZE));
-               dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)rxre,
@@ -1143,7 +1143,7 @@ mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
                                                           SDMA_DESC_CMDSTAT_EI
                                                           : 0));
        wmb();
-       dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+       dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                flush_dcache_range((ulong)txre,
@@ -1192,7 +1192,7 @@ mpsc_copy_tx_data(struct mpsc_port_info *pi)
                else /* All tx data copied into ring bufs */
                        return;
 
-               dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)bp,
@@ -1217,7 +1217,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
                txre = (struct mpsc_tx_desc *)(pi->txr +
                        (pi->txr_tail * MPSC_TXRE_SIZE));
 
-               dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+               dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        invalidate_dcache_range((ulong)txre,
@@ -1235,7 +1235,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
 
                        txre = (struct mpsc_tx_desc *)(pi->txr +
                                (pi->txr_tail * MPSC_TXRE_SIZE));
-                       dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+                       dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
                                DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                        if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1652,7 +1652,7 @@ mpsc_console_write(struct console *co, const char *s, uint count)
                        count--;
                }
 
-               dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+               dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
                if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
                        flush_dcache_range((ulong)bp,
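
For reference, the hunks above track the dma_cache_sync() interface change, which now takes the owning struct device as its first argument. A minimal sketch of a caller under the new signature follows; the helper name and its parameters are illustrative, not part of this patch:

        #include <linux/dma-mapping.h>

        /* Make a DMA descriptor visible to the CPU before the driver reads it.
         * "dev" is the device owning the DMA region (pi->port.dev in the hunks above). */
        static void sync_desc_for_cpu(struct device *dev, void *desc, size_t len)
        {
                dma_cache_sync(dev, desc, len, DMA_FROM_DEVICE);
        }
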
index 00f9ffd6948969c000b1382b860eb916df25f61d..431433f4dd6d3983a02bb8be5c2b09cd983e8c51 100644 (file)
@@ -723,7 +723,7 @@ static int serial_config(struct pcmcia_device * link)
        u_char *buf;
        cisparse_t *parse;
        cistpl_cftable_entry_t *cf;
-       int i, last_ret, last_fn;
+       int i;
 
        DEBUG(0, "serial_config(0x%p)\n", link);
 
@@ -740,15 +740,6 @@ static int serial_config(struct pcmcia_device * link)
        tuple->TupleOffset = 0;
        tuple->TupleDataMax = 255;
        tuple->Attributes = 0;
-       /* Get configuration register information */
-       tuple->DesiredTuple = CISTPL_CONFIG;
-       last_ret = first_tuple(link, tuple, parse);
-       if (last_ret != CS_SUCCESS) {
-               last_fn = ParseTuple;
-               goto cs_failed;
-       }
-       link->conf.ConfigBase = parse->config.base;
-       link->conf.Present = parse->config.rmask[0];
 
        /* Is this a compliant multifunction card? */
        tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
@@ -757,27 +748,25 @@ static int serial_config(struct pcmcia_device * link)
 
        /* Is this a multiport card? */
        tuple->DesiredTuple = CISTPL_MANFID;
-       if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
-               info->manfid = parse->manfid.manf;
-               info->prodid = parse->manfid.card;
-
-               for (i = 0; i < ARRAY_SIZE(quirks); i++)
-                       if ((quirks[i].manfid == ~0 ||
-                            quirks[i].manfid == info->manfid) &&
-                           (quirks[i].prodid == ~0 ||
-                            quirks[i].prodid == info->prodid)) {
-                               info->quirk = &quirks[i];
-                               break;
-                       }
-       }
+       info->manfid = link->manf_id;
+       info->prodid = link->card_id;
+
+       for (i = 0; i < ARRAY_SIZE(quirks); i++)
+               if ((quirks[i].manfid == ~0 ||
+                    quirks[i].manfid == info->manfid) &&
+                   (quirks[i].prodid == ~0 ||
+                    quirks[i].prodid == info->prodid)) {
+                       info->quirk = &quirks[i];
+                       break;
+               }
 
        /* Another check for dual-serial cards: look for either serial or
           multifunction cards that ask for appropriate IO port ranges */
        tuple->DesiredTuple = CISTPL_FUNCID;
        if ((info->multi == 0) &&
-           ((first_tuple(link, tuple, parse) != CS_SUCCESS) ||
-            (parse->funcid.func == CISTPL_FUNCID_MULTI) ||
-            (parse->funcid.func == CISTPL_FUNCID_SERIAL))) {
+           (link->has_func_id) &&
+           ((link->func_id == CISTPL_FUNCID_MULTI) ||
+            (link->func_id == CISTPL_FUNCID_SERIAL))) {
                tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
                if (first_tuple(link, tuple, parse) == CS_SUCCESS) {
                        if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
@@ -814,8 +803,6 @@ static int serial_config(struct pcmcia_device * link)
        kfree(cfg_mem);
        return 0;
 
- cs_failed:
-       cs_error(link, last_fn, last_ret);
  failed:
        serial_remove(link);
        kfree(cfg_mem);
@@ -925,6 +912,30 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "RS-COM-2P.cis"),
        PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100  1.00.",0x19ca78af,0xf964f42b),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232  1.00.",0x19ca78af,0x69fb7490),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676),
+       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+       PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+       PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
        /* too generic */
        /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
        /* PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0160, 0x0002), */
index cfcc3caf49d8f5a0a71997ee1bb6b1833035e6ac..3b5f19ec21263420256345eec45ae32f39f1adcd 100644 (file)
@@ -775,7 +775,7 @@ static int sci_notifier(struct notifier_block *self,
                         *
                         * Clean this up later..
                         */
-                       clk = clk_get("module_clk");
+                       clk = clk_get(NULL, "module_clk");
                        port->uartclk = clk_get_rate(clk) * 16;
                        clk_put(clk);
                }
@@ -960,7 +960,7 @@ static void sci_set_termios(struct uart_port *port, struct termios *termios,
                default:
                {
 #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64)
-                       struct clk *clk = clk_get("module_clk");
+                       struct clk *clk = clk_get(NULL, "module_clk");
                        t = SCBRR_VALUE(baud, clk_get_rate(clk));
                        clk_put(clk);
 #else
@@ -1128,7 +1128,7 @@ static void __init sci_init_ports(void)
                 * XXX: We should use a proper SCI/SCIF clock
                 */
                {
-                       struct clk *clk = clk_get("module_clk");
+                       struct clk *clk = clk_get(NULL, "module_clk");
                        sci_ports[i].port.uartclk = clk_get_rate(clk) * 16;
                        clk_put(clk);
                }
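
For context, the clk_get() calls above use the clock framework's two-argument form, clk_get(dev, id). A hedged sketch of the lookup pattern used here, passing a NULL device and the "module_clk" name as in the hunks (the helper and its error handling are illustrative):

        #include <linux/clk.h>
        #include <linux/err.h>

        static unsigned long module_clk_rate(void)
        {
                struct clk *clk = clk_get(NULL, "module_clk");
                unsigned long rate = 0;

                if (!IS_ERR(clk)) {
                        rate = clk_get_rate(clk);
                        clk_put(clk);
                }
                return rate;
        }
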
index 7ee992146ae9453c47667a7f8c505d5848644ff0..e4557cc4f74b546493f1297648233e1c2b2447ee 100644 (file)
 # define SCIF_ORER     0x0001          /* Overrun error bit */
 # define SCSCR_INIT(port)      0x3a    /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
 # define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
+# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */
+# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
+# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
+# define SCSCR_INIT(port)      0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
+# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
+# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
+# define SCIF_ORER 0x0001  /* overrun error bit */
+# define SCSCR_INIT(port)      0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
+# define SCIF_ONLY
 #else
 # error CPU subtype not defined
 #endif
@@ -365,6 +379,7 @@ SCIx_FNS(SCxSR,  0x08,  8, 0x10,  8, 0x08, 16, 0x10, 16, 0x04,  8)
 SCIx_FNS(SCxRDR, 0x0a,  8, 0x14,  8, 0x0A,  8, 0x14,  8, 0x05,  8)
 SCIF_FNS(SCFCR,                      0x0c,  8, 0x18, 16)
 #if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780)
+SCIF_FNS(SCFDR,                             0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCTFDR,                    0x0e, 16, 0x1C, 16)
 SCIF_FNS(SCRFDR,                    0x0e, 16, 0x20, 16)
 SCIF_FNS(SCSPTR,                       0,  0, 0x24, 16)
@@ -544,6 +559,28 @@ static inline int sci_rxd_in(struct uart_port *port)
        if (port->mapbase == 0xffe10000)
                return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
 }
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+       if (port->mapbase == 0xfffe8000)
+               return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+       if (port->mapbase == 0xfffe8800)
+               return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+       if (port->mapbase == 0xfffe9000)
+               return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+       if (port->mapbase == 0xfffe9800)
+               return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
+}
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+static inline int sci_rxd_in(struct uart_port *port)
+{
+       if (port->mapbase == 0xf8400000)
+               return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
+       if (port->mapbase == 0xf8410000)
+               return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
+       if (port->mapbase == 0xf8420000)
+               return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
+}
 #endif
 
 /*
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
new file mode 100644 (file)
index 0000000..8369065
--- /dev/null
@@ -0,0 +1,505 @@
+/*
+ * uartlite.c: Serial driver for Xilinx uartlite serial controller
+ *
+ * Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+
+#define ULITE_MAJOR            204
+#define ULITE_MINOR            187
+#define ULITE_NR_UARTS         4
+
+/* For register details see datasheet:
+   http://www.xilinx.com/bvdocs/ipcenter/data_sheet/opb_uartlite.pdf
+*/
+#define ULITE_RX               0x00
+#define ULITE_TX               0x04
+#define ULITE_STATUS           0x08
+#define ULITE_CONTROL          0x0c
+
+#define ULITE_REGION           16
+
+#define ULITE_STATUS_RXVALID   0x01
+#define ULITE_STATUS_RXFULL    0x02
+#define ULITE_STATUS_TXEMPTY   0x04
+#define ULITE_STATUS_TXFULL    0x08
+#define ULITE_STATUS_IE                0x10
+#define ULITE_STATUS_OVERRUN   0x20
+#define ULITE_STATUS_FRAME     0x40
+#define ULITE_STATUS_PARITY    0x80
+
+#define ULITE_CONTROL_RST_TX   0x01
+#define ULITE_CONTROL_RST_RX   0x02
+#define ULITE_CONTROL_IE       0x10
+
+
+static struct uart_port ports[ULITE_NR_UARTS];
+
+static int ulite_receive(struct uart_port *port, int stat)
+{
+       struct tty_struct *tty = port->info->tty;
+       unsigned char ch = 0;
+       char flag = TTY_NORMAL;
+
+       if ((stat & (ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+                    | ULITE_STATUS_FRAME)) == 0)
+               return 0;
+
+       /* stats */
+       if (stat & ULITE_STATUS_RXVALID) {
+               port->icount.rx++;
+               ch = readb(port->membase + ULITE_RX);
+
+               if (stat & ULITE_STATUS_PARITY)
+                       port->icount.parity++;
+       }
+
+       if (stat & ULITE_STATUS_OVERRUN)
+               port->icount.overrun++;
+
+       if (stat & ULITE_STATUS_FRAME)
+               port->icount.frame++;
+
+
+       /* drop byte with parity error if IGNPAR specified */
+       if (stat & port->ignore_status_mask & ULITE_STATUS_PARITY)
+               stat &= ~ULITE_STATUS_RXVALID;
+
+       stat &= port->read_status_mask;
+
+       if (stat & ULITE_STATUS_PARITY)
+               flag = TTY_PARITY;
+
+
+       stat &= ~port->ignore_status_mask;
+
+       if (stat & ULITE_STATUS_RXVALID)
+               tty_insert_flip_char(tty, ch, flag);
+
+       if (stat & ULITE_STATUS_FRAME)
+               tty_insert_flip_char(tty, 0, TTY_FRAME);
+
+       if (stat & ULITE_STATUS_OVERRUN)
+               tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+       return 1;
+}
+
+static int ulite_transmit(struct uart_port *port, int stat)
+{
+       struct circ_buf *xmit  = &port->info->xmit;
+
+       if (stat & ULITE_STATUS_TXFULL)
+               return 0;
+
+       if (port->x_char) {
+               writeb(port->x_char, port->membase + ULITE_TX);
+               port->x_char = 0;
+               port->icount.tx++;
+               return 1;
+       }
+
+       if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+               return 0;
+
+       writeb(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+       xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
+       port->icount.tx++;
+
+       /* wake up */
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+
+       return 1;
+}
+
+static irqreturn_t ulite_isr(int irq, void *dev_id)
+{
+       struct uart_port *port = (struct uart_port *)dev_id;
+       int busy;
+
+       do {
+               int stat = readb(port->membase + ULITE_STATUS);
+               busy  = ulite_receive(port, stat);
+               busy |= ulite_transmit(port, stat);
+       } while (busy);
+
+       tty_flip_buffer_push(port->info->tty);
+
+       return IRQ_HANDLED;
+}
+
+static unsigned int ulite_tx_empty(struct uart_port *port)
+{
+       unsigned long flags;
+       unsigned int ret;
+
+       spin_lock_irqsave(&port->lock, flags);
+       ret = readb(port->membase + ULITE_STATUS);
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       return ret & ULITE_STATUS_TXEMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int ulite_get_mctrl(struct uart_port *port)
+{
+       return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void ulite_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+       /* N/A */
+}
+
+static void ulite_stop_tx(struct uart_port *port)
+{
+       /* N/A */
+}
+
+static void ulite_start_tx(struct uart_port *port)
+{
+       ulite_transmit(port, readb(port->membase + ULITE_STATUS));
+}
+
+static void ulite_stop_rx(struct uart_port *port)
+{
+       /* don't forward any more data (like !CREAD) */
+       port->ignore_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+               | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+}
+
+static void ulite_enable_ms(struct uart_port *port)
+{
+       /* N/A */
+}
+
+static void ulite_break_ctl(struct uart_port *port, int ctl)
+{
+       /* N/A */
+}
+
+static int ulite_startup(struct uart_port *port)
+{
+       int ret;
+
+       ret = request_irq(port->irq, ulite_isr,
+                         IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "uartlite", port);
+       if (ret)
+               return ret;
+
+       writeb(ULITE_CONTROL_RST_RX | ULITE_CONTROL_RST_TX,
+              port->membase + ULITE_CONTROL);
+       writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+       return 0;
+}
+
+static void ulite_shutdown(struct uart_port *port)
+{
+       writeb(0, port->membase + ULITE_CONTROL);
+       readb(port->membase + ULITE_CONTROL); /* dummy */
+       free_irq(port->irq, port);
+}
+
+static void ulite_set_termios(struct uart_port *port, struct termios *termios,
+                             struct termios *old)
+{
+       unsigned long flags;
+       unsigned int baud;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       port->read_status_mask = ULITE_STATUS_RXVALID | ULITE_STATUS_OVERRUN
+               | ULITE_STATUS_TXFULL;
+
+       if (termios->c_iflag & INPCK)
+               port->read_status_mask |=
+                       ULITE_STATUS_PARITY | ULITE_STATUS_FRAME;
+
+       port->ignore_status_mask = 0;
+       if (termios->c_iflag & IGNPAR)
+               port->ignore_status_mask |= ULITE_STATUS_PARITY
+                       | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+       /* ignore all characters if CREAD is not set */
+       if ((termios->c_cflag & CREAD) == 0)
+               port->ignore_status_mask |=
+                       ULITE_STATUS_RXVALID | ULITE_STATUS_PARITY
+                       | ULITE_STATUS_FRAME | ULITE_STATUS_OVERRUN;
+
+       /* update timeout */
+       baud = uart_get_baud_rate(port, termios, old, 0, 460800);
+       uart_update_timeout(port, termios->c_cflag, baud);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *ulite_type(struct uart_port *port)
+{
+       return port->type == PORT_UARTLITE ? "uartlite" : NULL;
+}
+
+static void ulite_release_port(struct uart_port *port)
+{
+       release_mem_region(port->mapbase, ULITE_REGION);
+       iounmap(port->membase);
+       port->membase = 0;
+}
+
+static int ulite_request_port(struct uart_port *port)
+{
+       if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
+               dev_err(port->dev, "Memory region busy\n");
+               return -EBUSY;
+       }
+
+       port->membase = ioremap(port->mapbase, ULITE_REGION);
+       if (!port->membase) {
+               dev_err(port->dev, "Unable to map registers\n");
+               release_mem_region(port->mapbase, ULITE_REGION);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static void ulite_config_port(struct uart_port *port, int flags)
+{
+       ulite_request_port(port);
+       port->type = PORT_UARTLITE;
+}
+
+static int ulite_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+       /* we don't want the core code to modify any port params */
+       return -EINVAL;
+}
+
+static struct uart_ops ulite_ops = {
+       .tx_empty       = ulite_tx_empty,
+       .set_mctrl      = ulite_set_mctrl,
+       .get_mctrl      = ulite_get_mctrl,
+       .stop_tx        = ulite_stop_tx,
+       .start_tx       = ulite_start_tx,
+       .stop_rx        = ulite_stop_rx,
+       .enable_ms      = ulite_enable_ms,
+       .break_ctl      = ulite_break_ctl,
+       .startup        = ulite_startup,
+       .shutdown       = ulite_shutdown,
+       .set_termios    = ulite_set_termios,
+       .type           = ulite_type,
+       .release_port   = ulite_release_port,
+       .request_port   = ulite_request_port,
+       .config_port    = ulite_config_port,
+       .verify_port    = ulite_verify_port
+};
+
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+static void ulite_console_wait_tx(struct uart_port *port)
+{
+       int i;
+
+       /* wait up to 10ms for the character(s) to be sent */
+       for (i = 0; i < 10000; i++) {
+               if (readb(port->membase + ULITE_STATUS) & ULITE_STATUS_TXEMPTY)
+                       break;
+               udelay(1);
+       }
+}
+
+static void ulite_console_putchar(struct uart_port *port, int ch)
+{
+       ulite_console_wait_tx(port);
+       writeb(ch, port->membase + ULITE_TX);
+}
+
+static void ulite_console_write(struct console *co, const char *s,
+                               unsigned int count)
+{
+       struct uart_port *port = &ports[co->index];
+       unsigned long flags;
+       unsigned int ier;
+       int locked = 1;
+
+       if (oops_in_progress) {
+               locked = spin_trylock_irqsave(&port->lock, flags);
+       } else
+               spin_lock_irqsave(&port->lock, flags);
+
+       /* save and disable interrupt */
+       ier = readb(port->membase + ULITE_STATUS) & ULITE_STATUS_IE;
+       writeb(0, port->membase + ULITE_CONTROL);
+
+       uart_console_write(port, s, count, ulite_console_putchar);
+
+       ulite_console_wait_tx(port);
+
+       /* restore interrupt state */
+       if (ier)
+               writeb(ULITE_CONTROL_IE, port->membase + ULITE_CONTROL);
+
+       if (locked)
+               spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int __init ulite_console_setup(struct console *co, char *options)
+{
+       struct uart_port *port;
+       int baud = 9600;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+
+       if (co->index < 0 || co->index >= ULITE_NR_UARTS)
+               return -EINVAL;
+
+       port = &ports[co->index];
+
+       /* not initialized yet? */
+       if (!port->membase)
+               return -ENODEV;
+
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+       return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver ulite_uart_driver;
+
+static struct console ulite_console = {
+       .name   = "ttyUL",
+       .write  = ulite_console_write,
+       .device = uart_console_device,
+       .setup  = ulite_console_setup,
+       .flags  = CON_PRINTBUFFER,
+       .index  = -1, /* Specified on the cmdline (e.g. console=ttyUL0 ) */
+       .data   = &ulite_uart_driver,
+};
+
+static int __init ulite_console_init(void)
+{
+       register_console(&ulite_console);
+       return 0;
+}
+
+console_initcall(ulite_console_init);
+
+#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
+
+static struct uart_driver ulite_uart_driver = {
+       .owner          = THIS_MODULE,
+       .driver_name    = "uartlite",
+       .dev_name       = "ttyUL",
+       .major          = ULITE_MAJOR,
+       .minor          = ULITE_MINOR,
+       .nr             = ULITE_NR_UARTS,
+#ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+       .cons           = &ulite_console,
+#endif
+};
+
+static int __devinit ulite_probe(struct platform_device *pdev)
+{
+       struct resource *res, *res2;
+       struct uart_port *port;
+
+       if (pdev->id < 0 || pdev->id >= ULITE_NR_UARTS)
+               return -EINVAL;
+
+       if (ports[pdev->id].membase)
+               return -EBUSY;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res2)
+               return -ENODEV;
+
+       port = &ports[pdev->id];
+
+       port->fifosize  = 16;
+       port->regshift  = 2;
+       port->iotype    = UPIO_MEM;
+       port->iobase    = 1; /* mark port in use */
+       port->mapbase   = res->start;
+       port->membase   = 0;
+       port->ops       = &ulite_ops;
+       port->irq       = res2->start;
+       port->flags     = UPF_BOOT_AUTOCONF;
+       port->dev       = &pdev->dev;
+       port->type      = PORT_UNKNOWN;
+       port->line      = pdev->id;
+
+       uart_add_one_port(&ulite_uart_driver, port);
+       platform_set_drvdata(pdev, port);
+
+       return 0;
+}
+
+static int ulite_remove(struct platform_device *pdev)
+{
+       struct uart_port *port = platform_get_drvdata(pdev);
+
+       platform_set_drvdata(pdev, NULL);
+
+       if (port) {
+               uart_remove_one_port(&ulite_uart_driver, port);
+
+               /* mark port as free */
+               port->membase = 0;
+       }
+
+       return 0;
+}
+
+static struct platform_driver ulite_platform_driver = {
+       .probe  = ulite_probe,
+       .remove = ulite_remove,
+       .driver = {
+                  .owner = THIS_MODULE,
+                  .name  = "uartlite",
+                  },
+};
+
+int __init ulite_init(void)
+{
+       int ret;
+
+       ret = uart_register_driver(&ulite_uart_driver);
+       if (ret)
+               return ret;
+
+       ret = platform_driver_register(&ulite_platform_driver);
+       if (ret)
+               uart_unregister_driver(&ulite_uart_driver);
+
+       return ret;
+}
+
+void __exit ulite_exit(void)
+{
+       platform_driver_unregister(&ulite_platform_driver);
+       uart_unregister_driver(&ulite_uart_driver);
+}
+
+module_init(ulite_init);
+module_exit(ulite_exit);
+
+MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
+MODULE_DESCRIPTION("Xilinx uartlite serial driver");
+MODULE_LICENSE("GPL");
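
The new driver above binds to platform devices named "uartlite". As a hedged illustration only, board code could describe one port roughly as below; the base address 0xa0000000 and IRQ 3 are made-up example values, not taken from the driver or any real board:

        #include <linux/kernel.h>
        #include <linux/ioport.h>
        #include <linux/platform_device.h>

        static struct resource ulite_resources[] = {
                [0] = {
                        .start  = 0xa0000000,
                        .end    = 0xa000000f,   /* 16-byte register window */
                        .flags  = IORESOURCE_MEM,
                },
                [1] = {
                        .start  = 3,            /* example interrupt line */
                        .end    = 3,
                        .flags  = IORESOURCE_IRQ,
                },
        };

        static struct platform_device ulite_device = {
                .name           = "uartlite",
                .id             = 0,
                .resource       = ulite_resources,
                .num_resources  = ARRAY_SIZE(ulite_resources),
        };

        /* Registered from board setup code with platform_device_register(&ulite_device). */
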
index 72025df5561d5baa7836d58c2ea073e8f25728df..494d9b85648879ba42de6a1c45b03f294fa589a6 100644 (file)
@@ -148,7 +148,7 @@ struct chip_data {
        void (*cs_control)(u32 command);
 };
 
-static void pump_messages(void *data);
+static void pump_messages(struct work_struct *work);
 
 static int flush(struct driver_data *drv_data)
 {
@@ -884,9 +884,10 @@ static void pump_transfers(unsigned long data)
        }
 }
 
-static void pump_messages(void *data)
+static void pump_messages(struct work_struct *work)
 {
-       struct driver_data *drv_data = data;
+       struct driver_data *drv_data =
+               container_of(work, struct driver_data, pump_messages);
        unsigned long flags;
 
        /* Lock queue and check for queue work */
@@ -1098,7 +1099,7 @@ static int init_queue(struct driver_data *drv_data)
        tasklet_init(&drv_data->pump_transfers,
                        pump_transfers, (unsigned long)drv_data);
 
-       INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data);
+       INIT_WORK(&drv_data->pump_messages, pump_messages);
        drv_data->workqueue = create_singlethread_workqueue(
                                        drv_data->master->cdev.dev->bus_id);
        if (drv_data->workqueue == NULL)
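
The conversion above follows the workqueue API change in which work functions receive the struct work_struct pointer and recover their context with container_of(), while INIT_WORK() drops its data argument. A minimal sketch of the pattern with made-up structure and function names:

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        struct my_ctx {
                struct work_struct work;
                int pending;
        };

        static void my_work_fn(struct work_struct *work)
        {
                struct my_ctx *ctx = container_of(work, struct my_ctx, work);

                ctx->pending = 0;       /* act on the owning context */
        }

        /* Setup: INIT_WORK(&ctx->work, my_work_fn); later: schedule_work(&ctx->work); */
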
index c3c0626f550b80469482d8891654485228b220fe..270e6211c2e318e2b970d0e0ece11f90e5847df6 100644 (file)
@@ -360,12 +360,13 @@ spi_alloc_master(struct device *dev, unsigned size)
        if (!dev)
                return NULL;
 
-       master = kzalloc(size + sizeof *master, SLAB_KERNEL);
+       master = kzalloc(size + sizeof *master, GFP_KERNEL);
        if (!master)
                return NULL;
 
        class_device_initialize(&master->cdev);
        master->cdev.class = &spi_master_class;
+       kobj_set_kset_s(&master->cdev, spi_master_class.subsys);
        master->cdev.dev = get_device(dev);
        spi_master_set_devdata(master, &master[1]);
 
@@ -447,7 +448,9 @@ static int __unregister(struct device *dev, void *unused)
  */
 void spi_unregister_master(struct spi_master *master)
 {
-       (void) device_for_each_child(master->cdev.dev, NULL, __unregister);
+       int dummy;
+
+       dummy = device_for_each_child(master->cdev.dev, NULL, __unregister);
        class_device_unregister(&master->cdev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -463,15 +466,13 @@ EXPORT_SYMBOL_GPL(spi_unregister_master);
  */
 struct spi_master *spi_busnum_to_master(u16 bus_num)
 {
-       if (bus_num) {
-               char                    name[8];
-               struct kobject          *bus;
-
-               snprintf(name, sizeof name, "spi%u", bus_num);
-               bus = kset_find_obj(&spi_master_class.subsys.kset, name);
-               if (bus)
-                       return container_of(bus, struct spi_master, cdev.kobj);
-       }
+       char                    name[9];
+       struct kobject          *bus;
+
+       snprintf(name, sizeof name, "spi%u", bus_num);
+       bus = kset_find_obj(&spi_master_class.subsys.kset, name);
+       if (bus)
+               return container_of(bus, struct spi_master, cdev.kobj);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
@@ -607,7 +608,7 @@ static int __init spi_init(void)
 {
        int     status;
 
-       buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL);
+       buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
        if (!buf) {
                status = -ENOMEM;
                goto err0;
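
These spi core hunks also replace the deprecated SLAB_* allocation aliases with the underlying GFP_* flags. A trivial hedged example of the substitution (the structure is made up):

        #include <linux/slab.h>

        struct foo {
                int id;
        };

        static struct foo *foo_alloc(void)
        {
                /* GFP_KERNEL replaces the old SLAB_KERNEL alias. */
                return kzalloc(sizeof(struct foo), GFP_KERNEL);
        }
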
index a23862ef72b26cac0685cc22963d321e0919039c..57289b61d0beadd3dd543b853c233918f22daccc 100644 (file)
@@ -196,7 +196,7 @@ int spi_bitbang_setup(struct spi_device *spi)
                return -EINVAL;
 
        if (!cs) {
-               cs = kzalloc(sizeof *cs, SLAB_KERNEL);
+               cs = kzalloc(sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                spi->controller_state = cs;
@@ -265,9 +265,10 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
  * Drivers can provide word-at-a-time i/o primitives, or provide
  * transfer-at-a-time ones to leverage dma or fifo hardware.
  */
-static void bitbang_work(void *_bitbang)
+static void bitbang_work(struct work_struct *work)
 {
-       struct spi_bitbang      *bitbang = _bitbang;
+       struct spi_bitbang      *bitbang =
+               container_of(work, struct spi_bitbang, work);
        unsigned long           flags;
 
        spin_lock_irqsave(&bitbang->lock, flags);
@@ -456,7 +457,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
        if (!bitbang->master || !bitbang->chipselect)
                return -EINVAL;
 
-       INIT_WORK(&bitbang->work, bitbang_work, bitbang);
+       INIT_WORK(&bitbang->work, bitbang_work);
        spin_lock_init(&bitbang->lock);
        INIT_LIST_HEAD(&bitbang->queue);
 
index c2f601f8e4f21df7106952a147c83c2e13626fca..312987a0321072aa2c9974c8b476ffc97416b7bb 100644 (file)
@@ -251,6 +251,8 @@ static void butterfly_attach(struct parport *p)
         * setting up a platform device like this is an ugly kluge...
         */
        pdev = platform_device_register_simple("butterfly", -1, NULL, 0);
+       if (IS_ERR(pdev))
+               return;
 
        master = spi_alloc_master(&pdev->dev, sizeof *pp);
        if (!master) {
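
platform_device_register_simple() reports failure with an ERR_PTR-encoded pointer rather than NULL, so the added IS_ERR() check is what keeps this probe path from dereferencing an error value a few lines later. The shape of the check, with an illustrative diagnostic that the patch itself does not add:

    pdev = platform_device_register_simple("butterfly", -1, NULL, 0);
    if (IS_ERR(pdev)) {
            /* ERR_PTR-encoded errno, e.g. -ENOMEM; never dereference it */
            printk(KERN_ERR "butterfly: platform device registration failed: %ld\n",
                   PTR_ERR(pdev));
            return;
    }
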
index dda0ca45d904ff6b8323f83fb70d1a714b67f582..164a5dcf1f1e69a31d01a28348ab393e30100d1d 100644 (file)
@@ -69,25 +69,21 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 
 static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
 {
-       tuple_t tuple;
-       u_short buf[128];
        char *str;
-       int last_ret, last_fn, i, place;
+       int i, place;
        DEBUG(0, "ixj_get_serial(0x%p)\n", link);
-       tuple.TupleData = (cisdata_t *) buf;
-       tuple.TupleOffset = 0;
-       tuple.TupleDataMax = 80;
-       tuple.Attributes = 0;
-       tuple.DesiredTuple = CISTPL_VERS_1;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       str = (char *) buf;
-       printk("PCMCIA Version %d.%d\n", str[0], str[1]);
-       str += 2;
+
+       str = link->prod_id[0];
+       if (!str)
+               goto cs_failed;
        printk("%s", str);
-       str = str + strlen(str) + 1;
+       str = link->prod_id[1];
+       if (!str)
+               goto cs_failed;
        printk(" %s", str);
-       str = str + strlen(str) + 1;
+       str = link->prod_id[2];
+       if (!str)
+               goto cs_failed;
        place = 1;
        for (i = strlen(str) - 1; i >= 0; i--) {
                switch (str[i]) {
@@ -122,7 +118,9 @@ static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
                }
                place = place * 0x10;
        }
-       str = str + strlen(str) + 1;
+       str = link->prod_id[3];
+       if (!str)
+               goto cs_failed;
        printk(" version %s\n", str);
       cs_failed:
        return;
@@ -146,13 +144,6 @@ static int ixj_config(struct pcmcia_device * link)
        tuple.TupleData = (cisdata_t *) buf;
        tuple.TupleOffset = 0;
        tuple.TupleDataMax = 255;
-       tuple.Attributes = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
        tuple.Attributes = 0;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
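
The ixj_cs hunks drop the hand-rolled CISTPL_VERS_1 tuple walk in favour of the product-ID strings the PCMCIA core already parses into link->prod_id[0..3]; any slot may be NULL, hence the per-string checks before each printk. A compact sketch of the same idea (hypothetical helper, not in the patch):

    /* prod_id[] is filled by the PCMCIA core from CISTPL_VERS_1;
     * entries the card did not supply are NULL.
     */
    static void print_card_id(struct pcmcia_device *link)
    {
            int i;

            for (i = 0; i < 4; i++)
                    if (link->prod_id[i])
                            printk(KERN_INFO "prod_id[%d]: %s\n",
                                   i, link->prod_id[i]);
    }
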
index e6565633ba0fa7801fa8687ff0580fbb1c92b879..3dfa3e40e14890f84e9fe86bbf53b8288e635f5d 100644 (file)
@@ -158,7 +158,7 @@ struct cxacru_data {
        const struct cxacru_modem_type *modem_type;
 
        int line_status;
-       struct work_struct poll_work;
+       struct delayed_work poll_work;
 
        /* control handles */
        struct mutex cm_serialize;
@@ -347,7 +347,7 @@ static int cxacru_card_status(struct cxacru_data *instance)
        return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance);
+static void cxacru_poll_status(struct work_struct *work);
 
 static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
                struct atm_dev *atm_dev)
@@ -376,12 +376,14 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
        }
 
        /* Start status polling */
-       cxacru_poll_status(instance);
+       cxacru_poll_status(&instance->poll_work.work);
        return 0;
 }
 
-static void cxacru_poll_status(struct cxacru_data *instance)
+static void cxacru_poll_status(struct work_struct *work)
 {
+       struct cxacru_data *instance =
+               container_of(work, struct cxacru_data, poll_work.work);
        u32 buf[CXINF_MAX] = {};
        struct usbatm_data *usbatm = instance->usbatm;
        struct atm_dev *atm_dev = usbatm->atm_dev;
@@ -720,7 +722,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
 
        mutex_init(&instance->cm_serialize);
 
-       INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance);
+       INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status);
 
        usbatm_instance->driver_data = instance;
 
index a823486495c3a0950ba99cc5d47096927865be07..8ed6c75adf0f3ff640aa8265be7262d37bd31082 100644 (file)
@@ -142,7 +142,7 @@ struct speedtch_instance_data {
 
        struct speedtch_params params; /* set in probe, constant afterwards */
 
-       struct work_struct status_checker;
+       struct delayed_work status_checker;
 
        unsigned char last_status;
 
@@ -498,8 +498,11 @@ static int speedtch_start_synchro(struct speedtch_instance_data *instance)
        return ret;
 }
 
-static void speedtch_check_status(struct speedtch_instance_data *instance)
+static void speedtch_check_status(struct work_struct *work)
 {
+       struct speedtch_instance_data *instance =
+               container_of(work, struct speedtch_instance_data,
+                            status_checker.work);
        struct usbatm_data *usbatm = instance->usbatm;
        struct atm_dev *atm_dev = usbatm->atm_dev;
        unsigned char *buf = instance->scratch_buffer;
@@ -576,7 +579,7 @@ static void speedtch_status_poll(unsigned long data)
 {
        struct speedtch_instance_data *instance = (void *)data;
 
-       schedule_work(&instance->status_checker);
+       schedule_delayed_work(&instance->status_checker, 0);
 
        /* The following check is racy, but the race is harmless */
        if (instance->poll_delay < MAX_POLL_DELAY)
@@ -596,7 +599,7 @@ static void speedtch_resubmit_int(unsigned long data)
        if (int_urb) {
                ret = usb_submit_urb(int_urb, GFP_ATOMIC);
                if (!ret)
-                       schedule_work(&instance->status_checker);
+                       schedule_delayed_work(&instance->status_checker, 0);
                else {
                        atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
                        mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
@@ -640,7 +643,7 @@ static void speedtch_handle_int(struct urb *int_urb)
 
        if ((int_urb = instance->int_urb)) {
                ret = usb_submit_urb(int_urb, GFP_ATOMIC);
-               schedule_work(&instance->status_checker);
+               schedule_delayed_work(&instance->status_checker, 0);
                if (ret < 0) {
                        atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
                        goto fail;
@@ -855,7 +858,7 @@ static int speedtch_bind(struct usbatm_data *usbatm,
 
        usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
 
-       INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance);
+       INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status);
 
        instance->status_checker.timer.function = speedtch_status_poll;
        instance->status_checker.timer.data = (unsigned long)instance;
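
Work items that are also scheduled with a delay, such as the cxacru and speedtch pollers above, move to struct delayed_work: INIT_WORK becomes INIT_DELAYED_WORK, an immediate kick becomes schedule_delayed_work(..., 0), and the callback reaches its container through the ".work" member of the delayed_work. Reduced to the bare pattern (struct bar and bar_poll are placeholders):

    struct bar {
            struct delayed_work poll_work;
            /* ... */
    };

    static void bar_poll(struct work_struct *work)
    {
            /* the work_struct sits inside the delayed_work, hence ".work" */
            struct bar *bar = container_of(work, struct bar, poll_work.work);

            /* ... poll the hardware, then re-arm ... */
            schedule_delayed_work(&bar->poll_work, msecs_to_jiffies(1000));
    }

    /* setup:          INIT_DELAYED_WORK(&bar->poll_work, bar_poll);
     * immediate kick: schedule_delayed_work(&bar->poll_work, 0);
     */
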
index c137c041f7a43057d355b40654b31816a7092a50..dae4ef1e8fe592f6338a3d75917134c3198f5e4c 100644 (file)
@@ -64,6 +64,8 @@
 #include <linux/kthread.h>
 #include <linux/version.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
+
 #include <asm/unaligned.h>
 
 #include "usbatm.h"
@@ -655,9 +657,9 @@ static int request_dsp(struct uea_softc *sc)
 /*
  * The uea_load_page() function must be called within a process context
  */
-static void uea_load_page(void *xsc)
+static void uea_load_page(struct work_struct *work)
 {
-       struct uea_softc *sc = xsc;
+       struct uea_softc *sc = container_of(work, struct uea_softc, task);
        u16 pageno = sc->pageno;
        u16 ovl = sc->ovl;
        struct block_info bi;
@@ -1348,7 +1350,7 @@ static int uea_boot(struct uea_softc *sc)
 
        uea_enters(INS_TO_USBDEV(sc));
 
-       INIT_WORK(&sc->task, uea_load_page, sc);
+       INIT_WORK(&sc->task, uea_load_page);
        init_waitqueue_head(&sc->sync_q);
        init_waitqueue_head(&sc->cmv_ack_wait);
 
index ec3438dc8ee5e99d68346c7d428c7fae75b4c148..7f1fa956dcdb6b0c558f8bb9a835a07222f22867 100644 (file)
@@ -421,9 +421,9 @@ static void acm_write_bulk(struct urb *urb)
                schedule_work(&acm->work);
 }
 
-static void acm_softint(void *private)
+static void acm_softint(struct work_struct *work)
 {
-       struct acm *acm = private;
+       struct acm *acm = container_of(work, struct acm, work);
        dbg("Entering acm_softint.");
        
        if (!ACM_READY(acm))
@@ -927,7 +927,7 @@ skip_normal_probe:
        acm->rx_buflimit = num_rx_buf;
        acm->urb_task.func = acm_rx_tasklet;
        acm->urb_task.data = (unsigned long) acm;
-       INIT_WORK(&acm->work, acm_softint, acm);
+       INIT_WORK(&acm->work, acm_softint);
        spin_lock_init(&acm->throttle_lock);
        spin_lock_init(&acm->write_lock);
        spin_lock_init(&acm->read_lock);
index 840442a25b6183844979b3c2f5fc58d3607fb7ac..c3915dc28608da7aa3ef3ae88fc005cbdf37eba7 100644 (file)
@@ -93,7 +93,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd)
 }
 
 
-/* sometimes alloc/free could use kmalloc with SLAB_DMA, for
+/* sometimes alloc/free could use kmalloc with GFP_DMA, for
  * better sharing and to leverage mm/slab.c intelligence.
  */
 
index 0ce393eb3c4b0bea98e9a6ec8156ca5272160018..2651c2e2a89f5671e842b4c979f84f8d75857449 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/usbdevice_fs.h>
 #include <linux/kthread.h>
 #include <linux/mutex.h>
+#include <linux/freezer.h>
 
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -68,7 +69,7 @@ struct usb_hub {
 
        unsigned                has_indicators:1;
        u8                      indicator[USB_MAXCHILDREN];
-       struct work_struct      leds;
+       struct delayed_work     leds;
 };
 
 
@@ -218,9 +219,10 @@ static void set_port_led(
 
 #define        LED_CYCLE_PERIOD        ((2*HZ)/3)
 
-static void led_work (void *__hub)
+static void led_work (struct work_struct *work)
 {
-       struct usb_hub          *hub = __hub;
+       struct usb_hub          *hub =
+               container_of(work, struct usb_hub, leds.work);
        struct usb_device       *hdev = hub->hdev;
        unsigned                i;
        unsigned                changed = 0;
@@ -405,9 +407,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
  * talking to TTs must queue control transfers (not just bulk and iso), so
  * both can talk to the same hub concurrently.
  */
-static void hub_tt_kevent (void *arg)
+static void hub_tt_kevent (struct work_struct *work)
 {
-       struct usb_hub          *hub = arg;
+       struct usb_hub          *hub =
+               container_of(work, struct usb_hub, tt.kevent);
        unsigned long           flags;
 
        spin_lock_irqsave (&hub->tt.lock, flags);
@@ -458,7 +461,7 @@ void usb_hub_tt_clear_buffer (struct usb_device *udev, int pipe)
         * since each TT has "at least two" buffers that can need it (and
         * there can be many TTs per hub).  even if they're uncommon.
         */
-       if ((clear = kmalloc (sizeof *clear, SLAB_ATOMIC)) == NULL) {
+       if ((clear = kmalloc (sizeof *clear, GFP_ATOMIC)) == NULL) {
                dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
                /* FIXME recover somehow ... RESET_TT? */
                return;
@@ -694,7 +697,7 @@ static int hub_configure(struct usb_hub *hub,
 
        spin_lock_init (&hub->tt.lock);
        INIT_LIST_HEAD (&hub->tt.clear_list);
-       INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub);
+       INIT_WORK (&hub->tt.kevent, hub_tt_kevent);
        switch (hdev->descriptor.bDeviceProtocol) {
                case 0:
                        break;
@@ -938,7 +941,7 @@ descriptor_error:
        INIT_LIST_HEAD(&hub->event_list);
        hub->intfdev = &intf->dev;
        hub->hdev = hdev;
-       INIT_WORK(&hub->leds, led_work, hub);
+       INIT_DELAYED_WORK(&hub->leds, led_work);
 
        usb_set_intfdata (intf, hub);
        intf->needs_remote_wakeup = 1;
@@ -2369,7 +2372,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
        struct usb_qualifier_descriptor *qual;
        int                             status;
 
-       qual = kmalloc (sizeof *qual, SLAB_KERNEL);
+       qual = kmalloc (sizeof *qual, GFP_KERNEL);
        if (qual == NULL)
                return;
 
@@ -2381,7 +2384,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
                /* hub LEDs are probably harder to miss than syslog */
                if (hub->has_indicators) {
                        hub->indicator[port1-1] = INDICATOR_GREEN_BLINK;
-                       schedule_work (&hub->leds);
+                       schedule_delayed_work (&hub->leds, 0);
                }
        }
        kfree(qual);
@@ -2555,7 +2558,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
                                if (hub->has_indicators) {
                                        hub->indicator[port1-1] =
                                                INDICATOR_AMBER_BLINK;
-                                       schedule_work (&hub->leds);
+                                       schedule_delayed_work (&hub->leds, 0);
                                }
                                status = -ENOTCONN;     /* Don't retry */
                                goto loop_disable;
@@ -2920,7 +2923,7 @@ static int config_descriptors_changed(struct usb_device *udev)
                if (len < le16_to_cpu(udev->config[index].desc.wTotalLength))
                        len = le16_to_cpu(udev->config[index].desc.wTotalLength);
        }
-       buf = kmalloc (len, SLAB_KERNEL);
+       buf = kmalloc (len, GFP_KERNEL);
        if (buf == NULL) {
                dev_err(&udev->dev, "no mem to re-read configs after reset\n");
                /* assume the worst */
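
Interleaved with the workqueue rework, these hub.c hunks (and many of the drivers below) retire the legacy SLAB_KERNEL/SLAB_ATOMIC aliases in favour of the GFP flags they were defined as; behaviour is unchanged, only the spelling. The rule of thumb is the one the ETRAX host driver's macro further down spells out explicitly (sketch only):

    /* sleeping context may use GFP_KERNEL; interrupt/atomic context
     * must use GFP_ATOMIC
     */
    static void *ctx_alloc(size_t len)
    {
            return kmalloc(len, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
    }
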
index 29b0fa9ff9d0a7e2776e24217cc9619cd33f4450..149aa8bfb1fe4e2bb353ae49f490e7dc081ea48a 100644 (file)
@@ -488,7 +488,7 @@ void usb_sg_wait (struct usb_sg_request *io)
                int     retval;
 
                io->urbs [i]->dev = io->dev;
-               retval = usb_submit_urb (io->urbs [i], SLAB_ATOMIC);
+               retval = usb_submit_urb (io->urbs [i], GFP_ATOMIC);
 
                /* after we submit, let completions or cancelations fire;
                 * we handshake using io->status.
@@ -1501,9 +1501,10 @@ struct set_config_request {
 };
 
 /* Worker routine for usb_driver_set_configuration() */
-static void driver_set_config_work(void *_req)
+static void driver_set_config_work(struct work_struct *work)
 {
-       struct set_config_request *req = _req;
+       struct set_config_request *req =
+               container_of(work, struct set_config_request, work);
 
        usb_lock_device(req->udev);
        usb_set_configuration(req->udev, req->config);
@@ -1541,7 +1542,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
                return -ENOMEM;
        req->udev = udev;
        req->config = config;
-       INIT_WORK(&req->work, driver_set_config_work, req);
+       INIT_WORK(&req->work, driver_set_config_work);
 
        usb_get_dev(udev);
        if (!schedule_work(&req->work)) {
index 81cb52564e681eb1e4a4b35bbb8a7e6d2d4e2562..02426d0b9a347f514f69b4c4f706bb9c5a48caef 100644 (file)
@@ -203,9 +203,10 @@ static void ksuspend_usb_cleanup(void)
 #ifdef CONFIG_USB_SUSPEND
 
 /* usb_autosuspend_work - callback routine to autosuspend a USB device */
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {
-       struct usb_device       *udev = _udev;
+       struct usb_device *udev =
+               container_of(work, struct usb_device, autosuspend.work);
 
        usb_pm_lock(udev);
        udev->auto_pm = 1;
@@ -215,7 +216,7 @@ static void usb_autosuspend_work(void *_udev)
 
 #else
 
-static void usb_autosuspend_work(void *_udev)
+static void usb_autosuspend_work(struct work_struct *work)
 {}
 
 #endif /* CONFIG_USB_SUSPEND */
@@ -304,7 +305,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1)
 
 #ifdef CONFIG_PM
        mutex_init(&dev->pm_mutex);
-       INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev);
+       INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
 #endif
        return dev;
 }
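
A delayed_work is nothing more than a work_struct plus the timer used to defer it, which is why the autosuspend callback above resolves its container through "autosuspend.work", and why speedtch earlier can keep pointing status_checker.timer at its own poll routine. In this kernel series the definition is essentially:

    /* include/linux/workqueue.h (this series) */
    struct delayed_work {
            struct work_struct work;
            struct timer_list timer;
    };
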
index 3bd1dfe565c1bc8a86cdd872115020477ba3e8db..d15bf22b9a030833991bb5b53fa589ed67c48f2b 100644 (file)
@@ -1833,9 +1833,9 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags)
        spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
-static void eth_work (void *_dev)
+static void eth_work (struct work_struct *work)
 {
-       struct eth_dev          *dev = _dev;
+       struct eth_dev  *dev = container_of(work, struct eth_dev, work);
 
        if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) {
                if (netif_running (dev->net))
@@ -2398,7 +2398,7 @@ autoconf_fail:
        dev = netdev_priv(net);
        spin_lock_init (&dev->lock);
        spin_lock_init (&dev->req_lock);
-       INIT_WORK (&dev->work, eth_work, dev);
+       INIT_WORK (&dev->work, eth_work);
        INIT_LIST_HEAD (&dev->tx_reqs);
        INIT_LIST_HEAD (&dev->rx_reqs);
 
index 8b975d15538ddff6fb93e2757e7d1888be538881..c98316ce8384494ef864f077ddf9222650afa763 100644 (file)
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/utsname.h>
 
 #include <linux/usb_ch9.h>
index 64554acad63f915ca41d06fe69a2ac66c12d0fa9..31351826f2baddc7c18d17276016745f3d5c2491 100644 (file)
@@ -1236,7 +1236,7 @@ autoconf_fail:
 
 
        /* ok, we made sense of the hardware ... */
-       dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                return -ENOMEM;
        }
index a3076da3f4eb03f60dc93494346b2b9d54755fb6..805a9826842d51a1c9a05443d4208fa62e477cb1 100644 (file)
@@ -1864,7 +1864,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* alloc, and start init */
-       dev = kmalloc (sizeof *dev, SLAB_KERNEL);
+       dev = kmalloc (sizeof *dev, GFP_KERNEL);
        if (dev == NULL){
                pr_debug("enomem %s\n", pci_name(pdev));
                retval = -ENOMEM;
index 86924f9cdd7e0c5e57845e4f4a41d63a188414b2..3fb1044a4db03eb41bde7022f7773ae9640b6be8 100644 (file)
@@ -412,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
        /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
 
        value = -ENOMEM;
-       kbuf = kmalloc (len, SLAB_KERNEL);
+       kbuf = kmalloc (len, GFP_KERNEL);
        if (unlikely (!kbuf))
                goto free1;
 
@@ -456,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
 
        value = -ENOMEM;
-       kbuf = kmalloc (len, SLAB_KERNEL);
+       kbuf = kmalloc (len, GFP_KERNEL);
        if (!kbuf)
                goto free1;
        if (copy_from_user (kbuf, buf, len)) {
@@ -1898,7 +1898,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        buf += 4;
        length -= 4;
 
-       kbuf = kmalloc (length, SLAB_KERNEL);
+       kbuf = kmalloc (length, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;
        if (copy_from_user (kbuf, buf, length)) {
index 0b590831582c43f5fb96e275125aeb082beeca53..3024c679e38e9fcec4e5ada07f8bc00bd799f045 100644 (file)
@@ -2861,7 +2861,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* alloc, and start init */
-       dev = kzalloc (sizeof *dev, SLAB_KERNEL);
+       dev = kzalloc (sizeof *dev, GFP_KERNEL);
        if (dev == NULL){
                retval = -ENOMEM;
                goto done;
index 48a09fd89d18f44158ebcfe2e4041b01698c5679..030d87c28c2f614f4874c6c1867638fe410b5e87 100644 (file)
@@ -2581,7 +2581,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
        /* UDC_PULLUP_EN gates the chip clock */
        // OTG_SYSCON_1_REG |= DEV_IDLE_EN;
 
-       udc = kzalloc(sizeof(*udc), SLAB_KERNEL);
+       udc = kzalloc(sizeof(*udc), GFP_KERNEL);
        if (!udc)
                return -ENOMEM;
 
index 0f809dd684921662a0aa41a4dc99d54f90f3e2f7..40710ea1b4900daeed459219d17aa0cc41dd5ebe 100644 (file)
@@ -1190,7 +1190,7 @@ autoconf_fail:
 
 
        /* ok, we made sense of the hardware ... */
-       dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        spin_lock_init (&dev->lock);
index 34b7a31cd85b90900a45a8626c186084a16f234f..56349d21e6ea9105d6ce6fbd19eaa6b142b3d83d 100644 (file)
@@ -492,7 +492,7 @@ show_periodic (struct class_device *class_dev, char *buf)
        unsigned                i;
        __le32                  tag;
 
-       if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
+       if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
                return 0;
        seen_count = 0;
 
index 87eca6aeacf255dcf04b0fce1e0effbd4eee9e0d..9325e46a68c03d847386a5d3884d8cc163039828 100644 (file)
@@ -188,7 +188,7 @@ static DEFINE_TIMER(bulk_eot_timer, NULL, 0, 0);
 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
 
-#define SLAB_FLAG     (in_interrupt() ? SLAB_ATOMIC : SLAB_KERNEL)
+#define SLAB_FLAG     (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 #define KMALLOC_FLAG  (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
 
 /* Most helpful debugging aid */
@@ -275,13 +275,13 @@ static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
 static int zout_buffer[4] __attribute__ ((aligned (4)));
 
 /* Cache for allocating new EP and SB descriptors. */
-static kmem_cache_t *usb_desc_cache;
+static struct kmem_cache *usb_desc_cache;
 
 /* Cache for the registers allocated in the top half. */
-static kmem_cache_t *top_half_reg_cache;
+static struct kmem_cache *top_half_reg_cache;
 
 /* Cache for the data allocated in the isoc descr top half. */
-static kmem_cache_t *isoc_compl_cache;
+static struct kmem_cache *isoc_compl_cache;
 
 static struct usb_bus *etrax_usb_bus;
 
@@ -1743,7 +1743,7 @@ static irqreturn_t etrax_usb_tx_interrupt(int irq, void *vhc)
 
                *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
 
-               comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, SLAB_ATOMIC);
+               comp_data = (usb_isoc_complete_data_t*)kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
                assert(comp_data != NULL);
 
                 INIT_WORK(&comp_data->usb_bh, etrax_usb_isoc_descr_interrupt_bottom_half, comp_data);
@@ -3010,7 +3010,7 @@ static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid)
                        if (!urb->iso_frame_desc[i].length)
                                continue;
 
-                       next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
+                       next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
                        assert(next_sb_desc != NULL);
 
                        if (urb->iso_frame_desc[i].length > 0) {
@@ -3063,7 +3063,7 @@ static void etrax_usb_add_to_isoc_sb_list(struct urb *urb, int epid)
                if (TxIsocEPList[epid].sub == 0) {
                        dbg_isoc("Isoc traffic not already running, allocating SB");
 
-                       next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_ATOMIC);
+                       next_sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, GFP_ATOMIC);
                        assert(next_sb_desc != NULL);
 
                        next_sb_desc->command = (IO_STATE(USB_SB_command, tt, in) |
@@ -3317,7 +3317,7 @@ static irqreturn_t etrax_usb_hc_interrupt_top_half(int irq, void *vhc)
 
        restore_flags(flags);
 
-       reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, SLAB_ATOMIC);
+       reg = (usb_interrupt_registers_t *)kmem_cache_alloc(top_half_reg_cache, GFP_ATOMIC);
 
        assert(reg != NULL);
 
index 8293c1d4be3f3049e0fb531cff33966436803541..0f47a57dac28578a25e745c343a589796f8838cf 100644 (file)
@@ -505,7 +505,7 @@ show_periodic (struct class_device *class_dev, char *buf)
        char                    *next;
        unsigned                i;
 
-       if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, SLAB_ATOMIC)))
+       if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
                return 0;
        seen_count = 0;
 
index 2dbb77414905209969e86724c5a15f80791c061c..7f26f9bdbaf1e61e70a8090e9ed3648f538f7f85 100644 (file)
@@ -134,7 +134,7 @@ static int isp1301_attach(struct i2c_adapter *adap, int addr, int kind)
 {
        struct i2c_client *c;
 
-       c = (struct i2c_client *)kzalloc(sizeof(*c), SLAB_KERNEL);
+       c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL);
 
        if (!c)
                return -ENOMEM;
index 54f554e0f0ade94f5ed961930629676e8a79c0a5..ac9f11d19817d93e2fedcaf9cb26496a529df301 100644 (file)
@@ -169,21 +169,14 @@ static int sl811_cs_config(struct pcmcia_device *link)
 
        DBG(0, "sl811_cs_config(0x%p)\n", link);
 
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       tuple.Attributes = 0;
-       tuple.TupleData = buf;
-       tuple.TupleDataMax = sizeof(buf);
-       tuple.TupleOffset = 0;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, &parse));
-       link->conf.ConfigBase = parse.config.base;
-       link->conf.Present = parse.config.rmask[0];
-
        /* Look up the current Vcc */
        CS_CHECK(GetConfigurationInfo,
                        pcmcia_get_configuration_info(link, &conf));
 
+       tuple.Attributes = 0;
+       tuple.TupleData = buf;
+       tuple.TupleDataMax = sizeof(buf);
+       tuple.TupleOffset = 0;
        tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
        CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
        while (1) {
index ef54e310bfc49033203cb88cc2aea69e380fe6bd..a9d7119e3176921958aeafcf977aa4cb5bd17573 100644 (file)
@@ -163,7 +163,7 @@ struct u132_endp {
         u16 queue_next;
         struct urb *urb_list[ENDP_QUEUE_SIZE];
         struct list_head urb_more;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 struct u132_ring {
         unsigned in_use:1;
@@ -171,7 +171,7 @@ struct u132_ring {
         u8 number;
         struct u132 *u132;
         struct u132_endp *curr_endp;
-        struct work_struct scheduler;
+        struct delayed_work scheduler;
 };
 #define OHCI_QUIRK_AMD756 0x01
 #define OHCI_QUIRK_SUPERIO 0x02
@@ -198,7 +198,7 @@ struct u132 {
         u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
         int flags;
         unsigned long next_statechange;
-        struct work_struct monitor;
+        struct delayed_work monitor;
         int num_endpoints;
         struct u132_addr addr[MAX_U132_ADDRS];
         struct u132_udev udev[MAX_U132_UDEVS];
@@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
         if (delta > 0) {
                 if (queue_delayed_work(workqueue, &ring->scheduler, delta))
                         return;
-        } else if (queue_work(workqueue, &ring->scheduler))
+        } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
                 return;
         kref_put(&u132->kref, u132_hcd_delete);
         return;
@@ -389,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132,
 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &endp->scheduler, delta))
-                        kref_get(&endp->kref);
-        } else if (queue_work(workqueue, &endp->scheduler))
-                kref_get(&endp->kref);
-        return;
+       if (queue_delayed_work(workqueue, &endp->scheduler, delta))
+               kref_get(&endp->kref);
 }
 
 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
@@ -410,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132)
 
 static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta)) {
-                        kref_get(&u132->kref);
-                }
-        } else if (queue_work(workqueue, &u132->monitor))
-                kref_get(&u132->kref);
-        return;
+       if (queue_delayed_work(workqueue, &u132->monitor, delta))
+               kref_get(&u132->kref);
 }
 
 static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(workqueue, &u132->monitor, delta))
-                        return;
-        } else if (queue_work(workqueue, &u132->monitor))
-                return;
-        kref_put(&u132->kref, u132_hcd_delete);
-        return;
+       if (!queue_delayed_work(workqueue, &u132->monitor, delta))
+               kref_put(&u132->kref, u132_hcd_delete);
 }
 
 static void u132_monitor_cancel_work(struct u132 *u132)
@@ -489,9 +475,9 @@ static int read_roothub_info(struct u132 *u132)
         return 0;
 }
 
-static void u132_hcd_monitor_work(void *data)
+static void u132_hcd_monitor_work(struct work_struct *work)
 {
-        struct u132 *u132 = data;
+        struct u132 *u132 = container_of(work, struct u132, monitor.work);
         if (u132->going > 1) {
                 dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
                         , u132->going);
@@ -1315,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
         }
 }
 
-static void u132_hcd_ring_work_scheduler(void *data);
-static void u132_hcd_endp_work_scheduler(void *data);
 /*
 * this work function is only executed from the work queue
 *
 */
-static void u132_hcd_ring_work_scheduler(void *data)
+static void u132_hcd_ring_work_scheduler(struct work_struct *work)
 {
-        struct u132_ring *ring = data;
+        struct u132_ring *ring =
+               container_of(work, struct u132_ring, scheduler.work);
         struct u132 *u132 = ring->u132;
         down(&u132->scheduler_lock);
         if (ring->in_use) {
@@ -1382,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data)
         }
 }
 
-static void u132_hcd_endp_work_scheduler(void *data)
+static void u132_hcd_endp_work_scheduler(struct work_struct *work)
 {
         struct u132_ring *ring;
-        struct u132_endp *endp = data;
+        struct u132_endp *endp =
+               container_of(work, struct u132_endp, scheduler.work);
         struct u132 *u132 = endp->u132;
         down(&u132->scheduler_lock);
         ring = endp->ring;
@@ -1943,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -2032,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         endp->dequeueing = 0;
@@ -2117,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132,
         if (!endp) {
                 return -ENOMEM;
         }
-        INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp);
+        INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
         spin_lock_init(&endp->queue_lock.slock);
         INIT_LIST_HEAD(&endp->urb_more);
         ring = endp->ring = &u132->ring[0];
@@ -3096,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
                 ring->number = rings + 1;
                 ring->length = 0;
                 ring->curr_endp = NULL;
-                INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler,
-                        (void *)ring);
+                INIT_DELAYED_WORK(&ring->scheduler,
+                                 u132_hcd_ring_work_scheduler);
         } down(&u132->sw_lock);
-        INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132);
+        INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
         while (ports-- > 0) {
                 struct u132_port *port = &u132->port[ports];
                 port->u132 = u132;
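
The u132-hcd queueing helpers above (and the ftdi-elan ones further down) collapse their "delta > 0 ? queue_delayed_work : queue_work" branches into a single call, since a zero delay queues the work immediately; the return value is still nonzero only when the item was not already pending, which is what the kref bookkeeping keys on. Generically (wq, obj, scheduler and obj_delete are placeholder names):

    /* take a reference only when the work item was newly queued */
    if (queue_delayed_work(wq, &obj->scheduler, delta))
            kref_get(&obj->kref);

    /* and on requeue paths, drop the reference if queueing failed
     * because the item was already pending:
     *     if (!queue_delayed_work(wq, &obj->scheduler, delta))
     *             kref_put(&obj->kref, obj_delete);
     */
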
index 226bf3de8edd5ce092c551f0a90daa96ba2fc9e4..e87692c31be470b6df00d7a305154918bfa82ce7 100644 (file)
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(debug, "Debug level");
 static char *errbuf;
 #define ERRBUF_LEN    (32 * 1024)
 
-static kmem_cache_t *uhci_up_cachep;   /* urb_priv */
+static struct kmem_cache *uhci_up_cachep;      /* urb_priv */
 
 static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
 static void wakeup_rh(struct uhci_hcd *uhci);
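
Slab cache declarations switch from the kmem_cache_t typedef to the underlying struct kmem_cache, as with uhci_up_cachep above and the ETRAX caches earlier; creation and allocation keep their shape, only the GFP spelling changes at the call sites. A generic sketch against the slab API of this series (names, object type and flags illustrative):

    static struct kmem_cache *my_cache;         /* was: kmem_cache_t *my_cache; */

    static int my_cache_init(void)
    {
            my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj),
                                         0, SLAB_HWCACHE_ALIGN, NULL, NULL);
            return my_cache ? 0 : -ENOMEM;
    }
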
index 06115f22a4fac7420113b7b30d134b08cf3aa6b3..30b88459ac7dc544cba4b89819a2ad362471967a 100644 (file)
@@ -498,7 +498,7 @@ static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
 {
        struct urb_priv *urbp;
 
-       urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
+       urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC);
        if (!urbp)
                return NULL;
 
index 0096373b5f98b974f5c67c9341f67e7718c877a2..909138e5aa04029685ec901d31ecf51c15f9376e 100644 (file)
@@ -152,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
        if (!acecad || !input_dev)
                goto fail1;
 
-       acecad->data = usb_buffer_alloc(dev, 8, SLAB_KERNEL, &acecad->data_dma);
+       acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma);
        if (!acecad->data)
                goto fail1;
 
index bf428184608fe435cffbe3b15e2d7df401524f5c..9f52429ce6543156d328ef4ac947201bf87a726b 100644 (file)
@@ -1988,7 +1988,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
                goto fail1;
 
        aiptek->data = usb_buffer_alloc(usbdev, AIPTEK_PACKET_LENGTH,
-                                       SLAB_ATOMIC, &aiptek->data_dma);
+                                       GFP_ATOMIC, &aiptek->data_dma);
        if (!aiptek->data)
                goto fail1;
 
index ff23318dc301aeb7eecd626395015af70f702327..b724e36f7b9203385039fff0b14c382b6a170dec 100644 (file)
@@ -592,7 +592,7 @@ static void ati_remote_irq_in(struct urb *urb)
                        __FUNCTION__, urb->status);
        }
 
-       retval = usb_submit_urb(urb, SLAB_ATOMIC);
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
        if (retval)
                dev_err(&ati_remote->interface->dev, "%s: usb_submit_urb()=%d\n",
                        __FUNCTION__, retval);
@@ -604,12 +604,12 @@ static void ati_remote_irq_in(struct urb *urb)
 static int ati_remote_alloc_buffers(struct usb_device *udev,
                                    struct ati_remote *ati_remote)
 {
-       ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC,
+       ati_remote->inbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
                                             &ati_remote->inbuf_dma);
        if (!ati_remote->inbuf)
                return -1;
 
-       ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, SLAB_ATOMIC,
+       ati_remote->outbuf = usb_buffer_alloc(udev, DATA_BUFSIZE, GFP_ATOMIC,
                                              &ati_remote->outbuf_dma);
        if (!ati_remote->outbuf)
                return -1;
index a49644b7c58e1492a19d21d533105254138f78f7..f1d0e1d69828d4db297f22086001a1ee86662445 100644 (file)
@@ -969,9 +969,10 @@ static void hid_retry_timeout(unsigned long _hid)
 }
 
 /* Workqueue routine to reset the device or clear a halt */
-static void hid_reset(void *_hid)
+static void hid_reset(struct work_struct *work)
 {
-       struct hid_device *hid = (struct hid_device *) _hid;
+       struct hid_device *hid =
+               container_of(work, struct hid_device, reset_work);
        int rc_lock, rc = 0;
 
        if (test_bit(HID_CLEAR_HALT, &hid->iofl)) {
@@ -1078,7 +1079,7 @@ static void hid_irq_in(struct urb *urb)
                        warn("input irq status %d received", urb->status);
        }
 
-       status = usb_submit_urb(urb, SLAB_ATOMIC);
+       status = usb_submit_urb(urb, GFP_ATOMIC);
        if (status) {
                clear_bit(HID_IN_RUNNING, &hid->iofl);
                if (status != -EPERM) {
@@ -1863,13 +1864,13 @@ static void hid_find_max_report(struct hid_device *hid, unsigned int type, int *
 
 static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
 {
-       if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->inbuf_dma)))
+       if (!(hid->inbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->inbuf_dma)))
                return -1;
-       if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->outbuf_dma)))
+       if (!(hid->outbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->outbuf_dma)))
                return -1;
-       if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), SLAB_ATOMIC, &hid->cr_dma)))
+       if (!(hid->cr = usb_buffer_alloc(dev, sizeof(*(hid->cr)), GFP_ATOMIC, &hid->cr_dma)))
                return -1;
-       if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, SLAB_ATOMIC, &hid->ctrlbuf_dma)))
+       if (!(hid->ctrlbuf = usb_buffer_alloc(dev, hid->bufsize, GFP_ATOMIC, &hid->ctrlbuf_dma)))
                return -1;
 
        return 0;
@@ -2043,7 +2044,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 
        init_waitqueue_head(&hid->wait);
 
-       INIT_WORK(&hid->reset_work, hid_reset, hid);
+       INIT_WORK(&hid->reset_work, hid_reset);
        setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid);
 
        spin_lock_init(&hid->inlock);
index 50aa8108a50b27d82b6cdb95a6d3f07b7cc06c67..98bd323369c72f6f6ab120073178c7bbdd144370 100644 (file)
@@ -456,7 +456,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
        remote->in_endpoint = endpoint;
        remote->toggle = -1;    /* Set to -1 so we will always not match the toggle from the first remote message. */
 
-       remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, SLAB_ATOMIC, &remote->in_dma);
+       remote->in_buffer = usb_buffer_alloc(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma);
        if (!remote->in_buffer) {
                retval = -ENOMEM;
                goto fail1;
index 79a85d46cb13b3f7bdd2a3ed1c783c7672ba4aa4..92c4e07da4c816e07759c395d93b98ab69f01225 100644 (file)
@@ -164,7 +164,7 @@ static int mtouchusb_alloc_buffers(struct usb_device *udev, struct mtouch_usb *m
        dbg("%s - called", __FUNCTION__);
 
        mtouch->data = usb_buffer_alloc(udev, MTOUCHUSB_REPORT_DATA_SIZE,
-                                       SLAB_ATOMIC, &mtouch->data_dma);
+                                       GFP_ATOMIC, &mtouch->data_dma);
 
        if (!mtouch->data)
                return -1;
index 0bf91778c40def170820b04b41ae949dc66a8b13..fea97e5437f8b87c6676f2aaa065401975070151 100644 (file)
@@ -277,12 +277,12 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
 static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
 {
        pm->data = usb_buffer_alloc(udev, POWERMATE_PAYLOAD_SIZE_MAX,
-                                   SLAB_ATOMIC, &pm->data_dma);
+                                   GFP_ATOMIC, &pm->data_dma);
        if (!pm->data)
                return -1;
 
        pm->configcr = usb_buffer_alloc(udev, sizeof(*(pm->configcr)),
-                                       SLAB_ATOMIC, &pm->configcr_dma);
+                                       GFP_ATOMIC, &pm->configcr_dma);
        if (!pm->configcr)
                return -1;
 
index 05c0d1ca39ab2497c72f6bc80955c36757850b24..2a314b0659225cc4fff70fe716427027fdd06e4f 100644 (file)
@@ -248,7 +248,7 @@ static int touchkit_alloc_buffers(struct usb_device *udev,
                                  struct touchkit_usb *touchkit)
 {
        touchkit->data = usb_buffer_alloc(udev, TOUCHKIT_REPORT_DATA_SIZE,
-                                         SLAB_ATOMIC, &touchkit->data_dma);
+                                         GFP_ATOMIC, &touchkit->data_dma);
 
        if (!touchkit->data)
                return -1;
index dac88640eab637532e37334afb2c1bf0a6486ab1..8505824848f6b7995bc40bd1cab9ef1ad3038a9c 100644 (file)
@@ -122,7 +122,7 @@ static void usb_kbd_irq(struct urb *urb)
        memcpy(kbd->old, kbd->new, 8);
 
 resubmit:
-       i = usb_submit_urb (urb, SLAB_ATOMIC);
+       i = usb_submit_urb (urb, GFP_ATOMIC);
        if (i)
                err ("can't resubmit intr, %s-%s/input0, status %d",
                                kbd->usbdev->bus->bus_name,
@@ -196,11 +196,11 @@ static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
                return -1;
        if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
                return -1;
-       if (!(kbd->new = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &kbd->new_dma)))
+       if (!(kbd->new = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
                return -1;
-       if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), SLAB_ATOMIC, &kbd->cr_dma)))
+       if (!(kbd->cr = usb_buffer_alloc(dev, sizeof(struct usb_ctrlrequest), GFP_ATOMIC, &kbd->cr_dma)))
                return -1;
-       if (!(kbd->leds = usb_buffer_alloc(dev, 1, SLAB_ATOMIC, &kbd->leds_dma)))
+       if (!(kbd->leds = usb_buffer_alloc(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
                return -1;
 
        return 0;
index 68a55642c082da8f77b2ef28c8eab73e5ef90e15..64a33e420cfbabb3b6758f25353c67b4b02ee55e 100644 (file)
@@ -86,7 +86,7 @@ static void usb_mouse_irq(struct urb *urb)
 
        input_sync(dev);
 resubmit:
-       status = usb_submit_urb (urb, SLAB_ATOMIC);
+       status = usb_submit_urb (urb, GFP_ATOMIC);
        if (status)
                err ("can't resubmit intr, %s-%s/input0, status %d",
                                mouse->usbdev->bus->bus_name,
@@ -137,7 +137,7 @@ static int usb_mouse_probe(struct usb_interface *intf, const struct usb_device_i
        if (!mouse || !input_dev)
                goto fail1;
 
-       mouse->data = usb_buffer_alloc(dev, 8, SLAB_ATOMIC, &mouse->data_dma);
+       mouse->data = usb_buffer_alloc(dev, 8, GFP_ATOMIC, &mouse->data_dma);
        if (!mouse->data)
                goto fail1;
 
index 49704d4ed0e296f7a850761a0c9bb84784f27797..7f3c57da9bc0767abf96a69e87f02b13fa296249 100644 (file)
@@ -680,7 +680,7 @@ static int usbtouch_probe(struct usb_interface *intf,
                type->process_pkt = usbtouch_process_pkt;
 
        usbtouch->data = usb_buffer_alloc(udev, type->rept_size,
-                                         SLAB_KERNEL, &usbtouch->data_dma);
+                                         GFP_KERNEL, &usbtouch->data_dma);
        if (!usbtouch->data)
                goto out_free;
 
index df97e5c803f9b08ab2a23f5552486c8541e430fc..e4bc76ebc83567ad325b72563c5b7c1a0a412b64 100644 (file)
@@ -325,7 +325,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                goto fail1;
 
        xpad->idata = usb_buffer_alloc(udev, XPAD_PKT_LEN,
-                                      SLAB_ATOMIC, &xpad->idata_dma);
+                                      GFP_ATOMIC, &xpad->idata_dma);
        if (!xpad->idata)
                goto fail1;
 
index 2268ca311ade7d4d0ee5246ce9141c17940e6b77..caff8e6d74480eec88e8355499f956f79136f0da 100644 (file)
@@ -874,17 +874,17 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        /* allocate usb buffers */
        yld->irq_data = usb_buffer_alloc(udev, USB_PKT_LEN,
-                                       SLAB_ATOMIC, &yld->irq_dma);
+                                       GFP_ATOMIC, &yld->irq_dma);
        if (yld->irq_data == NULL)
                return usb_cleanup(yld, -ENOMEM);
 
        yld->ctl_data = usb_buffer_alloc(udev, USB_PKT_LEN,
-                                       SLAB_ATOMIC, &yld->ctl_dma);
+                                       GFP_ATOMIC, &yld->ctl_dma);
        if (!yld->ctl_data)
                return usb_cleanup(yld, -ENOMEM);
 
        yld->ctl_req = usb_buffer_alloc(udev, sizeof(*(yld->ctl_req)),
-                                       SLAB_ATOMIC, &yld->ctl_req_dma);
+                                       GFP_ATOMIC, &yld->ctl_req_dma);
        if (yld->ctl_req == NULL)
                return usb_cleanup(yld, -ENOMEM);
 
index ba30ca6a14aa8c373eb6793102d7804511db7121..02cbb7fff24f0c5e812450fc476b4ea2bc5f804b 100644 (file)
@@ -76,7 +76,7 @@ struct appledisplay {
        char *urbdata;                  /* interrupt URB data buffer */
        char *msgdata;                  /* control message data buffer */
 
-       struct work_struct work;
+       struct delayed_work work;
        int button_pressed;
        spinlock_t lock;
 };
@@ -117,7 +117,7 @@ static void appledisplay_complete(struct urb *urb)
        case ACD_BTN_BRIGHT_UP:
        case ACD_BTN_BRIGHT_DOWN:
                pdata->button_pressed = 1;
-               queue_work(wq, &pdata->work);
+               queue_delayed_work(wq, &pdata->work, 0);
                break;
        case ACD_BTN_NONE:
        default:
@@ -184,9 +184,10 @@ static struct backlight_properties appledisplay_bl_data = {
        .max_brightness = 0xFF
 };
 
-static void appledisplay_work(void *private)
+static void appledisplay_work(struct work_struct *work)
 {
-       struct appledisplay *pdata = private;
+       struct appledisplay *pdata =
+               container_of(work, struct appledisplay, work.work);
        int retval;
 
        up(&pdata->bd->sem);
@@ -238,7 +239,7 @@ static int appledisplay_probe(struct usb_interface *iface,
        pdata->udev = udev;
 
        spin_lock_init(&pdata->lock);
-       INIT_WORK(&pdata->work, appledisplay_work, pdata);
+       INIT_DELAYED_WORK(&pdata->work, appledisplay_work);
 
        /* Allocate buffer for control messages */
        pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL);
index cb0ba3107d7f76949687f268fd1037fcd0ea7e48..18b1925032a859fa4ce6b436359a15c7c81cb463 100644 (file)
@@ -156,9 +156,9 @@ struct usb_ftdi {
         struct usb_device *udev;
         struct usb_interface *interface;
         struct usb_class_driver *class;
-        struct work_struct status_work;
-        struct work_struct command_work;
-        struct work_struct respond_work;
+        struct delayed_work status_work;
+        struct delayed_work command_work;
+        struct delayed_work respond_work;
         struct u132_platform_data platform_data;
         struct resource resources[0];
         struct platform_device platform_dev;
@@ -210,23 +210,14 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi)
 
 static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        return;
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+       if (!queue_delayed_work(status_queue, &ftdi->status_work, delta))
+               kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(status_queue, &ftdi->status_work))
-                kref_get(&ftdi->kref);
-        return;
+       if (queue_delayed_work(status_queue, &ftdi->status_work, delta))
+               kref_get(&ftdi->kref);
 }
 
 static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
@@ -237,25 +228,14 @@ static void ftdi_status_cancel_work(struct usb_ftdi *ftdi)
 
 static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        return;
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+       if (!queue_delayed_work(command_queue, &ftdi->command_work, delta))
+               kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(command_queue, &ftdi->command_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(command_queue, &ftdi->command_work))
-                kref_get(&ftdi->kref);
-        return;
+       if (queue_delayed_work(command_queue, &ftdi->command_work, delta))
+               kref_get(&ftdi->kref);
 }
 
 static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
@@ -267,25 +247,14 @@ static void ftdi_command_cancel_work(struct usb_ftdi *ftdi)
 static void ftdi_response_requeue_work(struct usb_ftdi *ftdi,
         unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        return;
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                return;
-        kref_put(&ftdi->kref, ftdi_elan_delete);
-        return;
+       if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+               kref_put(&ftdi->kref, ftdi_elan_delete);
 }
 
 static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta)
 {
-        if (delta > 0) {
-                if (queue_delayed_work(respond_queue, &ftdi->respond_work,
-                        delta))
-                        kref_get(&ftdi->kref);
-        } else if (queue_work(respond_queue, &ftdi->respond_work))
-                kref_get(&ftdi->kref);
-        return;
+       if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta))
+               kref_get(&ftdi->kref);
 }
 
 static void ftdi_response_cancel_work(struct usb_ftdi *ftdi)
@@ -475,9 +444,11 @@ static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi)
         return;
 }
 
-static void ftdi_elan_command_work(void *data)
+static void ftdi_elan_command_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+               container_of(work, struct usb_ftdi, command_work.work);
+
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
@@ -500,9 +471,10 @@ static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi)
         return;
 }
 
-static void ftdi_elan_respond_work(void *data)
+static void ftdi_elan_respond_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+               container_of(work, struct usb_ftdi, respond_work.work);
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
                 return;
@@ -534,9 +506,10 @@ static void ftdi_elan_respond_work(void *data)
 * after the FTDI has been synchronized
 *
 */
-static void ftdi_elan_status_work(void *data)
+static void ftdi_elan_status_work(struct work_struct *work)
 {
-        struct usb_ftdi *ftdi = data;
+        struct usb_ftdi *ftdi =
+               container_of(work, struct usb_ftdi, status_work.work);
         int work_delay_in_msec = 0;
         if (ftdi->disconnected > 0) {
                 ftdi_elan_put_kref(ftdi);
@@ -2677,12 +2650,9 @@ static int ftdi_elan_probe(struct usb_interface *interface,
                 ftdi->class = NULL;
                 dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a"
                         "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber);
-                INIT_WORK(&ftdi->status_work, ftdi_elan_status_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->command_work, ftdi_elan_command_work,
-                        (void *)ftdi);
-                INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work,
-                        (void *)ftdi);
+                INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work);
+                INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work);
+                INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work);
                 ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000));
                 return 0;
         } else {
index 9110793f81d38dc0132cfb90035342d572b10a7a..371bf2b1197db8761ff01e20fe25ea97ce5fbb65 100644 (file)
@@ -81,8 +81,8 @@ struct interfacekit {
        unsigned char *data;
        dma_addr_t data_dma;
 
-       struct work_struct do_notify;
-       struct work_struct do_resubmit;
+       struct delayed_work do_notify;
+       struct delayed_work do_resubmit;
        unsigned long input_events;
        unsigned long sensor_events;
 };
@@ -374,19 +374,20 @@ static void interfacekit_irq(struct urb *urb)
        }
 
        if (kit->input_events || kit->sensor_events)
-               schedule_work(&kit->do_notify);
+               schedule_delayed_work(&kit->do_notify, 0);
 
 resubmit:
-       status = usb_submit_urb(urb, SLAB_ATOMIC);
+       status = usb_submit_urb(urb, GFP_ATOMIC);
        if (status)
                err("can't resubmit intr, %s-%s/interfacekit0, status %d",
                        kit->udev->bus->bus_name,
                        kit->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-       struct interfacekit *kit = data;
+       struct interfacekit *kit =
+               container_of(work, struct interfacekit, do_notify.work);
        int i;
        char sysfs_file[8];
 
@@ -405,9 +406,11 @@ static void do_notify(void *data)
        }
 }
 
-static void do_resubmit(void *data)
+static void do_resubmit(struct work_struct *work)
 {
-       set_outputs(data);
+       struct interfacekit *kit =
+               container_of(work, struct interfacekit, do_resubmit.work);
+       set_outputs(kit);
 }
 
 #define show_set_output(value)         \
@@ -565,7 +568,7 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic
 
        kit->dev_no = -1;
        kit->ifkit = ifkit;
-       kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &kit->data_dma);
+       kit->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &kit->data_dma);
        if (!kit->data)
                goto out;
 
@@ -575,8 +578,8 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic
 
        kit->udev = usb_get_dev(dev);
        kit->intf = intf;
-       INIT_WORK(&kit->do_notify, do_notify, kit);
-       INIT_WORK(&kit->do_resubmit, do_resubmit, kit);
+       INIT_DELAYED_WORK(&kit->do_notify, do_notify);
+       INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit);
        usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data,
                        maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
                        interfacekit_irq, kit, endpoint->bInterval);
index c3469b0a67c2d6d63059230741f74e12f60f13ad..5727e1ea2f91db1cd454545d16190ad32535779b 100644 (file)
@@ -41,7 +41,7 @@ struct motorcontrol {
        unsigned char *data;
        dma_addr_t data_dma;
 
-       struct work_struct do_notify;
+       struct delayed_work do_notify;
        unsigned long input_events;
        unsigned long speed_events;
        unsigned long exceed_events;
@@ -148,10 +148,10 @@ static void motorcontrol_irq(struct urb *urb)
                set_bit(1, &mc->exceed_events);
 
        if (mc->input_events || mc->exceed_events || mc->speed_events)
-               schedule_work(&mc->do_notify);
+               schedule_delayed_work(&mc->do_notify, 0);
 
 resubmit:
-       status = usb_submit_urb(urb, SLAB_ATOMIC);
+       status = usb_submit_urb(urb, GFP_ATOMIC);
        if (status)
                dev_err(&mc->intf->dev,
                        "can't resubmit intr, %s-%s/motorcontrol0, status %d",
@@ -159,9 +159,10 @@ resubmit:
                        mc->udev->devpath, status);
 }
 
-static void do_notify(void *data)
+static void do_notify(struct work_struct *work)
 {
-       struct motorcontrol *mc = data;
+       struct motorcontrol *mc =
+               container_of(work, struct motorcontrol, do_notify.work);
        int i;
        char sysfs_file[8];
 
@@ -337,7 +338,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic
                goto out;
 
        mc->dev_no = -1;
-       mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, SLAB_ATOMIC, &mc->data_dma);
+       mc->data = usb_buffer_alloc(dev, URB_INT_SIZE, GFP_ATOMIC, &mc->data_dma);
        if (!mc->data)
                goto out;
 
@@ -348,7 +349,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic
        mc->udev = usb_get_dev(dev);
        mc->intf = intf;
        mc->acceleration[0] = mc->acceleration[1] = 10;
-       INIT_WORK(&mc->do_notify, do_notify, mc);
+       INIT_DELAYED_WORK(&mc->do_notify, do_notify);
        usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data,
                        maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp,
                        motorcontrol_irq, mc, endpoint->bInterval);
index 194065dbb51f0866831b0976b3da05c167178c06..fb321864a92da4b85e551b9534582c31eecbac0b 100644 (file)
@@ -213,7 +213,7 @@ static struct urb *simple_alloc_urb (
 
        if (bytes < 0)
                return NULL;
-       urb = usb_alloc_urb (0, SLAB_KERNEL);
+       urb = usb_alloc_urb (0, GFP_KERNEL);
        if (!urb)
                return urb;
        usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL);
@@ -223,7 +223,7 @@ static struct urb *simple_alloc_urb (
        urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
        if (usb_pipein (pipe))
                urb->transfer_flags |= URB_SHORT_NOT_OK;
-       urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+       urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
                        &urb->transfer_dma);
        if (!urb->transfer_buffer) {
                usb_free_urb (urb);
@@ -315,7 +315,7 @@ static int simple_io (
                init_completion (&completion);
                if (usb_pipeout (urb->pipe))
                        simple_fill_buf (urb);
-               if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0)
+               if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0)
                        break;
 
                /* NOTE:  no timeouts; can't be broken out of by interrupt */
@@ -374,7 +374,7 @@ alloc_sglist (int nents, int max, int vary)
        unsigned                i;
        unsigned                size = max;
 
-       sg = kmalloc (nents * sizeof *sg, SLAB_KERNEL);
+       sg = kmalloc (nents * sizeof *sg, GFP_KERNEL);
        if (!sg)
                return NULL;
 
@@ -382,7 +382,7 @@ alloc_sglist (int nents, int max, int vary)
                char            *buf;
                unsigned        j;
 
-               buf = kzalloc (size, SLAB_KERNEL);
+               buf = kzalloc (size, GFP_KERNEL);
                if (!buf) {
                        free_sglist (sg, i);
                        return NULL;
@@ -428,7 +428,7 @@ static int perform_sglist (
                                (udev->speed == USB_SPEED_HIGH)
                                        ? (INTERRUPT_RATE << 3)
                                        : INTERRUPT_RATE,
-                               sg, nents, 0, SLAB_KERNEL);
+                               sg, nents, 0, GFP_KERNEL);
                
                if (retval)
                        break;
@@ -819,7 +819,7 @@ error:
 
        /* resubmit if we need to, else mark this as done */
        if ((status == 0) && (ctx->pending < ctx->count)) {
-               if ((status = usb_submit_urb (urb, SLAB_ATOMIC)) != 0) {
+               if ((status = usb_submit_urb (urb, GFP_ATOMIC)) != 0) {
                        dbg ("can't resubmit ctrl %02x.%02x, err %d",
                                reqp->bRequestType, reqp->bRequest, status);
                        urb->dev = NULL;
@@ -855,7 +855,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
         * as with bulk/intr sglists, sglen is the queue depth; it also
         * controls which subtests run (more tests than sglen) or rerun.
         */
-       urb = kcalloc(param->sglen, sizeof(struct urb *), SLAB_KERNEL);
+       urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
        if (!urb)
                return -ENOMEM;
        for (i = 0; i < param->sglen; i++) {
@@ -981,7 +981,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
                if (!u)
                        goto cleanup;
 
-               reqp = usb_buffer_alloc (udev, sizeof *reqp, SLAB_KERNEL,
+               reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL,
                                &u->setup_dma);
                if (!reqp)
                        goto cleanup;
@@ -999,7 +999,7 @@ test_ctrl_queue (struct usbtest_dev *dev, struct usbtest_param *param)
        context.urb = urb;
        spin_lock_irq (&context.lock);
        for (i = 0; i < param->sglen; i++) {
-               context.status = usb_submit_urb (urb [i], SLAB_ATOMIC);
+               context.status = usb_submit_urb (urb [i], GFP_ATOMIC);
                if (context.status != 0) {
                        dbg ("can't submit urb[%d], status %d",
                                        i, context.status);
@@ -1041,7 +1041,7 @@ static void unlink1_callback (struct urb *urb)
 
        // we "know" -EPIPE (stall) never happens
        if (!status)
-               status = usb_submit_urb (urb, SLAB_ATOMIC);
+               status = usb_submit_urb (urb, GFP_ATOMIC);
        if (status) {
                urb->status = status;
                complete ((struct completion *) urb->context);
@@ -1067,7 +1067,7 @@ static int unlink1 (struct usbtest_dev *dev, int pipe, int size, int async)
         * FIXME want additional tests for when endpoint is STALLing
         * due to errors, or is just NAKing requests.
         */
-       if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) {
+       if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) {
                dev_dbg (&dev->intf->dev, "submit fail %d\n", retval);
                return retval;
        }
@@ -1251,7 +1251,7 @@ static int ctrl_out (struct usbtest_dev *dev,
        if (length < 1 || length > 0xffff || vary >= length)
                return -EINVAL;
 
-       buf = kmalloc(length, SLAB_KERNEL);
+       buf = kmalloc(length, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
@@ -1403,7 +1403,7 @@ static struct urb *iso_alloc_urb (
        maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
        packets = (bytes + maxp - 1) / maxp;
 
-       urb = usb_alloc_urb (packets, SLAB_KERNEL);
+       urb = usb_alloc_urb (packets, GFP_KERNEL);
        if (!urb)
                return urb;
        urb->dev = udev;
@@ -1411,7 +1411,7 @@ static struct urb *iso_alloc_urb (
 
        urb->number_of_packets = packets;
        urb->transfer_buffer_length = bytes;
-       urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL,
+       urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL,
                        &urb->transfer_dma);
        if (!urb->transfer_buffer) {
                usb_free_urb (urb);
@@ -1481,7 +1481,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param,
        spin_lock_irq (&context.lock);
        for (i = 0; i < param->sglen; i++) {
                ++context.pending;
-               status = usb_submit_urb (urbs [i], SLAB_ATOMIC);
+               status = usb_submit_urb (urbs [i], GFP_ATOMIC);
                if (status < 0) {
                        ERROR (dev, "submit iso[%d], error %d\n", i, status);
                        if (i == 0) {
@@ -1900,7 +1900,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
        }
 #endif
 
-       dev = kzalloc(sizeof(*dev), SLAB_KERNEL);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        info = (struct usbtest_info *) id->driver_info;
@@ -1910,7 +1910,7 @@ usbtest_probe (struct usb_interface *intf, const struct usb_device_id *id)
        dev->intf = intf;
 
        /* cacheline-aligned scratch for i/o */
-       if ((dev->buf = kmalloc (TBUF_SIZE, SLAB_KERNEL)) == NULL) {
+       if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) {
                kfree (dev);
                return -ENOMEM;
        }
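
The usbtest changes above are purely a flag rename: the old SLAB_KERNEL/SLAB_ATOMIC aliases are replaced by the gfp_t constants GFP_KERNEL and GFP_ATOMIC that the allocators and URB functions actually take. A short sketch of how the two are usually split between contexts; the function names are hypothetical:

        #include <linux/usb.h>

        /* process context: may sleep, so GFP_KERNEL is fine */
        static struct urb *my_alloc_urb(void)
        {
                return usb_alloc_urb(0, GFP_KERNEL);
        }

        /* URB completion runs in interrupt context: must not sleep, use GFP_ATOMIC */
        static void my_complete(struct urb *urb)
        {
                int status = usb_submit_urb(urb, GFP_ATOMIC);

                if (status)
                        pr_debug("resubmit failed: %d\n", status);
        }
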
index 7a2346c53284b93a502b47b5911159f343a70ecc..05cf2c9a8f84c3fbb42ffaaabf971d8bd2a526ea 100644 (file)
@@ -50,7 +50,7 @@ struct mon_event_text {
 
 #define SLAB_NAME_SZ  30
 struct mon_reader_text {
-       kmem_cache_t *e_slab;
+       struct kmem_cache *e_slab;
        int nevents;
        struct list_head e_list;
        struct mon_reader r;    /* In C, parent class can be placed anywhere */
@@ -63,7 +63,7 @@ struct mon_reader_text {
        char slab_name[SLAB_NAME_SZ];
 };
 
-static void mon_text_ctor(void *, kmem_cache_t *, unsigned long);
+static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
 
 /*
  * mon_text_submit
@@ -147,7 +147,7 @@ static void mon_text_event(struct mon_reader_text *rp, struct urb *urb,
        stamp = mon_get_timestamp();
 
        if (rp->nevents >= EVENT_MAX ||
-           (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) {
+           (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
                rp->r.m_bus->cnt_text_lost++;
                return;
        }
@@ -188,7 +188,7 @@ static void mon_text_error(void *data, struct urb *urb, int error)
        struct mon_event_text *ep;
 
        if (rp->nevents >= EVENT_MAX ||
-           (ep = kmem_cache_alloc(rp->e_slab, SLAB_ATOMIC)) == NULL) {
+           (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) {
                rp->r.m_bus->cnt_text_lost++;
                return;
        }
@@ -450,7 +450,7 @@ const struct file_operations mon_fops_text = {
 /*
  * Slab interface: constructor.
  */
-static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags)
+static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags)
 {
        /*
         * Nothing to initialize. No, really!
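
usbmon switches the typedef kmem_cache_t to the spelled-out struct kmem_cache, the same rename applied to the filesystem inode caches further down. A minimal sketch of the resulting usage, assuming the six-argument kmem_cache_create() of this kernel generation; struct my_event and the function names are hypothetical:

        #include <linux/slab.h>

        struct my_event {
                int type;
        };

        static struct kmem_cache *my_cache;     /* was: kmem_cache_t *my_cache */

        static int my_init(void)
        {
                my_cache = kmem_cache_create("my_event", sizeof(struct my_event),
                                             0, 0, NULL, NULL);
                return my_cache ? 0 : -ENOMEM;
        }

        static struct my_event *my_get_event(void)
        {
                /* called from atomic context, hence GFP_ATOMIC (was SLAB_ATOMIC) */
                return kmem_cache_alloc(my_cache, GFP_ATOMIC);
        }
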
index 907b820a5faf4ffd9cea63f1b17c588f8f69ab79..4852012735f66c207030abb7676136ff43a0f3cd 100644 (file)
@@ -345,7 +345,7 @@ static void catc_irq_done(struct urb *urb)
                } 
        }
 resubmit:
-       status = usb_submit_urb (urb, SLAB_ATOMIC);
+       status = usb_submit_urb (urb, GFP_ATOMIC);
        if (status)
                err ("can't resubmit intr, %s-%s, status %d",
                                catc->usbdev->bus->bus_name,
index 7c906a43e4973b081b9cdd20d068913e43a930bd..fa78326d0bf0c695f1771929d4e90b12e13ef4ff 100644 (file)
@@ -222,7 +222,7 @@ struct kaweth_device
        int suspend_lowmem_ctrl;
        int linkstate;
        int opened;
-       struct work_struct lowmem_work;
+       struct delayed_work lowmem_work;
 
        struct usb_device *dev;
        struct net_device *net;
@@ -530,9 +530,10 @@ resubmit:
        kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC);
 }
 
-static void kaweth_resubmit_tl(void *d)
+static void kaweth_resubmit_tl(struct work_struct *work)
 {
-       struct kaweth_device *kaweth = (struct kaweth_device *)d;
+       struct kaweth_device *kaweth =
+               container_of(work, struct kaweth_device, lowmem_work.work);
 
        if (IS_BLOCKED(kaweth->status))
                return;
@@ -1126,7 +1127,7 @@ err_fw:
 
        /* kaweth is zeroed as part of alloc_netdev */
 
-       INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth);
+       INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl);
 
        SET_MODULE_OWNER(netdev);
 
index a77410562e1208d76e7124386897acdba7f33efe..4936359545134db670434fbba9e011a97f943184 100644 (file)
@@ -383,7 +383,7 @@ static void nc_ensure_sync(struct usbnet *dev)
                int                     status;
 
                /* Send a flush */
-               urb = usb_alloc_urb(0, SLAB_ATOMIC);
+               urb = usb_alloc_urb(0, GFP_ATOMIC);
                if (!urb)
                        return;
 
index 69eb0db399df09f1c4ff5cb5dc9ec92d03974412..d48c024cff595ff2987be71fb33ab536c05ad5f9 100644 (file)
@@ -856,7 +856,7 @@ static void intr_callback(struct urb *urb)
                pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
        }
 
-       status = usb_submit_urb(urb, SLAB_ATOMIC);
+       status = usb_submit_urb(urb, GFP_ATOMIC);
        if (status == -ENODEV)
                netif_device_detach(pegasus->net);
        if (status && netif_msg_timer(pegasus))
@@ -1281,9 +1281,9 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
 static struct workqueue_struct *pegasus_workqueue = NULL;
 #define CARRIER_CHECK_DELAY (2 * HZ)
 
-static void check_carrier(void *data)
+static void check_carrier(struct work_struct *work)
 {
-       pegasus_t *pegasus = data;
+       pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
        set_carrier(pegasus->net);
        if (!(pegasus->flags & PEGASUS_UNPLUG)) {
                queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
@@ -1319,7 +1319,7 @@ static int pegasus_probe(struct usb_interface *intf,
 
        tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus);
 
-       INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus);
+       INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier);
 
        pegasus->intf = intf;
        pegasus->usb = dev;
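
pegasus keeps its carrier check on a private workqueue; after the conversion the item is a struct delayed_work and the callback re-arms itself with queue_delayed_work(). A sketch of that self-rearming pattern under the new API; the names and the 2*HZ period are only illustrative:

        static struct workqueue_struct *my_wq;

        struct my_adapter {
                struct delayed_work carrier_check;
        };

        static void my_check_carrier(struct work_struct *work)
        {
                struct my_adapter *ad =
                        container_of(work, struct my_adapter, carrier_check.work);

                /* ... poll link state for ad, then re-arm ... */
                queue_delayed_work(my_wq, &ad->carrier_check, 2 * HZ);
        }

        static int my_attach(struct my_adapter *ad)
        {
                my_wq = create_singlethread_workqueue("my_wq");
                if (!my_wq)
                        return -ENOMEM;
                INIT_DELAYED_WORK(&ad->carrier_check, my_check_carrier);
                queue_delayed_work(my_wq, &ad->carrier_check, 2 * HZ);
                return 0;
        }
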
index 006438069b66faa773215251fa6bb272fdc68a25..98f6898cae1f48350162686f1f33894d63e0fad7 100644 (file)
@@ -95,7 +95,7 @@ typedef struct pegasus {
        int                     dev_index;
        int                     intr_interval;
        struct tasklet_struct   rx_tl;
-       struct work_struct      carrier_check;
+       struct delayed_work     carrier_check;
        struct urb              *ctrl_urb, *rx_urb, *tx_urb, *intr_urb;
        struct sk_buff          *rx_pool[RX_SKBS];
        struct sk_buff          *rx_skb;
index c2a28d88ef3c8d174124790c0be00f116173d5c5..99f26b3e502f39dcd017c5854c635dabb435a16d 100644 (file)
@@ -469,7 +469,7 @@ static void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
        struct rndis_halt       *halt;
 
        /* try to clear any rndis state/activity (no i/o from stack!) */
-       halt = kcalloc(1, sizeof *halt, SLAB_KERNEL);
+       halt = kcalloc(1, sizeof *halt, GFP_KERNEL);
        if (halt) {
                halt->msg_type = RNDIS_MSG_HALT;
                halt->msg_len = ccpu2(sizeof *halt);
index 72171f94ded48eb648c061b92f648bda8f2600b4..c54235f73cb68b14c1a733536ac0411a91dc3093 100644 (file)
@@ -587,7 +587,7 @@ static void intr_callback(struct urb *urb)
        }
 
 resubmit:
-       status = usb_submit_urb (urb, SLAB_ATOMIC);
+       status = usb_submit_urb (urb, GFP_ATOMIC);
        if (status == -ENODEV)
                netif_device_detach(dev->netdev);
        else if (status)
index 7672e11c94c41b17cb9181101562c2dfbeb8d7fc..6e39e998825914acfaf899c5a9d77e2b13b59d31 100644 (file)
@@ -179,9 +179,9 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
        period = max ((int) dev->status->desc.bInterval,
                (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
 
-       buf = kmalloc (maxp, SLAB_KERNEL);
+       buf = kmalloc (maxp, GFP_KERNEL);
        if (buf) {
-               dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL);
+               dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
                if (!dev->interrupt) {
                        kfree (buf);
                        return -ENOMEM;
@@ -782,9 +782,10 @@ static struct ethtool_ops usbnet_ethtool_ops = {
  * especially now that control transfers can be queued.
  */
 static void
-kevent (void *data)
+kevent (struct work_struct *work)
 {
-       struct usbnet           *dev = data;
+       struct usbnet           *dev =
+               container_of(work, struct usbnet, kevent);
        int                     status;
 
        /* usb_clear_halt() needs a thread context */
@@ -1146,7 +1147,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        skb_queue_head_init (&dev->done);
        dev->bh.func = usbnet_bh;
        dev->bh.data = (unsigned long) dev;
-       INIT_WORK (&dev->kevent, kevent, dev);
+       INIT_WORK (&dev->kevent, kevent);
        dev->delay.function = usbnet_bh;
        dev->delay.data = (unsigned long) dev;
        init_timer (&dev->delay);
index b1b5707bc99af029e1e7f1f859357fcd507c12da..86bcf63b6ba5fa6e3e7e277f0db7de0a6367b032 100644 (file)
@@ -92,6 +92,7 @@ struct aircable_private {
        struct circ_buf *rx_buf;        /* read buffer */
        int rx_flags;                   /* for throttilng */
        struct work_struct rx_work;     /* work cue for the receiving line */
+       struct usb_serial_port *port;   /* USB port with which associated */
 };
 
 /* Private methods */
@@ -251,10 +252,11 @@ static void aircable_send(struct usb_serial_port *port)
        schedule_work(&port->work);
 }
 
-static void aircable_read(void *params)
+static void aircable_read(struct work_struct *work)
 {
-       struct usb_serial_port *port = params;
-       struct aircable_private *priv = usb_get_serial_port_data(port);
+       struct aircable_private *priv =
+               container_of(work, struct aircable_private, rx_work);
+       struct usb_serial_port *port = priv->port;
        struct tty_struct *tty;
        unsigned char *data;
        int count;
@@ -349,7 +351,8 @@ static int aircable_attach (struct usb_serial *serial)
        }
 
        priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED);
-       INIT_WORK(&priv->rx_work, aircable_read, port);
+       priv->port = port;
+       INIT_WORK(&priv->rx_work, aircable_read);
 
        usb_set_serial_port_data(serial->port[0], priv);
 
@@ -516,7 +519,7 @@ static void aircable_read_bulk_callback(struct urb *urb)
                                        package_length - shift);
                        }
                }
-               aircable_read(port);
+               aircable_read(&priv->rx_work);
        }
 
        /* Schedule the next read _if_ we are still open */
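
When the old code handed the work callback something other than the structure that embeds the work item (aircable here, and digi_acceleport, ftdi_sio, keyspan_pda and whiteheat below), the conversion has to stash a back-pointer, typically the usb_serial_port, in the private data so the callback can still reach it after container_of(). A minimal sketch of that idiom; struct my_priv and the function names are hypothetical:

        struct my_priv {
                struct work_struct rx_work;
                struct usb_serial_port *port;   /* back-pointer for the callback */
        };

        static void my_rx_work(struct work_struct *work)
        {
                struct my_priv *priv = container_of(work, struct my_priv, rx_work);
                struct usb_serial_port *port = priv->port;

                /* ... push buffered data to port->tty ... */
        }

        static int my_attach(struct usb_serial *serial)
        {
                struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

                if (!priv)
                        return -ENOMEM;
                INIT_WORK(&priv->rx_work, my_rx_work);
                priv->port = serial->port[0];
                usb_set_serial_port_data(serial->port[0], priv);
                return 0;
        }
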
index 5e3ac281a2f8798515a08d0f083103effa070b53..83d0e21145b01cf9b1b46352eae664b51799a164 100644 (file)
@@ -430,13 +430,14 @@ struct digi_port {
        int dp_in_close;                        /* close in progress */
        wait_queue_head_t dp_close_wait;        /* wait queue for close */
        struct work_struct dp_wakeup_work;
+       struct usb_serial_port *dp_port;
 };
 
 
 /* Local Function Declarations */
 
 static void digi_wakeup_write( struct usb_serial_port *port );
-static void digi_wakeup_write_lock(void *);
+static void digi_wakeup_write_lock(struct work_struct *work);
 static int digi_write_oob_command( struct usb_serial_port *port,
        unsigned char *buf, int count, int interruptible );
 static int digi_write_inb_command( struct usb_serial_port *port,
@@ -598,11 +599,12 @@ static inline long cond_wait_interruptible_timeout_irqrestore(
 *  on writes.
 */
 
-static void digi_wakeup_write_lock(void *arg)
+static void digi_wakeup_write_lock(struct work_struct *work)
 {
-       struct usb_serial_port *port = arg;
+       struct digi_port *priv =
+               container_of(work, struct digi_port, dp_wakeup_work);
+       struct usb_serial_port *port = priv->dp_port;
        unsigned long flags;
-       struct digi_port *priv = usb_get_serial_port_data(port);
 
 
        spin_lock_irqsave( &priv->dp_port_lock, flags );
@@ -1702,8 +1704,8 @@ dbg( "digi_startup: TOP" );
                init_waitqueue_head( &priv->dp_flush_wait );
                priv->dp_in_close = 0;
                init_waitqueue_head( &priv->dp_close_wait );
-               INIT_WORK(&priv->dp_wakeup_work,
-                               digi_wakeup_write_lock, serial->port[i]);
+               INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
+               priv->dp_port = serial->port[i];
 
                /* initialize write wait queue for this port */
                init_waitqueue_head( &serial->port[i]->write_wait );
index 89ce2775be15c0fbd285a82861d65b75257d4508..72e4d48f51e9c18e9e6f4130dda00ad504261c0e 100644 (file)
@@ -559,7 +559,8 @@ struct ftdi_private {
        char prev_status, diff_status;        /* Used for TIOCMIWAIT */
        __u8 rx_flags;          /* receive state flags (throttling) */
        spinlock_t rx_lock;     /* spinlock for receive state */
-       struct work_struct rx_work;
+       struct delayed_work rx_work;
+       struct usb_serial_port *port;
        int rx_processed;
        unsigned long rx_bytes;
 
@@ -593,7 +594,7 @@ static int  ftdi_write_room         (struct usb_serial_port *port);
 static int  ftdi_chars_in_buffer       (struct usb_serial_port *port);
 static void ftdi_write_bulk_callback   (struct urb *urb);
 static void ftdi_read_bulk_callback    (struct urb *urb);
-static void ftdi_process_read          (void *param);
+static void ftdi_process_read          (struct work_struct *work);
 static void ftdi_set_termios           (struct usb_serial_port *port, struct termios * old);
 static int  ftdi_tiocmget               (struct usb_serial_port *port, struct file *file);
 static int  ftdi_tiocmset              (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear);
@@ -1201,7 +1202,8 @@ static int ftdi_sio_attach (struct usb_serial *serial)
                port->read_urb->transfer_buffer_length = BUFSZ;
        }
 
-       INIT_WORK(&priv->rx_work, ftdi_process_read, port);
+       INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read);
+       priv->port = port;
 
        /* Free port's existing write urb and transfer buffer. */
        if (port->write_urb) {
@@ -1640,17 +1642,18 @@ static void ftdi_read_bulk_callback (struct urb *urb)
        priv->rx_bytes += countread;
        spin_unlock_irqrestore(&priv->rx_lock, flags);
 
-       ftdi_process_read(port);
+       ftdi_process_read(&priv->rx_work.work);
 
 } /* ftdi_read_bulk_callback */
 
 
-static void ftdi_process_read (void *param)
+static void ftdi_process_read (struct work_struct *work)
 { /* ftdi_process_read */
-       struct usb_serial_port *port = (struct usb_serial_port*)param;
+       struct ftdi_private *priv =
+               container_of(work, struct ftdi_private, rx_work.work);
+       struct usb_serial_port *port = priv->port;
        struct urb *urb;
        struct tty_struct *tty;
-       struct ftdi_private *priv;
        char error_flag;
        unsigned char *data;
 
@@ -2179,7 +2182,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port)
        spin_unlock_irqrestore(&priv->rx_lock, flags);
 
        if (actually_throttled)
-               schedule_work(&priv->rx_work);
+               schedule_delayed_work(&priv->rx_work, 0);
 }
 
 static int __init ftdi_init (void)
index 909005107ea2465285f133ca1a0db5b82c2033fc..e09a0bfe62316f2ad890edbbb0a4ea1b5704fdb9 100644 (file)
@@ -120,6 +120,8 @@ struct keyspan_pda_private {
        int                     tx_throttled;
        struct work_struct                      wakeup_work;
        struct work_struct                      unthrottle_work;
+       struct usb_serial       *serial;
+       struct usb_serial_port  *port;
 };
 
 
@@ -175,9 +177,11 @@ static struct usb_device_id id_table_fake_xircom [] = {
 };
 #endif
 
-static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
+static void keyspan_pda_wakeup_write(struct work_struct *work)
 {
-
+       struct keyspan_pda_private *priv =
+               container_of(work, struct keyspan_pda_private, wakeup_work);
+       struct usb_serial_port *port = priv->port;
        struct tty_struct *tty = port->tty;
 
        /* wake up port processes */
@@ -187,8 +191,11 @@ static void keyspan_pda_wakeup_write( struct usb_serial_port *port )
        tty_wakeup(tty);
 }
 
-static void keyspan_pda_request_unthrottle( struct usb_serial *serial )
+static void keyspan_pda_request_unthrottle(struct work_struct *work)
 {
+       struct keyspan_pda_private *priv =
+               container_of(work, struct keyspan_pda_private, unthrottle_work);
+       struct usb_serial *serial = priv->serial;
        int result;
 
        dbg(" request_unthrottle");
@@ -765,11 +772,10 @@ static int keyspan_pda_startup (struct usb_serial *serial)
                return (1); /* error */
        usb_set_serial_port_data(serial->port[0], priv);
        init_waitqueue_head(&serial->port[0]->write_wait);
-       INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write,
-                       (void *)(serial->port[0]));
-       INIT_WORK(&priv->unthrottle_work,
-                       (void *)keyspan_pda_request_unthrottle,
-                       (void *)(serial));
+       INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
+       INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
+       priv->serial = serial;
+       priv->port = serial->port[0];
        return (0);
 }
 
index 82cd15b894b0ffebb6d98fe02801c14f0e160eab..70f93b18292f7ce26c3a12ee7eb2c0c0d61e3d7b 100644 (file)
@@ -363,7 +363,7 @@ static int mos7720_open(struct usb_serial_port *port, struct file * filp)
 
        /* Initialising the write urb pool */
        for (j = 0; j < NUM_URBS; ++j) {
-               urb = usb_alloc_urb(0,SLAB_ATOMIC);
+               urb = usb_alloc_urb(0,GFP_ATOMIC);
                mos7720_port->write_urb_pool[j] = urb;
 
                if (urb == NULL) {
index 02c89e10b2cf103752c72d993d3aa7cf2cfd98b5..5432c63400862dd962336de2a051cfe88cc2c329 100644 (file)
@@ -826,7 +826,7 @@ static int mos7840_open(struct usb_serial_port *port, struct file *filp)
 
        /* Initialising the write urb pool */
        for (j = 0; j < NUM_URBS; ++j) {
-               urb = usb_alloc_urb(0, SLAB_ATOMIC);
+               urb = usb_alloc_urb(0, GFP_ATOMIC);
                mos7840_port->write_urb_pool[j] = urb;
 
                if (urb == NULL) {
@@ -2786,7 +2786,7 @@ static int mos7840_startup(struct usb_serial *serial)
                                    i + 1, status);
 
                }
-               mos7840_port->control_urb = usb_alloc_urb(0, SLAB_ATOMIC);
+               mos7840_port->control_urb = usb_alloc_urb(0, GFP_ATOMIC);
                mos7840_port->ctrl_buf = kmalloc(16, GFP_KERNEL);
 
        }
index c1257d5292f54d71565d1991726d18af03cbcfb9..3d5072f14b8d049081fbc2c88fad03bb157f6e9b 100644 (file)
@@ -533,9 +533,10 @@ void usb_serial_port_softint(struct usb_serial_port *port)
        schedule_work(&port->work);
 }
 
-static void usb_serial_port_work(void *private)
+static void usb_serial_port_work(struct work_struct *work)
 {
-       struct usb_serial_port *port = private;
+       struct usb_serial_port *port =
+               container_of(work, struct usb_serial_port, work);
        struct tty_struct *tty;
 
        dbg("%s - port %d", __FUNCTION__, port->number);
@@ -799,7 +800,7 @@ int usb_serial_probe(struct usb_interface *interface,
                port->serial = serial;
                spin_lock_init(&port->lock);
                mutex_init(&port->mutex);
-               INIT_WORK(&port->work, usb_serial_port_work, port);
+               INIT_WORK(&port->work, usb_serial_port_work);
                serial->port[i] = port;
        }
 
index 4d1cd7aeccd38c5493bef4e3b09caedeaa663080..154c7d290597776b82b9ec076db976b4eff228e6 100644 (file)
@@ -227,6 +227,7 @@ struct whiteheat_private {
        struct list_head        rx_urbs_submitted;
        struct list_head        rx_urb_q;
        struct work_struct      rx_work;
+       struct usb_serial_port  *port;
        struct list_head        tx_urbs_free;
        struct list_head        tx_urbs_submitted;
 };
@@ -241,7 +242,7 @@ static void command_port_read_callback(struct urb *urb);
 static int start_port_read(struct usb_serial_port *port);
 static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head);
 static struct list_head *list_first(struct list_head *head);
-static void rx_data_softint(void *private);
+static void rx_data_softint(struct work_struct *work);
 
 static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize);
 static int firm_open(struct usb_serial_port *port);
@@ -424,7 +425,8 @@ static int whiteheat_attach (struct usb_serial *serial)
                spin_lock_init(&info->lock);
                info->flags = 0;
                info->mcr = 0;
-               INIT_WORK(&info->rx_work, rx_data_softint, port);
+               INIT_WORK(&info->rx_work, rx_data_softint);
+               info->port = port;
 
                INIT_LIST_HEAD(&info->rx_urbs_free);
                INIT_LIST_HEAD(&info->rx_urbs_submitted);
@@ -949,7 +951,7 @@ static void whiteheat_unthrottle (struct usb_serial_port *port)
        spin_unlock_irqrestore(&info->lock, flags);
 
        if (actually_throttled)
-               rx_data_softint(port);
+               rx_data_softint(&info->rx_work);
 
        return;
 }
@@ -1400,10 +1402,11 @@ static struct list_head *list_first(struct list_head *head)
 }
 
 
-static void rx_data_softint(void *private)
+static void rx_data_softint(struct work_struct *work)
 {
-       struct usb_serial_port *port = (struct usb_serial_port *)private;
-       struct whiteheat_private *info = usb_get_serial_port_data(port);
+       struct whiteheat_private *info =
+               container_of(work, struct whiteheat_private, rx_work);
+       struct usb_serial_port *port = info->port;
        struct tty_struct *tty = port->tty;
        struct whiteheat_urb_wrap *wrap;
        struct urb *urb;
index 3a158d58441fc380d3997005e8ea27f58b8c57df..e565d3d2ab2909088bf7dde1d1372fb57fb7b25f 100644 (file)
@@ -76,7 +76,7 @@ static void usb_onetouch_irq(struct urb *urb)
        input_sync(dev);
 
 resubmit:
-       status = usb_submit_urb (urb, SLAB_ATOMIC);
+       status = usb_submit_urb (urb, GFP_ATOMIC);
        if (status)
                err ("can't resubmit intr, %s-%s/input0, status %d",
                        onetouch->udev->bus->bus_name,
@@ -154,7 +154,7 @@ int onetouch_connect_input(struct us_data *ss)
                goto fail1;
 
        onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
-                                         SLAB_ATOMIC, &onetouch->data_dma);
+                                         GFP_ATOMIC, &onetouch->data_dma);
        if (!onetouch->data)
                goto fail1;
 
index 47644b5b61553c4082257aa8cd4763587375c197..323293a3e61f08936ab16fcbc9820fdcc9056492 100644 (file)
@@ -427,7 +427,7 @@ static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
        US_DEBUGP("%s: xfer %u bytes, %d entries\n", __FUNCTION__,
                        length, num_sg);
        result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
-                       sg, num_sg, length, SLAB_NOIO);
+                       sg, num_sg, length, GFP_NOIO);
        if (result) {
                US_DEBUGP("usb_sg_init returned %d\n", result);
                return USB_STOR_XFER_ERROR;
index b401084b3d223a01061c3c24562b0e05cd2941dd..70644506651f2bf58042ea9f1c5707145c3cb5c9 100644 (file)
@@ -49,7 +49,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
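
This hunk only swaps <linux/suspend.h> for the new <linux/freezer.h>, where the process-freezer helpers now live; the afs kthreads below gain the same include. A hedged sketch of a freezer-aware kernel thread loop, assuming try_to_freeze() and kthread_should_stop() as provided in this kernel generation:

        #include <linux/sched.h>
        #include <linux/kthread.h>
        #include <linux/freezer.h>

        static int my_thread(void *arg)
        {
                while (!kthread_should_stop()) {
                        try_to_freeze();        /* declared in <linux/freezer.h> now */

                        /* ... do one unit of work, then sleep a bit ... */
                        schedule_timeout_interruptible(HZ);
                }
                return 0;
        }
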
index 302174b8e477df2eec923f51abe69bc7ab7a4e2e..31f476a64790e09dd6f005b42cee0800e8c0362f 100644 (file)
@@ -383,9 +383,9 @@ static void fbcon_update_softback(struct vc_data *vc)
                softback_top = 0;
 }
 
-static void fb_flashcursor(void *private)
+static void fb_flashcursor(struct work_struct *work)
 {
-       struct fb_info *info = private;
+       struct fb_info *info = container_of(work, struct fb_info, queue);
        struct fbcon_ops *ops = info->fbcon_par;
        struct display *p;
        struct vc_data *vc = NULL;
@@ -442,7 +442,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
        if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
            !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) {
                if (!info->queue.func)
-                       INIT_WORK(&info->queue, fb_flashcursor, info);
+                       INIT_WORK(&info->queue, fb_flashcursor);
 
                init_timer(&ops->cursor_timer);
                ops->cursor_timer.function = cursor_timer_handler;
index 0d3643fc6293ba76880b371779288f99d46306c4..a454dcb8e215a1fb60255acfb9940acec1ef8e56 100644 (file)
@@ -380,7 +380,7 @@ static void gxfb_remove(struct pci_dev *pdev)
 }
 
 static struct pci_device_id gxfb_id_table[] = {
-       { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_VIDEO,
+       { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO,
          PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
          0xff0000, 0 },
        { 0, }
index fdb33cd21a27211e1d78ba7ec778124c584885c7..cb26c6df0583f41cc94d30807e69aae468923ab2 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/prom.h>
 #include <asm/pgtable.h>
 #include <asm/of_device.h>
+#include <asm/of_platform.h>
 
 #include "macmodes.h"
 #include "platinumfb.h"
@@ -682,14 +683,14 @@ static int __init platinumfb_init(void)
                return -ENODEV;
        platinumfb_setup(option);
 #endif
-       of_register_driver(&platinum_driver);
+       of_register_platform_driver(&platinum_driver);
 
        return 0;
 }
 
 static void __exit platinumfb_exit(void)
 {
-       of_unregister_driver(&platinum_driver); 
+       of_unregister_platform_driver(&platinum_driver);
 }
 
 MODULE_LICENSE("GPL");
index 8a8ae55a7403fbe745a3981ef816575fd25b20b3..38eb0b69c2d7ab10961c0f7eedbf094a137e7ac6 100644 (file)
@@ -964,9 +964,10 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
  * Our LCD controller task (which is called when we blank or unblank)
  * via keventd.
  */
-static void pxafb_task(void *dummy)
+static void pxafb_task(struct work_struct *work)
 {
-       struct pxafb_info *fbi = dummy;
+       struct pxafb_info *fbi =
+               container_of(work, struct pxafb_info, task);
        u_int state = xchg(&fbi->task_state, -1);
 
        set_ctrlr_state(fbi, state);
@@ -1159,7 +1160,7 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
        }
 
        init_waitqueue_head(&fbi->ctrlr_wait);
-       INIT_WORK(&fbi->task, pxafb_task, fbi);
+       INIT_WORK(&fbi->task, pxafb_task);
        init_MUTEX(&fbi->ctrlr_sem);
 
        return fbi;
index 93845a2c7c21d92780854ca1f9460a6d7c9ae315..6bb0b54965f2ac3d1257cadea6a2d7911f2aa13e 100644 (file)
@@ -2,10 +2,6 @@
 # Makefile for the Dallas's 1-wire bus.
 #
 
-ifeq ($(CONFIG_W1_DS2433_CRC), y)
-EXTRA_CFLAGS   += -DCONFIG_W1_F23_CRC
-endif
-
 obj-$(CONFIG_W1)       += wire.o
 wire-objs              := w1.o w1_int.o w1_family.o w1_netlink.o w1_io.o
 
index 70e21e2d70c334000cfaaf0b5133da3c0daa1f21..725dcfdfddb412d2de253cfc142e08b7cccf66af 100644 (file)
@@ -2,10 +2,6 @@
 # Makefile for the Dallas's 1-wire slaves.
 #
 
-ifeq ($(CONFIG_W1_SLAVE_DS2433_CRC), y)
-EXTRA_CFLAGS += -DCONFIG_W1_F23_CRC
-endif
-
 obj-$(CONFIG_W1_SLAVE_THERM)   += w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)    += w1_smem.o
 obj-$(CONFIG_W1_SLAVE_DS2433)  += w1_ds2433.o
index 2ac238f1480e234d05b260b5a78d9631f0858f49..8ea17a53eed853223be23df1e78878009ebf5577 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/delay.h>
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 #include <linux/crc16.h>
 
 #define CRC16_INIT             0
@@ -62,7 +62,7 @@ static inline size_t w1_f23_fix_count(loff_t off, size_t count, size_t size)
        return count;
 }
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
                                int block)
 {
@@ -89,13 +89,13 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
 
        return 0;
 }
-#endif /* CONFIG_W1_F23_CRC */
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
 
 static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
                               size_t count)
 {
        struct w1_slave *sl = kobj_to_w1_slave(kobj);
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        struct w1_f23_data *data = sl->family_data;
        int i, min_page, max_page;
 #else
@@ -107,7 +107,7 @@ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
 
        mutex_lock(&sl->master->mutex);
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
 
        min_page = (off >> W1_PAGE_BITS);
        max_page = (off + count - 1) >> W1_PAGE_BITS;
@@ -119,7 +119,7 @@ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
        }
        memcpy(buf, &data->memory[off], count);
 
-#else  /* CONFIG_W1_F23_CRC */
+#else  /* CONFIG_W1_SLAVE_DS2433_CRC */
 
        /* read directly from the EEPROM */
        if (w1_reset_select_slave(sl)) {
@@ -133,7 +133,7 @@ static ssize_t w1_f23_read_bin(struct kobject *kobj, char *buf, loff_t off,
        w1_write_block(sl->master, wrbuf, 3);
        w1_read_block(sl->master, buf, count);
 
-#endif /* CONFIG_W1_F23_CRC */
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
 
 out_up:
        mutex_unlock(&sl->master->mutex);
@@ -208,7 +208,7 @@ static ssize_t w1_f23_write_bin(struct kobject *kobj, char *buf, loff_t off,
        if ((count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE)) == 0)
                return 0;
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        /* can only write full blocks in cached mode */
        if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) {
                dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n",
@@ -223,7 +223,7 @@ static ssize_t w1_f23_write_bin(struct kobject *kobj, char *buf, loff_t off,
                        return -EINVAL;
                }
        }
-#endif /* CONFIG_W1_F23_CRC */
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
 
        mutex_lock(&sl->master->mutex);
 
@@ -262,7 +262,7 @@ static struct bin_attribute w1_f23_bin_attr = {
 static int w1_f23_add_slave(struct w1_slave *sl)
 {
        int err;
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        struct w1_f23_data *data;
 
        data = kmalloc(sizeof(struct w1_f23_data), GFP_KERNEL);
@@ -271,24 +271,24 @@ static int w1_f23_add_slave(struct w1_slave *sl)
        memset(data, 0, sizeof(struct w1_f23_data));
        sl->family_data = data;
 
-#endif /* CONFIG_W1_F23_CRC */
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
 
        err = sysfs_create_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
 
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        if (err)
                kfree(data);
-#endif /* CONFIG_W1_F23_CRC */
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
 
        return err;
 }
 
 static void w1_f23_remove_slave(struct w1_slave *sl)
 {
-#ifdef CONFIG_W1_F23_CRC
+#ifdef CONFIG_W1_SLAVE_DS2433_CRC
        kfree(sl->family_data);
        sl->family_data = NULL;
-#endif /* CONFIG_W1_F23_CRC */
+#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
        sysfs_remove_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
 }
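
The two Makefile hunks above stop remapping the Kconfig symbol onto a private CONFIG_W1_F23_CRC via EXTRA_CFLAGS; w1_ds2433.c now tests the Kconfig-generated macro directly. A sketch of that shape; the helper and the CRC16_VALID residue value are assumptions of this sketch (only CRC16_INIT is visible in the hunk above):

        #ifdef CONFIG_W1_SLAVE_DS2433_CRC
        #include <linux/crc16.h>

        #define CRC16_INIT      0
        #define CRC16_VALID     0xb001

        /* hypothetical check: data block with its inverted CRC16 appended */
        static int my_block_crc_ok(const u8 *block, size_t len)
        {
                return crc16(CRC16_INIT, block, len) == CRC16_VALID;
        }
        #else
        static inline int my_block_crc_ok(const u8 *block, size_t len)
        {
                return 1;       /* CRC checking compiled out */
        }
        #endif
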
 
index de3e9791f80d72fe8916b64286ab32d487ae96f1..63c07243993c1b1b50c007ffc6773eccf2e98441 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/atomic.h>
 
index 90a79c784549f108f4929a7865295deaf0a69b23..944273c3dbff5c6aa37dcf6ee492b50bdf594926 100644 (file)
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc {
 };
 
 static int v9fs_poll_proc(void *);
-static void v9fs_read_work(void *);
-static void v9fs_write_work(void *);
+static void v9fs_read_work(struct work_struct *work);
+static void v9fs_write_work(struct work_struct *work);
 static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
                          poll_table * p);
 static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
-       INIT_WORK(&m->rq, v9fs_read_work, m);
-       INIT_WORK(&m->wq, v9fs_write_work, m);
+       INIT_WORK(&m->rq, v9fs_read_work);
+       INIT_WORK(&m->wq, v9fs_write_work);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a)
 /**
  * v9fs_write_work - called when a transport can send some data
  */
-static void v9fs_write_work(void *a)
+static void v9fs_write_work(struct work_struct *work)
 {
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;
 
-       m = a;
+       m = container_of(work, struct v9fs_mux_data, wq);
 
        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
 /**
  * v9fs_read_work - called when there is some data to be read from a transport
  */
-static void v9fs_read_work(void *a)
+static void v9fs_read_work(struct work_struct *work)
 {
        int n, err;
        struct v9fs_mux_data *m;
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a)
        struct v9fs_fcall *rcall;
        char *rbuf;
 
-       m = a;
+       m = container_of(work, struct v9fs_mux_data, rq);
 
        if (m->err < 0)
                return;
index 5241c600ce28be2f00f0eb454b41db882beab1a8..18f26cdfd882792fecc12113c83e129dc7bcab9e 100644 (file)
@@ -256,7 +256,7 @@ static int
 v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm,
        u8 mode, char *extension, u32 *fidp, struct v9fs_qid *qid, u32 *iounit)
 {
-       u32 fid;
+       int fid;
        int err;
        struct v9fs_fcall *fcall;
 
@@ -310,7 +310,7 @@ static struct v9fs_fid*
 v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry)
 {
        int err;
-       u32 nfid;
+       int nfid;
        struct v9fs_fid *ret;
        struct v9fs_fcall *fcall;
 
index 9ade139086fcebb7c48206a686ac06011edea40c..5023351a7afe652fcd9af92e9201b7cc2029081c 100644 (file)
@@ -36,7 +36,7 @@ void __adfs_error(struct super_block *sb, const char *function, const char *fmt,
        va_list args;
 
        va_start(args, fmt);
-       vsprintf(error_buf, fmt, args);
+       vsnprintf(error_buf, sizeof(error_buf), fmt, args);
        va_end(args);
 
        printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %s\n",
@@ -212,12 +212,12 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
-static kmem_cache_t *adfs_inode_cachep;
+static struct kmem_cache *adfs_inode_cachep;
 
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
        struct adfs_inode_info *ei;
-       ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL);
+       ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -228,7 +228,7 @@ static void adfs_destroy_inode(struct inode *inode)
        kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;
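
The adfs error helper above, and the affs ones below, switch from vsprintf() to vsnprintf() so an oversized message can no longer overrun the static buffer. A minimal sketch of the bounded form; the buffer and helper names are hypothetical:

        static char my_error_buf[128];

        static void my_error(struct super_block *sb, const char *fmt, ...)
        {
                va_list args;

                va_start(args, fmt);
                vsnprintf(my_error_buf, sizeof(my_error_buf), fmt, args);
                va_end(args);

                printk(KERN_CRIT "myfs error (device %s): %s\n",
                       sb->s_id, my_error_buf);
        }
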
 
index ccd624ef4272d742b0f98413aa741064d4b6d09d..f4de4b98004fa1f9f08cbe2c6310eb805aaafd1d 100644 (file)
@@ -445,7 +445,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
        va_list  args;
 
        va_start(args,fmt);
-       vsprintf(ErrorBuffer,fmt,args);
+       vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
        va_end(args);
 
        printk(KERN_CRIT "AFFS error (device %s): %s(): %s\n", sb->s_id,
@@ -461,7 +461,7 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
        va_list  args;
 
        va_start(args,fmt);
-       vsprintf(ErrorBuffer,fmt,args);
+       vsnprintf(ErrorBuffer,sizeof(ErrorBuffer),fmt,args);
        va_end(args);
 
        printk(KERN_WARNING "AFFS warning (device %s): %s(): %s\n", sb->s_id,
index b0b953683c1a81da13141bbc6de13d0ba09aea33..b330009fe42dbdb02ca987169981a9e67706e7e4 100644 (file)
@@ -289,12 +289,11 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
        sbi->s_bmap_count = (sbi->s_partition_size - sbi->s_reserved +
                                 sbi->s_bmap_bits - 1) / sbi->s_bmap_bits;
        size = sbi->s_bmap_count * sizeof(*bm);
-       bm = sbi->s_bitmap = kmalloc(size, GFP_KERNEL);
+       bm = sbi->s_bitmap = kzalloc(size, GFP_KERNEL);
        if (!sbi->s_bitmap) {
                printk(KERN_ERR "AFFS: Bitmap allocation failed\n");
                return -ENOMEM;
        }
-       memset(sbi->s_bitmap, 0, size);
 
        bmap_blk = (__be32 *)sbi->s_root_bh->b_data;
        blk = sb->s_blocksize / 4 - 49;
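
affs_init_bitmap above, and the afs allocations below, fold kmalloc() plus memset(..., 0, ...) into kzalloc(), which exists for exactly this pattern. A tiny sketch with hypothetical names:

        static void *my_alloc_table(size_t count, size_t elem_size)
        {
                /* one call replaces kmalloc() followed by memset(ptr, 0, size) */
                return kzalloc(count * elem_size, GFP_KERNEL);
        }
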
index 5ea72c3a16c3b52d4654d9a65223a09b6c87cbb4..3de93e799949cc9289ece4a0b13486221d2a136b 100644 (file)
@@ -66,12 +66,12 @@ affs_write_super(struct super_block *sb)
        pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
 }
 
-static kmem_cache_t * affs_inode_cachep;
+static struct kmem_cache * affs_inode_cachep;
 
 static struct inode *affs_alloc_inode(struct super_block *sb)
 {
        struct affs_inode_info *ei;
-       ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL);
+       ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        ei->vfs_inode.i_version = 1;
@@ -83,7 +83,7 @@ static void affs_destroy_inode(struct inode *inode)
        kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct affs_inode_info *ei = (struct affs_inode_info *) foo;
 
index f09a794f248e33a4b8ad192acbf957aec2dd399b..615df2407cb2ec5f166d81e3e4dbe9837c38506c 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "server.h"
 #include "volume.h"
index 65bc05ab81826b417513f203a5b7e930d7b9fed7..694344e4d3c7503ca03750549495bc6cad8fef1a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include "cell.h"
 #include "volume.h"
 #include "kafstimod.h"
index 22afaae1a4ce53417d3e52900817d25245f99395..44aff81dc6a74fa4cf924a32586e2c91d2f2febf 100644 (file)
@@ -55,13 +55,12 @@ int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr,
        _enter("%p,%08x,", cell, ntohl(addr->s_addr));
 
        /* allocate and initialise a server record */
-       server = kmalloc(sizeof(struct afs_server), GFP_KERNEL);
+       server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
        if (!server) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
 
-       memset(server, 0, sizeof(struct afs_server));
        atomic_set(&server->usage, 1);
 
        INIT_LIST_HEAD(&server->link);
index 67d1f5c819eccc08936e9c11ad0b0bef58c7118f..18d9b77ba40fe5ff081da9d95de551dd4978071a 100644 (file)
@@ -35,7 +35,7 @@ struct afs_mount_params {
        struct afs_volume       *volume;
 };
 
-static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
+static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
                            unsigned long flags);
 
 static int afs_get_sb(struct file_system_type *fs_type,
@@ -65,7 +65,7 @@ static struct super_operations afs_super_ops = {
        .put_super      = afs_put_super,
 };
 
-static kmem_cache_t *afs_inode_cachep;
+static struct kmem_cache *afs_inode_cachep;
 static atomic_t afs_count_active_inodes;
 
 /*****************************************************************************/
@@ -242,14 +242,12 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
        kenter("");
 
        /* allocate a superblock info record */
-       as = kmalloc(sizeof(struct afs_super_info), GFP_KERNEL);
+       as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
        if (!as) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
 
-       memset(as, 0, sizeof(struct afs_super_info));
-
        afs_get_volume(params->volume);
        as->volume = params->volume;
 
@@ -384,7 +382,7 @@ static void afs_put_super(struct super_block *sb)
 /*
  * initialise an inode cache slab element prior to any use
  */
-static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep,
+static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
                            unsigned long flags)
 {
        struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
@@ -412,7 +410,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
        struct afs_vnode *vnode;
 
        vnode = (struct afs_vnode *)
-               kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL);
+               kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
        if (!vnode)
                return NULL;
 
index 277a5f2d18ad7b8ca66a3aef65551379054fc724..d3a6ec2c9627e1e58a769c93398f2a3787bdfda8 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -47,19 +47,19 @@ unsigned long aio_nr;               /* current system wide number of aio requests */
 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
-static kmem_cache_t    *kiocb_cachep;
-static kmem_cache_t    *kioctx_cachep;
+static struct kmem_cache       *kiocb_cachep;
+static struct kmem_cache       *kioctx_cachep;
 
 static struct workqueue_struct *aio_wq;
 
 /* Used for rare fput completion. */
-static void aio_fput_routine(void *);
-static DECLARE_WORK(fput_work, aio_fput_routine, NULL);
+static void aio_fput_routine(struct work_struct *);
+static DECLARE_WORK(fput_work, aio_fput_routine);
 
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-static void aio_kick_handler(void *);
+static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
 /* aio_setup
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
        INIT_LIST_HEAD(&ctx->active_reqs);
        INIT_LIST_HEAD(&ctx->run_list);
-       INIT_WORK(&ctx->wq, aio_kick_handler, ctx);
+       INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
 
        if (aio_setup_ring(ctx) < 0)
                goto out_freectx;
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
                wake_up(&ctx->wait);
 }
 
-static void aio_fput_routine(void *data)
+static void aio_fput_routine(struct work_struct *data)
 {
        spin_lock_irq(&fput_lock);
        while (likely(!list_empty(&fput_head))) {
@@ -666,17 +666,6 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
        ssize_t (*retry)(struct kiocb *);
        ssize_t ret;
 
-       if (iocb->ki_retried++ > 1024*1024) {
-               printk("Maximal retry count.  Bytes done %Zd\n",
-                       iocb->ki_nbytes - iocb->ki_left);
-               return -EAGAIN;
-       }
-
-       if (!(iocb->ki_retried & 0xff)) {
-               pr_debug("%ld retry: %zd of %zd\n", iocb->ki_retried,
-                       iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
-       }
-
        if (!(retry = iocb->ki_retry)) {
                printk("aio_run_iocb: iocb->ki_retry = NULL\n");
                return 0;
@@ -857,9 +846,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx)
  *      space.
  * Run on aiod's context.
  */
-static void aio_kick_handler(void *data)
+static void aio_kick_handler(struct work_struct *work)
 {
-       struct kioctx *ctx = data;
+       struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
        mm_segment_t oldfs = get_fs();
        int requeue;
 
@@ -874,7 +863,7 @@ static void aio_kick_handler(void *data)
         * we're in a worker thread already, don't use queue_delayed_work,
         */
        if (requeue)
-               queue_work(aio_wq, &ctx->wq);
+               queue_delayed_work(aio_wq, &ctx->wq, 0);
 }
 
 
@@ -1005,9 +994,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
        kunmap_atomic(ring, KM_IRQ1);
 
        pr_debug("added to ring %p at [%lu]\n", iocb, tail);
-
-       pr_debug("%ld retries: %zd of %zd\n", iocb->ki_retried,
-               iocb->ki_nbytes - iocb->ki_left, iocb->ki_nbytes);
 put_rq:
        /* everything turned out well, dispose of the aiocb. */
        ret = __aio_put_req(ctx, iocb);
@@ -1413,7 +1399,6 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
        kiocb->ki_iovec->iov_len = kiocb->ki_left;
        kiocb->ki_nr_segs = 1;
        kiocb->ki_cur_seg = 0;
-       kiocb->ki_nbytes = kiocb->ki_left;
        return 0;
 }
 
@@ -1591,7 +1576,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        req->ki_opcode = iocb->aio_lio_opcode;
        init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
        INIT_LIST_HEAD(&req->ki_wait.task_list);
-       req->ki_retried = 0;
 
        ret = aio_setup_iocb(req);
 
index 38ede5c9d6fd412c8cddfed701afae9ae46cfa14..f968d134280807bf7aaafb50d6aeb22a6a45d304 100644 (file)
@@ -28,10 +28,11 @@ void autofs_kill_sb(struct super_block *sb)
        /*
         * In the event of a failure in get_sb_nodev the superblock
         * info is not present so nothing else has been setup, so
-        * just exit when we are called from deactivate_super.
+        * just call kill_anon_super when we are called from
+        * deactivate_super.
         */
        if (!sbi)
-               return;
+               goto out_kill_sb;
 
        if ( !sbi->catatonic )
                autofs_catatonic_mode(sbi); /* Free wait queues, close pipe */
@@ -44,6 +45,7 @@ void autofs_kill_sb(struct super_block *sb)
 
        kfree(sb->s_fs_info);
 
+out_kill_sb:
        DPRINTK(("autofs: shutting down\n"));
        kill_anon_super(sb);
 }
@@ -209,7 +211,6 @@ fail_iput:
 fail_free:
        kfree(sbi);
        s->s_fs_info = NULL;
-       kill_anon_super(s);
 fail_unlock:
        return -EINVAL;
 }
index ce7c0f1dd529211b2f4e5eca8feba0c87428c33f..9c48250fd726406fc040816c96b4fa42e8d305ce 100644 (file)
@@ -152,10 +152,11 @@ void autofs4_kill_sb(struct super_block *sb)
        /*
         * In the event of a failure in get_sb_nodev the superblock
         * info is not present so nothing else has been setup, so
-        * just exit when we are called from deactivate_super.
+        * just call kill_anon_super when we are called from
+        * deactivate_super.
         */
        if (!sbi)
-               return;
+               goto out_kill_sb;
 
        sb->s_fs_info = NULL;
 
@@ -167,6 +168,7 @@ void autofs4_kill_sb(struct super_block *sb)
 
        kfree(sbi);
 
+out_kill_sb:
        DPRINTK("shutting down");
        kill_anon_super(sb);
 }
@@ -426,7 +428,6 @@ fail_ino:
 fail_free:
        kfree(sbi);
        s->s_fs_info = NULL;
-       kill_anon_super(s);
 fail_unlock:
        return -EINVAL;
 }
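
Both autofs flavours get the same fix: when fill_super fails, s_fs_info is NULL and kill_sb used to return before ever calling kill_anon_super(), leaking the anonymous superblock; the now-redundant kill_anon_super() on the fill_super error path is dropped in exchange. A sketch of the resulting shape, with hypothetical names:

        static void my_kill_sb(struct super_block *sb)
        {
                struct my_sb_info *sbi = sb->s_fs_info;

                /* fill_super may have failed before sbi was set up */
                if (!sbi)
                        goto out_kill_sb;

                /* ... tear down sbi state ... */
                kfree(sbi);
                sb->s_fs_info = NULL;

        out_kill_sb:
                kill_anon_super(sb);    /* runs on the early-failure path too */
        }
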
index 07f7144f0e2e942feb736e33b315c8baae03cb20..bce402eee55426a9aad5ee92044b841c71811068 100644 (file)
@@ -61,7 +61,7 @@ static const struct super_operations befs_sops = {
 };
 
 /* slab cache for befs_inode_info objects */
-static kmem_cache_t *befs_inode_cachep;
+static struct kmem_cache *befs_inode_cachep;
 
 static const struct file_operations befs_dir_operations = {
        .read           = generic_read_dir,
@@ -277,7 +277,7 @@ befs_alloc_inode(struct super_block *sb)
 {
         struct befs_inode_info *bi;
         bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
-                                                       SLAB_KERNEL);
+                                                       GFP_KERNEL);
         if (!bi)
                 return NULL;
         return &bi->vfs_inode;
@@ -289,7 +289,7 @@ befs_destroy_inode(struct inode *inode)
         kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
         struct befs_inode_info *bi = (struct befs_inode_info *) foo;
        
index ed27ffb3459e9ea73ea23b476b27cf1bf95d0320..eac175ed9f445d999b3f8e5458f3a395de4c4a06 100644 (file)
@@ -228,12 +228,12 @@ static void bfs_write_super(struct super_block *s)
        unlock_kernel();
 }
 
-static kmem_cache_t * bfs_inode_cachep;
+static struct kmem_cache * bfs_inode_cachep;
 
 static struct inode *bfs_alloc_inode(struct super_block *sb)
 {
        struct bfs_inode_info *bi;
-       bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL);
+       bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
        if (!bi)
                return NULL;
        return &bi->vfs_inode;
@@ -244,7 +244,7 @@ static void bfs_destroy_inode(struct inode *inode)
        kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct bfs_inode_info *bi = foo;
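
The befs and bfs hunks (like most of the filesystem hunks that follow) are part of one mechanical conversion: the kmem_cache_t typedef is spelled out as struct kmem_cache, and slab allocations pass ordinary GFP flags (GFP_KERNEL, GFP_NOFS) instead of the retired SLAB_* aliases. A hedged sketch of the new-style usage; the object type and cache variable below are invented for illustration:

#include <linux/slab.h>

struct demo_inode_info { int dummy; };		/* placeholder object type */

static struct kmem_cache *demo_cachep;		/* was: kmem_cache_t *demo_cachep */

static struct demo_inode_info *demo_alloc(void)
{
	/* SLAB_KERNEL was simply an alias for GFP_KERNEL; callers now
	 * pass the GFP flag directly. */
	return kmem_cache_alloc(demo_cachep, GFP_KERNEL);
}
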
 
index 79b05a1a436582ebfd2415a682cce4a322be00d5..be5869d349991d69b9f1bafdc6258bc9f5fac6a6 100644 (file)
@@ -47,10 +47,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
 static int load_elf_library(struct file *);
 static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 /*
  * If we don't support core dumping, then supply a NULL so we
  * don't even try.
@@ -243,8 +239,9 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        if (interp_aout) {
                argv = sp + 2;
                envp = argv + argc + 1;
-               __put_user((elf_addr_t)(unsigned long)argv, sp++);
-               __put_user((elf_addr_t)(unsigned long)envp, sp++);
+               if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
+                   __put_user((elf_addr_t)(unsigned long)envp, sp++))
+                       return -EFAULT;
        } else {
                argv = sp;
                envp = argv + argc + 1;
@@ -254,7 +251,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        p = current->mm->arg_end = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
-               __put_user((elf_addr_t)p, argv++);
+               if (__put_user((elf_addr_t)p, argv++))
+                       return -EFAULT;
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
                        return 0;
@@ -265,7 +263,8 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
-               __put_user((elf_addr_t)p, envp++);
+               if (__put_user((elf_addr_t)p, envp++))
+                       return -EFAULT;
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
                        return 0;
@@ -545,7 +544,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        unsigned long reloc_func_desc = 0;
        char passed_fileno[6];
        struct files_struct *files;
-       int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
+       int executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
@@ -708,7 +707,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                                executable_stack = EXSTACK_DISABLE_X;
                        break;
                }
-       have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
 
        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
@@ -856,7 +854,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
-                       load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
+                       if (current->flags & PF_RANDOMIZE)
+                               load_bias = randomize_range(0x10000,
+                                                           ELF_ET_DYN_BASE,
+                                                           0);
+                       else
+                               load_bias = ELF_ET_DYN_BASE;
+                       load_bias = ELF_PAGESTART(load_bias - vaddr);
                }
 
                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
@@ -1582,6 +1586,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
                
                sz += thread_status_size;
 
+#ifdef ELF_CORE_WRITE_EXTRA_NOTES
+               sz += ELF_CORE_EXTRA_NOTES_SIZE;
+#endif
+
                fill_elf_note_phdr(&phdr, sz, offset);
                offset += sz;
                DUMP_WRITE(&phdr, sizeof(phdr));
@@ -1622,6 +1630,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
                if (!writenote(notes + i, file, &foffset))
                        goto end_coredump;
 
+#ifdef ELF_CORE_WRITE_EXTRA_NOTES
+       ELF_CORE_WRITE_EXTRA_NOTES;
+#endif
+
        /* write out the thread status notes section */
        list_for_each(t, &thread_list) {
                struct elf_thread_status *tmp =
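
Two independent changes sit in the binfmt_elf.c hunks: the __put_user() calls that build the argv/envp pointer tables are now checked so a faulting user stack produces -EFAULT instead of being silently ignored, and ET_DYN binaries have their load bias drawn from a randomized range when the task carries PF_RANDOMIZE (the unused have_pt_gnu_stack variable is dropped along the way). A small, hedged sketch of the checked-copy pattern; the function below is illustrative and assumes the caller has already done the access_ok() check, as create_elf_tables() does:

#include <asm/uaccess.h>

/* Copy an array of values into a user-space table, propagating -EFAULT
 * on the first fault instead of discarding __put_user()'s result. */
static int copy_table_to_user(unsigned long __user *up,
			      const unsigned long *vals, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (__put_user(vals[i], up + i))
			return -EFAULT;
	return 0;
}
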
index f86d5c9ce5ebda5eaf4c94fbd441dd770a24b9c4..ed9a61c6beb33a3132594e8322dd26d4af107301 100644 (file)
@@ -40,9 +40,6 @@
 #include <asm/pgalloc.h>
 
 typedef char *elf_caddr_t;
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
 
 #if 0
 #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
index aa4d09bd4e711872589b5b7a5e5ccf8b88952713..7ec737eda72bba71718d433820eae4e0c0d6a489 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 
 #define BIO_POOL_SIZE 256
 
-static kmem_cache_t *bio_slab __read_mostly;
+static struct kmem_cache *bio_slab __read_mostly;
 
 #define BIOVEC_NR_POOLS 6
 
@@ -44,7 +44,7 @@ mempool_t *bio_split_pool __read_mostly;
 struct biovec_slab {
        int nr_vecs;
        char *name; 
-       kmem_cache_t *slab;
+       struct kmem_cache *slab;
 };
 
 /*
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio)
  * run one bio_put() against the BIO.
  */
 
-static void bio_dirty_fn(void *data);
+static void bio_dirty_fn(struct work_struct *work);
 
-static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL);
+static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
 static DEFINE_SPINLOCK(bio_dirty_lock);
 static struct bio *bio_dirty_list;
 
 /*
  * This runs in process context
  */
-static void bio_dirty_fn(void *data)
+static void bio_dirty_fn(struct work_struct *work)
 {
        unsigned long flags;
        struct bio *bio;
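
The fs/bio.c hunks adapt the dirty-page work item to the reworked workqueue API: DECLARE_WORK() loses its data argument and the handler receives the struct work_struct itself. Handlers that need per-instance context recover it with container_of() when the work item is embedded in a larger object; a brief sketch of that idiom (the context struct and names are illustrative, not from bio.c, which keeps its state in globals):

#include <linux/workqueue.h>

struct demo_ctx {
	int pending;
	struct work_struct work;	/* embedded work item */
};

static void demo_work_fn(struct work_struct *work)
{
	/* Recover the enclosing object from the work pointer. */
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work);

	ctx->pending = 0;
}

/* Setup elsewhere: INIT_WORK(&ctx->work, demo_work_fn); then
 * schedule_work(&ctx->work) runs demo_work_fn in process context. */
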
index 36c0e7af9d0f18df487aab2420414fd95b1f1302..13816b4d76f61014bfcb131f9a37b3fd210bad3b 100644 (file)
@@ -235,11 +235,11 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep __read_mostly;
+static struct kmem_cache * bdev_cachep __read_mostly;
 
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
-       struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL);
+       struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -253,7 +253,7 @@ static void bdev_destroy_inode(struct inode *inode)
        kmem_cache_free(bdev_cachep, bdi);
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;
index 35527dca1dbcc415d95d76780537c577808df9ff..517860f2d75b0d575e65af71f54745840a3a28ad 100644 (file)
@@ -2908,7 +2908,7 @@ asmlinkage long sys_bdflush(int func, long data)
 /*
  * Buffer-head allocation
  */
-static kmem_cache_t *bh_cachep;
+static struct kmem_cache *bh_cachep;
 
 /*
  * Once the number of bh's in the machine exceeds this level, we start
@@ -2961,7 +2961,7 @@ void free_buffer_head(struct buffer_head *bh)
 EXPORT_SYMBOL(free_buffer_head);
 
 static void
-init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
+init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
                            SLAB_CTOR_CONSTRUCTOR) {
@@ -2972,7 +2972,6 @@ init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
        }
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static void buffer_exit_cpu(int cpu)
 {
        int i;
@@ -2994,7 +2993,6 @@ static int buffer_cpu_notify(struct notifier_block *self,
                buffer_exit_cpu((unsigned long)hcpu);
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init buffer_init(void)
 {
index 84976cdbe7136c4b76ad0777c924d44fd4b613cd..71bc87a37fc1a727d9d7e8ce66d425d1f54621bb 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #define DECLARE_GLOBALS_HERE
@@ -81,7 +82,7 @@ extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static int
 cifs_read_super(struct super_block *sb, void *data,
@@ -232,11 +233,11 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
                return generic_permission(inode, mask, NULL);
 }
 
-static kmem_cache_t *cifs_inode_cachep;
-static kmem_cache_t *cifs_req_cachep;
-static kmem_cache_t *cifs_mid_cachep;
-kmem_cache_t *cifs_oplock_cachep;
-static kmem_cache_t *cifs_sm_req_cachep;
+static struct kmem_cache *cifs_inode_cachep;
+static struct kmem_cache *cifs_req_cachep;
+static struct kmem_cache *cifs_mid_cachep;
+struct kmem_cache *cifs_oplock_cachep;
+static struct kmem_cache *cifs_sm_req_cachep;
 mempool_t *cifs_sm_req_poolp;
 mempool_t *cifs_req_poolp;
 mempool_t *cifs_mid_poolp;
@@ -245,7 +246,7 @@ static struct inode *
 cifs_alloc_inode(struct super_block *sb)
 {
        struct cifsInodeInfo *cifs_inode;
-       cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
+       cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
        if (!cifs_inode)
                return NULL;
        cifs_inode->cifsAttrs = 0x20;   /* default */
@@ -668,7 +669,7 @@ const struct file_operations cifs_dir_ops = {
 };
 
 static void
-cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
+cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
 {
        struct cifsInodeInfo *cifsi = inode;
 
index 71f77914ce9334e3134c3400385e62b92efd8e96..2caca06b4bae214327c3fccc7a046f6a0269ac37 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/completion.h>
 #include <linux/pagevec.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include "cifspdu.h"
index bbc9cd34b6ea42f02d85e9e31c140824b4ab5dd8..aedf683f011fe42606f7d58bb5005d1744dae543 100644 (file)
@@ -153,7 +153,7 @@ cifs_buf_get(void)
    albeit slightly larger than necessary and maxbuffersize 
    defaults to this and can not be bigger */
        ret_buf =
-           (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+           (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS);
 
        /* clear the first few header bytes */
        /* for most paths, more is cleared in header_assemble */
@@ -192,7 +192,7 @@ cifs_small_buf_get(void)
    albeit slightly larger than necessary and maxbuffersize 
    defaults to this and can not be bigger */
        ret_buf =
-           (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | SLAB_NOFS);
+           (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS);
        if (ret_buf) {
        /* No need to clear memory here, cleared in header assemble */
        /*      memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
index 48d47b46b1fb05597d662e88a9b342a883cc21b9..f80007eaebf4100c67224e4ffa524d2c6c517517 100644 (file)
@@ -34,7 +34,7 @@
 #include "cifs_debug.h"
   
 extern mempool_t *cifs_mid_poolp;
-extern kmem_cache_t *cifs_oplock_cachep;
+extern struct kmem_cache *cifs_oplock_cachep;
 
 static struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
@@ -51,7 +51,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
        }
        
        temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
-                                                   SLAB_KERNEL | SLAB_NOFS);
+                                                   GFP_KERNEL | GFP_NOFS);
        if (temp == NULL)
                return temp;
        else {
@@ -118,7 +118,7 @@ AllocOplockQEntry(struct inode * pinode, __u16 fid, struct cifsTconInfo * tcon)
                return NULL;
        }
        temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-                                                      SLAB_KERNEL);
+                                                      GFP_KERNEL);
        if (temp == NULL)
                return temp;
        else {
index 88d12332116489bd6dc2cb475dc8727cffe88a18..b64659fa82d0e6c94ffcf93d81a2cfb1fd70686a 100644 (file)
@@ -38,12 +38,12 @@ static void coda_clear_inode(struct inode *);
 static void coda_put_super(struct super_block *);
 static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
 
-static kmem_cache_t * coda_inode_cachep;
+static struct kmem_cache * coda_inode_cachep;
 
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
        struct coda_inode_info *ei;
-       ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL);
+       ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -58,7 +58,7 @@ static void coda_destroy_inode(struct inode *inode)
        kmem_cache_free(coda_inode_cachep, ITOC(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct coda_inode_info *ei = (struct coda_inode_info *) foo;
 
index 06dad665b88fce8f412dc10303a527fefeb2278a..a7e3f162fb15f9694425384710524f2dc122ff35 100644 (file)
@@ -871,7 +871,7 @@ asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name,
 
        retval = -EINVAL;
 
-       if (type_page) {
+       if (type_page && data_page) {
                if (!strcmp((char *)type_page, SMBFS_NAME)) {
                        do_smb_super_data_conv((void *)data_page);
                } else if (!strcmp((char *)type_page, NCPFS_NAME)) {
@@ -1144,7 +1144,9 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
        lastdirent = buf.previous;
        if (lastdirent) {
                typeof(lastdirent->d_off) d_off = file->f_pos;
-               __put_user_unaligned(d_off, &lastdirent->d_off);
+               error = -EFAULT;
+               if (__put_user_unaligned(d_off, &lastdirent->d_off))
+                       goto out_putf;
                error = count - buf.count;
        }
 
@@ -1611,14 +1613,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
                nr &= ~1UL;
                while (nr) {
                        unsigned long h, l;
-                       __get_user(l, ufdset);
-                       __get_user(h, ufdset+1);
+                       if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
+                               return -EFAULT;
                        ufdset += 2;
                        *fdset++ = h << 32 | l;
                        nr -= 2;
                }
-               if (odd)
-                       __get_user(*fdset, ufdset);
+               if (odd && __get_user(*fdset, ufdset))
+                       return -EFAULT;
        } else {
                /* Tricky, must clear full unsigned long in the
                 * kernel fdset at the end, this makes sure that
@@ -1630,14 +1632,14 @@ int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
 }
 
 static
-void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
-                       unsigned long *fdset)
+int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
+                     unsigned long *fdset)
 {
        unsigned long odd;
        nr = ROUND_UP(nr, __COMPAT_NFDBITS);
 
        if (!ufdset)
-               return;
+               return 0;
 
        odd = nr & 1UL;
        nr &= ~1UL;
@@ -1645,13 +1647,14 @@ void compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
                unsigned long h, l;
                l = *fdset++;
                h = l >> 32;
-               __put_user(l, ufdset);
-               __put_user(h, ufdset+1);
+               if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
+                       return -EFAULT;
                ufdset += 2;
                nr -= 2;
        }
-       if (odd)
-               __put_user(*fdset, ufdset);
+       if (odd && __put_user(*fdset, ufdset))
+               return -EFAULT;
+       return 0;
 }
 
 
@@ -1726,10 +1729,10 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
                ret = 0;
        }
 
-       compat_set_fd_set(n, inp, fds.res_in);
-       compat_set_fd_set(n, outp, fds.res_out);
-       compat_set_fd_set(n, exp, fds.res_ex);
-
+       if (compat_set_fd_set(n, inp, fds.res_in) ||
+           compat_set_fd_set(n, outp, fds.res_out) ||
+           compat_set_fd_set(n, exp, fds.res_ex))
+               ret = -EFAULT;
 out:
        kfree(bits);
 out_nofds:
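
compat_get_fd_set()/compat_set_fd_set() translate between the compat ABI, which stores fd_set bits as 32-bit words, and the kernel's native unsigned longs; on a 64-bit kernel each native word therefore holds two compat words, low word first, and after this patch any faulting __get_user()/__put_user() is reported as -EFAULT (compat_set_fd_set now returns int for that reason). A standalone illustration of the packing arithmetic, as plain hosted C with arbitrary values and no uaccess involved:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two 32-bit fd_set words packed into one 64-bit word, low first,
	 * then split back, mirroring h << 32 | l and l >> 32 above. */
	uint32_t lo = 0x00000005, hi = 0x80000000;
	uint64_t packed = ((uint64_t)hi << 32) | lo;

	printf("packed   = %#018llx\n", (unsigned long long)packed);
	printf("low  32  = %#010x\n", (unsigned)(uint32_t)packed);
	printf("high 32  = %#010x\n", (unsigned)(packed >> 32));
	return 0;
}
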
index a91f2628c981328e41a1b93a5b9c22cc7b0f913a..bcc3caf5d8209651de43e34fd95f3297bc59202b 100644 (file)
@@ -211,8 +211,10 @@ static int do_video_stillpicture(unsigned int fd, unsigned int cmd, unsigned lon
        up_native =
                compat_alloc_user_space(sizeof(struct video_still_picture));
 
-       put_user(compat_ptr(fp), &up_native->iFrame);
-       put_user(size, &up_native->size);
+       err =  put_user(compat_ptr(fp), &up_native->iFrame);
+       err |= put_user(size, &up_native->size);
+       if (err)
+               return -EFAULT;
 
        err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -236,8 +238,10 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
        err |= get_user(length, &up->length);
 
        up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
-       put_user(compat_ptr(palp), &up_native->palette);
-       put_user(length, &up_native->length);
+       err  = put_user(compat_ptr(palp), &up_native->palette);
+       err |= put_user(length, &up_native->length);
+       if (err)
+               return -EFAULT;
 
        err = sys_ioctl(fd, cmd, (unsigned long) up_native);
 
@@ -2043,16 +2047,19 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
         struct serial_struct ss;
         mm_segment_t oldseg = get_fs();
         __u32 udata;
+       unsigned int base;
 
         if (cmd == TIOCSSERIAL) {
                 if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
                         return -EFAULT;
                 if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
                        return -EFAULT;
-                __get_user(udata, &ss32->iomem_base);
+                if (__get_user(udata, &ss32->iomem_base))
+                       return -EFAULT;
                 ss.iomem_base = compat_ptr(udata);
-                __get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-                __get_user(ss.port_high, &ss32->port_high);
+                if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+                   __get_user(ss.port_high, &ss32->port_high))
+                       return -EFAULT;
                 ss.iomap_base = 0UL;
         }
         set_fs(KERNEL_DS);
@@ -2063,12 +2070,12 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
                         return -EFAULT;
                 if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
                        return -EFAULT;
-                __put_user((unsigned long)ss.iomem_base  >> 32 ?
-                            0xffffffff : (unsigned)(unsigned long)ss.iomem_base,
-                            &ss32->iomem_base);
-                __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift);
-                __put_user(ss.port_high, &ss32->port_high);
-
+               base = (unsigned long)ss.iomem_base  >> 32 ?
+                       0xffffffff : (unsigned)(unsigned long)ss.iomem_base;
+               if (__put_user(base, &ss32->iomem_base) ||
+                   __put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
+                   __put_user(ss.port_high, &ss32->port_high))
+                       return -EFAULT;
         }
         return err;
 }
@@ -2397,6 +2404,7 @@ HANDLE_IOCTL(SIOCGIFMAP, dev_ifsioc)
 HANDLE_IOCTL(SIOCSIFMAP, dev_ifsioc)
 HANDLE_IOCTL(SIOCGIFADDR, dev_ifsioc)
 HANDLE_IOCTL(SIOCSIFADDR, dev_ifsioc)
+HANDLE_IOCTL(SIOCSIFHWBROADCAST, dev_ifsioc)
 
 /* ioctls used by appletalk ddp.c */
 HANDLE_IOCTL(SIOCATALKDIFADDR, dev_ifsioc)
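
The compat_ioctl.c hunks apply the same discipline: put_user()/get_user() results are OR-ed together or tested directly so a bad compat pointer yields -EFAULT, and when TIOCGSERIAL copies iomem_base back to a 32-bit caller the 64-bit kernel pointer is clamped to 0xffffffff when it does not fit in the compat field. A tiny illustration of that clamp in plain hosted C, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

/* Reduce a 64-bit address to what a 32-bit compat field can carry,
 * as the patched serial_struct_ioctl() does for iomem_base. */
static uint32_t clamp_to_u32(uint64_t addr)
{
	return (addr >> 32) ? 0xffffffffu : (uint32_t)addr;
}

int main(void)
{
	printf("%#x\n", (unsigned)clamp_to_u32(0x00000000f0001000ull)); /* fits    */
	printf("%#x\n", (unsigned)clamp_to_u32(0xffff8800deadbeefull)); /* clamped */
	return 0;
}
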
index 3f4ff7a242b9ebf746bf93f1ea67beb0622d1cf4..f92cd303d2c9599a2837ee012814aabc15153891 100644 (file)
@@ -49,7 +49,7 @@ struct configfs_dirent {
 #define CONFIGFS_NOT_PINNED    (CONFIGFS_ITEM_ATTR)
 
 extern struct vfsmount * configfs_mount;
-extern kmem_cache_t *configfs_dir_cachep;
+extern struct kmem_cache *configfs_dir_cachep;
 
 extern int configfs_is_root(struct config_item *item);
 
index 8a3b6a1a6ad139aa50ba1da2e10bdf6cd0de2980..c398861f78a52fa72cd9cde3079c9f3593d033f5 100644 (file)
@@ -93,8 +93,8 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare
  *
  * called with parent inode's i_mutex held
  */
-int configfs_dirent_exists(struct configfs_dirent *parent_sd,
-                          const unsigned char *new)
+static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
+                                 const unsigned char *new)
 {
        struct configfs_dirent * sd;
 
@@ -1176,8 +1176,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
                return;
        }
 
-       mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
-       mutex_lock(&dentry->d_inode->i_mutex);
+       mutex_lock_nested(&configfs_sb->s_root->d_inode->i_mutex,
+                         I_MUTEX_PARENT);
+       mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
        if (configfs_detach_prep(dentry)) {
                printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n");
        }
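
configfs_unregister_subsystem() legitimately holds two i_mutex locks at once, the parent directory's first and then the subsystem dentry's; plain mutex_lock() would make lockdep flag the nested acquisition of the same lock class, so the calls are annotated with mutex_lock_nested() and the standard I_MUTEX_PARENT/I_MUTEX_CHILD subclasses. A hedged sketch of the annotation; everything except the locking calls, the subclasses and i_mutex is a placeholder:

#include <linux/fs.h>
#include <linux/mutex.h>

static void demo_lock_parent_then_child(struct inode *parent, struct inode *child)
{
	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&child->i_mutex, I_MUTEX_CHILD);
}

static void demo_unlock_child_then_parent(struct inode *parent, struct inode *child)
{
	mutex_unlock(&child->i_mutex);
	mutex_unlock(&parent->i_mutex);
}
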
index 68bd5c93ca524a089d1881bbcb9b5da4ef563b74..ed678529ebb2a8b58fdc86c277e3eea83aca96c9 100644 (file)
@@ -38,7 +38,7 @@
 
 struct vfsmount * configfs_mount = NULL;
 struct super_block * configfs_sb = NULL;
-kmem_cache_t *configfs_dir_cachep;
+struct kmem_cache *configfs_dir_cachep;
 static int configfs_mnt_count = 0;
 
 static struct super_operations configfs_ops = {
index a624c3ec81892870545cd80bea3f22c91069c2ab..0509cedd415c41d5db51fb0db26096f510067f5b 100644 (file)
@@ -481,6 +481,8 @@ static int cramfs_readpage(struct file *file, struct page * page)
                pgdata = kmap(page);
                if (compr_len == 0)
                        ; /* hole */
+               else if (compr_len > (PAGE_CACHE_SIZE << 1))
+                       printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len);
                else {
                        mutex_lock(&read_mutex);
                        bytes_filled = cramfs_uncompress_block(pgdata,
index fd4a428998efe1152c01129defec0691ac87a465..d68631f18df122824a0d36a307c647f15931c7bd 100644 (file)
@@ -43,7 +43,7 @@ static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -68,15 +68,19 @@ struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
 };
 
-static void d_callback(struct rcu_head *head)
+static void __d_free(struct dentry *dentry)
 {
-       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-
        if (dname_external(dentry))
                kfree(dentry->d_name.name);
        kmem_cache_free(dentry_cache, dentry); 
 }
 
+static void d_callback(struct rcu_head *head)
+{
+       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
+       __d_free(dentry);
+}
+
 /*
  * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
  * inside dcache_lock.
@@ -85,7 +89,11 @@ static void d_free(struct dentry *dentry)
 {
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
-       call_rcu(&dentry->d_u.d_rcu, d_callback);
+       /* if dentry was never inserted into hash, immediate free is OK */
+       if (dentry->d_hash.pprev == NULL)
+               __d_free(dentry);
+       else
+               call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
 /*
@@ -2072,10 +2080,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+struct kmem_cache *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);
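
The dcache.c change splits the RCU callback so d_free() can take a shortcut: a dentry whose d_hash.pprev is still NULL was never inserted into the hash, no lockless lookup can be traversing it, and it is freed immediately; hashed dentries still wait out a grace period via call_rcu(). The same publish-or-free-now idea, sketched with an invented object type (call_rcu(), kfree() and container_of() are the real interfaces):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	int published;		/* nonzero once visible to RCU readers */
	struct rcu_head rcu;
};

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_obj, rcu));
}

static void demo_free(struct demo_obj *obj)
{
	if (!obj->published)
		kfree(obj);			/* never published: free now */
	else
		call_rcu(&obj->rcu, demo_free_rcu); /* readers may still see it */
}
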
 
index 0c4b0674854bbe0a47506a00ea1b5ef4b2529e4d..21af1629f9bcc2ebd6eda9d701bb31b1e03feb49 100644 (file)
@@ -37,7 +37,7 @@ struct dcookie_struct {
 
 static LIST_HEAD(dcookie_users);
 static DEFINE_MUTEX(dcookie_mutex);
-static kmem_cache_t *dcookie_cache __read_mostly;
+static struct kmem_cache *dcookie_cache __read_mostly;
 static struct list_head *dcookie_hashtable __read_mostly;
 static size_t hash_size __read_mostly;
 
index 81b2c6465eeb26dd21f23746209162eee8169b22..b5654a284fef1283b1186d7e39dcc1e0b3426396 100644 (file)
@@ -1,14 +1,32 @@
 menu "Distributed Lock Manager"
-       depends on INET && IP_SCTP && EXPERIMENTAL
+       depends on EXPERIMENTAL && INET
 
 config DLM
        tristate "Distributed Lock Manager (DLM)"
        depends on IPV6 || IPV6=n
        select CONFIGFS_FS
+       select IP_SCTP if DLM_SCTP
        help
        A general purpose distributed lock manager for kernel or userspace
        applications.
 
+choice
+       prompt "Select DLM communications protocol"
+       depends on DLM
+       default DLM_TCP
+       help
+       The DLM can use TCP or SCTP for its network communications.
+       SCTP supports multi-homed operations whereas TCP doesn't.
+       However, SCTP seems to have stability problems at the moment.
+
+config DLM_TCP
+       bool "TCP/IP"
+
+config DLM_SCTP
+       bool "SCTP"
+
+endchoice
+
 config DLM_DEBUG
        bool "DLM debugging"
        depends on DLM
index 1832e0297f7d828b34b146bb5784dad2c33f3f7c..65388944eba0e8a5507842ffe23bcde8d0d39866 100644 (file)
@@ -4,7 +4,6 @@ dlm-y :=                        ast.o \
                                dir.o \
                                lock.o \
                                lockspace.o \
-                               lowcomms.o \
                                main.o \
                                member.o \
                                memory.o \
@@ -17,3 +16,6 @@ dlm-y :=                      ast.o \
                                util.o
 dlm-$(CONFIG_DLM_DEBUG) +=     debug_fs.o
 
+dlm-$(CONFIG_DLM_TCP)   += lowcomms-tcp.o
+
+dlm-$(CONFIG_DLM_SCTP)  += lowcomms-sctp.o
\ No newline at end of file
index 1e5cd67e1b7ad27c36c8fb9b243547272c41403b..1ee8195e6fc0c17a34fcd351b9248f64d5171025 100644 (file)
@@ -471,6 +471,7 @@ struct dlm_ls {
        char                    *ls_recover_buf;
        int                     ls_recover_nodeid; /* for debugging */
        uint64_t                ls_rcom_seq;
+       spinlock_t              ls_rcom_spin;
        struct list_head        ls_recover_list;
        spinlock_t              ls_recover_list_lock;
        int                     ls_recover_list_count;
@@ -488,7 +489,8 @@ struct dlm_ls {
 #define LSFL_RUNNING           1
 #define LSFL_RECOVERY_STOP     2
 #define LSFL_RCOM_READY                3
-#define LSFL_UEVENT_WAIT       4
+#define LSFL_RCOM_WAIT         4
+#define LSFL_UEVENT_WAIT       5
 
 /* much of this is just saving user space pointers associated with the
    lock that we pass back to the user lib with an ast */
index 3f2befa4797b9a983914dfc16b84c2f3bf852481..30878defaeb6d23a92378233d260f6cee082f7ea 100644 (file)
@@ -2372,6 +2372,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
 {
        lkb->lkb_exflags = ms->m_exflags;
+       lkb->lkb_sbflags = ms->m_sbflags;
        lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
                         (ms->m_flags & 0x0000FFFF);
 }
@@ -3028,10 +3029,17 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
 
        while (1) {
                if (dlm_locking_stopped(ls)) {
-                       if (!recovery)
-                               dlm_add_requestqueue(ls, nodeid, hd);
-                       error = -EINTR;
-                       goto out;
+                       if (recovery) {
+                               error = -EINTR;
+                               goto out;
+                       }
+                       error = dlm_add_requestqueue(ls, nodeid, hd);
+                       if (error == -EAGAIN)
+                               continue;
+                       else {
+                               error = -EINTR;
+                               goto out;
+                       }
                }
 
                if (lock_recovery_try(ls))
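
In dlm_receive_message() the handling of messages that arrive while locking is stopped becomes a queue-or-retry loop: dlm_add_requestqueue() may now report -EAGAIN, in which case the stop condition is re-checked and the attempt repeated; otherwise the message is parked on the request queue and the caller backs out with -EINTR. A hedged, hosted-C reduction of that control flow; the two helpers below are stand-ins for dlm_locking_stopped() and dlm_add_requestqueue(), not kernel symbols:

#include <errno.h>
#include <stdio.h>

static int polls_left = 2;
static int locking_stopped(void) { return polls_left > 0; }
static int add_requestqueue(int msg)
{
	(void)msg;
	polls_left--;
	return polls_left ? -EAGAIN : 0;	/* "fail" once to show the retry */
}

static int receive_one(int msg)
{
	while (locking_stopped()) {
		if (add_requestqueue(msg) == -EAGAIN)
			continue;	/* could not queue right now: re-check and retry */
		return -EINTR;		/* parked for recovery to replay later */
	}
	return 0;			/* normal processing would follow */
}

int main(void)
{
	printf("receive_one -> %d\n", receive_one(1));	/* prints -4 (-EINTR) */
	return 0;
}
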
index f8842ca443c24dda7e24344ee38a5a61e560acf3..59012b089e8d68ba77444a8ef0fd82f8743f5ca6 100644 (file)
@@ -22,6 +22,7 @@
 #include "memory.h"
 #include "lock.h"
 #include "recover.h"
+#include "requestqueue.h"
 
 #ifdef CONFIG_DLM_DEBUG
 int dlm_create_debug_file(struct dlm_ls *ls);
@@ -478,6 +479,8 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
        ls->ls_recoverd_task = NULL;
        mutex_init(&ls->ls_recoverd_active);
        spin_lock_init(&ls->ls_recover_lock);
+       spin_lock_init(&ls->ls_rcom_spin);
+       get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
        ls->ls_recover_status = 0;
        ls->ls_recover_seq = 0;
        ls->ls_recover_args = NULL;
@@ -684,6 +687,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
         * Free structures on any other lists
         */
 
+       dlm_purge_requestqueue(ls);
        kfree(ls->ls_recover_args);
        dlm_clear_free_entries(ls);
        dlm_clear_members(ls);
similarity index 87%
rename from fs/dlm/lowcomms.c
rename to fs/dlm/lowcomms-sctp.c
index 6da6b14d5a61b40a83e904e6f56cc19d12865ad2..fe158d7a92853074da36daa33dcbcbabaae00ec7 100644 (file)
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -75,13 +75,13 @@ struct nodeinfo {
 };
 
 static DEFINE_IDR(nodeinfo_idr);
-static struct rw_semaphore     nodeinfo_lock;
-static int                     max_nodeid;
+static DECLARE_RWSEM(nodeinfo_lock);
+static int max_nodeid;
 
 struct cbuf {
-       unsigned                base;
-       unsigned                len;
-       unsigned                mask;
+       unsigned int base;
+       unsigned int len;
+       unsigned int mask;
 };
 
 /* Just the one of these, now. But this struct keeps
@@ -90,9 +90,9 @@ struct cbuf {
 #define CF_READ_PENDING 1
 
 struct connection {
-       struct socket          *sock;
+       struct socket           *sock;
        unsigned long           flags;
-       struct page            *rx_page;
+       struct page             *rx_page;
        atomic_t                waiting_requests;
        struct cbuf             cb;
        int                     eagain_flag;
@@ -102,36 +102,40 @@ struct connection {
 
 struct writequeue_entry {
        struct list_head        list;
-       struct page            *page;
+       struct page             *page;
        int                     offset;
        int                     len;
        int                     end;
        int                     users;
-       struct nodeinfo        *ni;
+       struct nodeinfo         *ni;
 };
 
-#define CBUF_ADD(cb, n) do { (cb)->len += n; } while(0)
-#define CBUF_EMPTY(cb) ((cb)->len == 0)
-#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1))
-#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask)
+static void cbuf_add(struct cbuf *cb, int n)
+{
+       cb->len += n;
+}
 
-#define CBUF_INIT(cb, size) \
-do { \
-       (cb)->base = (cb)->len = 0; \
-       (cb)->mask = ((size)-1); \
-} while(0)
+static int cbuf_data(struct cbuf *cb)
+{
+       return ((cb->base + cb->len) & cb->mask);
+}
 
-#define CBUF_EAT(cb, n) \
-do { \
-       (cb)->len  -= (n); \
-       (cb)->base += (n); \
-       (cb)->base &= (cb)->mask; \
-} while(0)
+static void cbuf_init(struct cbuf *cb, int size)
+{
+       cb->base = cb->len = 0;
+       cb->mask = size-1;
+}
 
+static void cbuf_eat(struct cbuf *cb, int n)
+{
+       cb->len  -= n;
+       cb->base += n;
+       cb->base &= cb->mask;
+}
 
 /* List of nodes which have writes pending */
-static struct list_head write_nodes;
-static spinlock_t write_nodes_lock;
+static LIST_HEAD(write_nodes);
+static DEFINE_SPINLOCK(write_nodes_lock);
 
 /* Maximum number of incoming messages to process before
  * doing a schedule()
@@ -141,8 +145,7 @@ static spinlock_t write_nodes_lock;
 /* Manage daemons */
 static struct task_struct *recv_task;
 static struct task_struct *send_task;
-static wait_queue_head_t lowcomms_recv_wait;
-static atomic_t accepting;
+static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait);
 
 /* The SCTP connection */
 static struct connection sctp_con;
@@ -161,11 +164,11 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
                return error;
 
        if (dlm_local_addr[0]->ss_family == AF_INET) {
-               struct sockaddr_in *in4  = (struct sockaddr_in *) &addr;
+               struct sockaddr_in *in4  = (struct sockaddr_in *) &addr;
                struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
                ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
        } else {
-               struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
+               struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &addr;
                struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
                memcpy(&ret6->sin6_addr, &in6->sin6_addr,
                       sizeof(in6->sin6_addr));
@@ -174,6 +177,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
        return 0;
 }
 
+/* If alloc is 0 here we will not attempt to allocate a new
+   nodeinfo struct */
 static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
 {
        struct nodeinfo *ni;
@@ -184,44 +189,45 @@ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
        ni = idr_find(&nodeinfo_idr, nodeid);
        up_read(&nodeinfo_lock);
 
-       if (!ni && alloc) {
-               down_write(&nodeinfo_lock);
+       if (ni || !alloc)
+               return ni;
 
-               ni = idr_find(&nodeinfo_idr, nodeid);
-               if (ni)
-                       goto out_up;
+       down_write(&nodeinfo_lock);
 
-               r = idr_pre_get(&nodeinfo_idr, alloc);
-               if (!r)
-                       goto out_up;
+       ni = idr_find(&nodeinfo_idr, nodeid);
+       if (ni)
+               goto out_up;
 
-               ni = kmalloc(sizeof(struct nodeinfo), alloc);
-               if (!ni)
-                       goto out_up;
+       r = idr_pre_get(&nodeinfo_idr, alloc);
+       if (!r)
+               goto out_up;
 
-               r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
-               if (r) {
-                       kfree(ni);
-                       ni = NULL;
-                       goto out_up;
-               }
-               if (n != nodeid) {
-                       idr_remove(&nodeinfo_idr, n);
-                       kfree(ni);
-                       ni = NULL;
-                       goto out_up;
-               }
-               memset(ni, 0, sizeof(struct nodeinfo));
-               spin_lock_init(&ni->lock);
-               INIT_LIST_HEAD(&ni->writequeue);
-               spin_lock_init(&ni->writequeue_lock);
-               ni->nodeid = nodeid;
-
-               if (nodeid > max_nodeid)
-                       max_nodeid = nodeid;
-       out_up:
-               up_write(&nodeinfo_lock);
+       ni = kmalloc(sizeof(struct nodeinfo), alloc);
+       if (!ni)
+               goto out_up;
+
+       r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
+       if (r) {
+               kfree(ni);
+               ni = NULL;
+               goto out_up;
        }
+       if (n != nodeid) {
+               idr_remove(&nodeinfo_idr, n);
+               kfree(ni);
+               ni = NULL;
+               goto out_up;
+       }
+       memset(ni, 0, sizeof(struct nodeinfo));
+       spin_lock_init(&ni->lock);
+       INIT_LIST_HEAD(&ni->writequeue);
+       spin_lock_init(&ni->writequeue_lock);
+       ni->nodeid = nodeid;
+
+       if (nodeid > max_nodeid)
+               max_nodeid = nodeid;
+out_up:
+       up_write(&nodeinfo_lock);
 
        return ni;
 }
@@ -279,13 +285,13 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
                in4_addr->sin_port = cpu_to_be16(port);
                memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
                memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
-                                     sizeof(struct sockaddr_in));
+                      sizeof(struct sockaddr_in));
                *addr_len = sizeof(struct sockaddr_in);
        } else {
                struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
                in6_addr->sin6_port = cpu_to_be16(port);
                memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
-                                     sizeof(struct sockaddr_in6));
+                      sizeof(struct sockaddr_in6));
                *addr_len = sizeof(struct sockaddr_in6);
        }
 }
@@ -324,7 +330,7 @@ static void send_shutdown(sctp_assoc_t associd)
        cmsg->cmsg_type = SCTP_SNDRCV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
        outmessage.msg_controllen = cmsg->cmsg_len;
-       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       sinfo = CMSG_DATA(cmsg);
        memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
 
        sinfo->sinfo_flags |= MSG_EOF;
@@ -387,7 +393,7 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
 
                        if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
                                log_print("COMM_UP for invalid assoc ID %d",
-                                        (int)sn->sn_assoc_change.sac_assoc_id);
+                                         (int)sn->sn_assoc_change.sac_assoc_id);
                                init_failed();
                                return;
                        }
@@ -398,15 +404,18 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
                        fs = get_fs();
                        set_fs(get_ds());
                        ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
-                                               IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
-                                               (char*)&prim, &prim_len);
+                                                            IPPROTO_SCTP,
+                                                            SCTP_PRIMARY_ADDR,
+                                                            (char*)&prim,
+                                                            &prim_len);
                        set_fs(fs);
                        if (ret < 0) {
                                struct nodeinfo *ni;
 
                                log_print("getsockopt/sctp_primary_addr on "
                                          "new assoc %d failed : %d",
-                                   (int)sn->sn_assoc_change.sac_assoc_id, ret);
+                                         (int)sn->sn_assoc_change.sac_assoc_id,
+                                         ret);
 
                                /* Retry INIT later */
                                ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
@@ -426,12 +435,10 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
                                return;
 
                        /* Save the assoc ID */
-                       spin_lock(&ni->lock);
                        ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
-                       spin_unlock(&ni->lock);
 
                        log_print("got new/restarted association %d nodeid %d",
-                              (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
+                                 (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
 
                        /* Send any pending writes */
                        clear_bit(NI_INIT_PENDING, &ni->flags);
@@ -507,13 +514,12 @@ static int receive_from_sock(void)
                sctp_con.rx_page = alloc_page(GFP_ATOMIC);
                if (sctp_con.rx_page == NULL)
                        goto out_resched;
-               CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE);
+               cbuf_init(&sctp_con.cb, PAGE_CACHE_SIZE);
        }
 
        memset(&incmsg, 0, sizeof(incmsg));
        memset(&msgname, 0, sizeof(msgname));
 
-       memset(incmsg, 0, sizeof(incmsg));
        msg.msg_name = &msgname;
        msg.msg_namelen = sizeof(msgname);
        msg.msg_flags = 0;
@@ -532,17 +538,17 @@ static int receive_from_sock(void)
         * iov[0] is the bit of the circular buffer between the current end
         * point (cb.base + cb.len) and the end of the buffer.
         */
-       iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb);
+       iov[0].iov_len = sctp_con.cb.base - cbuf_data(&sctp_con.cb);
        iov[0].iov_base = page_address(sctp_con.rx_page) +
-                         CBUF_DATA(&sctp_con.cb);
+               cbuf_data(&sctp_con.cb);
        iov[1].iov_len = 0;
 
        /*
         * iov[1] is the bit of the circular buffer between the start of the
         * buffer and the start of the currently used section (cb.base)
         */
-       if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) {
-               iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb);
+       if (cbuf_data(&sctp_con.cb) >= sctp_con.cb.base) {
+               iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&sctp_con.cb);
                iov[1].iov_len = sctp_con.cb.base;
                iov[1].iov_base = page_address(sctp_con.rx_page);
                msg.msg_iovlen = 2;
@@ -557,7 +563,7 @@ static int receive_from_sock(void)
        msg.msg_control = incmsg;
        msg.msg_controllen = sizeof(incmsg);
        cmsg = CMSG_FIRSTHDR(&msg);
-       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       sinfo = CMSG_DATA(cmsg);
 
        if (msg.msg_flags & MSG_NOTIFICATION) {
                process_sctp_notification(&msg, page_address(sctp_con.rx_page));
@@ -583,29 +589,29 @@ static int receive_from_sock(void)
        if (r == 1)
                return 0;
 
-       CBUF_ADD(&sctp_con.cb, ret);
+       cbuf_add(&sctp_con.cb, ret);
        ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
                                          page_address(sctp_con.rx_page),
                                          sctp_con.cb.base, sctp_con.cb.len,
                                          PAGE_CACHE_SIZE);
        if (ret < 0)
                goto out_close;
-       CBUF_EAT(&sctp_con.cb, ret);
+       cbuf_eat(&sctp_con.cb, ret);
 
-      out:
+out:
        ret = 0;
        goto out_ret;
 
-      out_resched:
+out_resched:
        lowcomms_data_ready(sctp_con.sock->sk, 0);
        ret = 0;
-       schedule();
+       cond_resched();
        goto out_ret;
 
-      out_close:
+out_close:
        if (ret != -EAGAIN)
                log_print("error reading from sctp socket: %d", ret);
-      out_ret:
+out_ret:
        return ret;
 }
 
@@ -619,10 +625,12 @@ static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
        set_fs(get_ds());
        if (num == 1)
                result = sctp_con.sock->ops->bind(sctp_con.sock,
-                                       (struct sockaddr *) addr, addr_len);
+                                                 (struct sockaddr *) addr,
+                                                 addr_len);
        else
                result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
-                               SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len);
+                                                       SCTP_SOCKOPT_BINDX_ADD,
+                                                       (char *)addr, addr_len);
        set_fs(fs);
 
        if (result < 0)
@@ -719,10 +727,10 @@ static int init_sock(void)
 
        return 0;
 
- create_delsock:
+create_delsock:
        sock_release(sock);
        sctp_con.sock = NULL;
- out:
+out:
        return result;
 }
 
@@ -756,16 +764,13 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
        int users = 0;
        struct nodeinfo *ni;
 
-       if (!atomic_read(&accepting))
-               return NULL;
-
        ni = nodeid2nodeinfo(nodeid, allocation);
        if (!ni)
                return NULL;
 
        spin_lock(&ni->writequeue_lock);
        e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
-       if (((struct list_head *) e == &ni->writequeue) ||
+       if ((&e->list == &ni->writequeue) ||
            (PAGE_CACHE_SIZE - e->end < len)) {
                e = NULL;
        } else {
@@ -776,7 +781,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
        spin_unlock(&ni->writequeue_lock);
 
        if (e) {
-             got_one:
+       got_one:
                if (users == 0)
                        kmap(e->page);
                *ppc = page_address(e->page) + offset;
@@ -803,9 +808,6 @@ void dlm_lowcomms_commit_buffer(void *arg)
        int users;
        struct nodeinfo *ni = e->ni;
 
-       if (!atomic_read(&accepting))
-               return;
-
        spin_lock(&ni->writequeue_lock);
        users = --e->users;
        if (users)
@@ -822,7 +824,7 @@ void dlm_lowcomms_commit_buffer(void *arg)
        }
        return;
 
-      out:
+out:
        spin_unlock(&ni->writequeue_lock);
        return;
 }
@@ -878,7 +880,7 @@ static void initiate_association(int nodeid)
        cmsg->cmsg_level = IPPROTO_SCTP;
        cmsg->cmsg_type = SCTP_SNDRCV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       sinfo = CMSG_DATA(cmsg);
        memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
        sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
 
@@ -892,7 +894,7 @@ static void initiate_association(int nodeid)
 }
 
 /* Send a message */
-static int send_to_sock(struct nodeinfo *ni)
+static void send_to_sock(struct nodeinfo *ni)
 {
        int ret = 0;
        struct writequeue_entry *e;
@@ -903,13 +905,13 @@ static int send_to_sock(struct nodeinfo *ni)
        struct sctp_sndrcvinfo *sinfo;
        struct kvec iov;
 
-        /* See if we need to init an association before we start
+       /* See if we need to init an association before we start
           sending precious messages */
        spin_lock(&ni->lock);
        if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
                spin_unlock(&ni->lock);
                initiate_association(ni->nodeid);
-               return 0;
+               return;
        }
        spin_unlock(&ni->lock);
 
@@ -923,7 +925,7 @@ static int send_to_sock(struct nodeinfo *ni)
        cmsg->cmsg_level = IPPROTO_SCTP;
        cmsg->cmsg_type = SCTP_SNDRCV;
        cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-       sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+       sinfo = CMSG_DATA(cmsg);
        memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
        sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
        sinfo->sinfo_assoc_id = ni->assoc_id;
@@ -955,7 +957,7 @@ static int send_to_sock(struct nodeinfo *ni)
                                goto send_error;
                } else {
                        /* Don't starve people filling buffers */
-                       schedule();
+                       cond_resched();
                }
 
                spin_lock(&ni->writequeue_lock);
@@ -964,15 +966,16 @@ static int send_to_sock(struct nodeinfo *ni)
 
                if (e->len == 0 && e->users == 0) {
                        list_del(&e->list);
+                       kunmap(e->page);
                        free_entry(e);
                        continue;
                }
        }
        spin_unlock(&ni->writequeue_lock);
- out:
-       return ret;
+out:
+       return;
 
- send_error:
+send_error:
        log_print("Error sending to node %d %d", ni->nodeid, ret);
        spin_lock(&ni->lock);
        if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
@@ -982,7 +985,7 @@ static int send_to_sock(struct nodeinfo *ni)
        } else
                spin_unlock(&ni->lock);
 
-       return ret;
+       return;
 }
 
 /* Try to send any messages that are pending */
@@ -994,7 +997,7 @@ static void process_output_queue(void)
        spin_lock_bh(&write_nodes_lock);
        list_for_each_safe(list, temp, &write_nodes) {
                struct nodeinfo *ni =
-                   list_entry(list, struct nodeinfo, write_list);
+                       list_entry(list, struct nodeinfo, write_list);
                clear_bit(NI_WRITE_PENDING, &ni->flags);
                list_del(&ni->write_list);
 
@@ -1106,7 +1109,7 @@ static int dlm_recvd(void *data)
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&lowcomms_recv_wait, &wait);
                if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
-                       schedule();
+                       cond_resched();
                remove_wait_queue(&lowcomms_recv_wait, &wait);
                set_current_state(TASK_RUNNING);
 
@@ -1118,12 +1121,12 @@ static int dlm_recvd(void *data)
 
                                /* Don't starve out everyone else */
                                if (++count >= MAX_RX_MSG_COUNT) {
-                                       schedule();
+                                       cond_resched();
                                        count = 0;
                                }
                        } while (!kthread_should_stop() && ret >=0);
                }
-               schedule();
+               cond_resched();
        }
 
        return 0;
@@ -1138,7 +1141,7 @@ static int dlm_sendd(void *data)
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (write_list_empty())
-                       schedule();
+                       cond_resched();
                set_current_state(TASK_RUNNING);
 
                if (sctp_con.eagain_flag) {
@@ -1166,7 +1169,7 @@ static int daemons_start(void)
 
        p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
        error = IS_ERR(p);
-               if (error) {
+       if (error) {
                log_print("can't start dlm_recvd %d", error);
                return error;
        }
@@ -1174,7 +1177,7 @@ static int daemons_start(void)
 
        p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
        error = IS_ERR(p);
-               if (error) {
+       if (error) {
                log_print("can't start dlm_sendd %d", error);
                kthread_stop(recv_task);
                return error;
@@ -1197,43 +1200,28 @@ int dlm_lowcomms_start(void)
        error = daemons_start();
        if (error)
                goto fail_sock;
-       atomic_set(&accepting, 1);
        return 0;
 
- fail_sock:
+fail_sock:
        close_connection();
        return error;
 }
 
-/* Set all the activity flags to prevent any socket activity. */
-
 void dlm_lowcomms_stop(void)
 {
-       atomic_set(&accepting, 0);
+       int i;
+
        sctp_con.flags = 0x7;
        daemons_stop();
        clean_writequeues();
        close_connection();
        dealloc_nodeinfo();
        max_nodeid = 0;
-}
 
-int dlm_lowcomms_init(void)
-{
-       init_waitqueue_head(&lowcomms_recv_wait);
-       spin_lock_init(&write_nodes_lock);
-       INIT_LIST_HEAD(&write_nodes);
-       init_rwsem(&nodeinfo_lock);
-       return 0;
-}
-
-void dlm_lowcomms_exit(void)
-{
-       int i;
+       dlm_local_count = 0;
+       dlm_local_nodeid = 0;
 
        for (i = 0; i < dlm_local_count; i++)
                kfree(dlm_local_addr[i]);
-       dlm_local_count = 0;
-       dlm_local_nodeid = 0;
 }
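
Besides the rename to lowcomms-sctp.c, the hunks above convert the CBUF_* macros into small static functions, swap the bare schedule() calls for cond_resched(), use DECLARE_/DEFINE_ initializers for the lock, list and wait-queue globals, fold the old init/exit entry points into start/stop, and remove the 'accepting' atomic gate from the send paths. The circular-buffer helpers rely on the buffer size being a power of two so wraparound is a single mask; a brief hosted-C illustration with arbitrary numbers (the helpers mirror the ones in the patch):

#include <stdio.h>

struct cbuf { unsigned int base, len, mask; };

static void cbuf_init(struct cbuf *cb, int size) { cb->base = cb->len = 0; cb->mask = size - 1; }
static void cbuf_add(struct cbuf *cb, int n)     { cb->len += n; }
static int  cbuf_data(struct cbuf *cb)           { return (cb->base + cb->len) & cb->mask; }
static void cbuf_eat(struct cbuf *cb, int n)     { cb->len -= n; cb->base += n; cb->base &= cb->mask; }

int main(void)
{
	struct cbuf cb;

	cbuf_init(&cb, 4096);			/* mask = 0xfff               */
	cbuf_add(&cb, 4000);			/* produce 4000 bytes         */
	cbuf_eat(&cb, 3000);			/* consume 3000 of them       */
	cbuf_add(&cb, 2000);			/* next write wraps past 4096 */
	printf("write offset = %d\n", cbuf_data(&cb));	/* (3000 + 3000) & 4095 = 1904 */
	return 0;
}
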
 
diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c
new file mode 100644 (file)
index 0000000..8f2791f
--- /dev/null
@@ -0,0 +1,1189 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
+**  Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+/*
+ * lowcomms.c
+ *
+ * This is the "low-level" comms layer.
+ *
+ * It is responsible for sending/receiving messages
+ * from other nodes in the cluster.
+ *
+ * Cluster nodes are referred to by their nodeids. nodeids are
+ * simply 32 bit numbers to the locking module - if they need to
+ * be expanded for the cluster infrastructure then that is it's
+ * responsibility. It is this layer's
+ * responsibility to resolve these into IP address or
+ * whatever it needs for inter-node communication.
+ *
+ * The comms level is two kernel threads that deal mainly with
+ * the receiving of messages from other nodes and passing them
+ * up to the mid-level comms layer (which understands the
+ * message format) for execution by the locking core, and
+ * a send thread which does all the setting up of connections
+ * to remote nodes and the sending of data. Threads are not allowed
+ * to send their own data because it may cause them to wait in times
+ * of high load. Also, this way, the sending thread can collect together
+ * messages bound for one node and send them in one block.
+ *
+ * I don't see any problem with the recv thread executing the locking
+ * code on behalf of remote processes as the locking code is
+ * short, efficient and never waits.
+ *
+ */
+
+
+#include <asm/ioctls.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/pagemap.h>
+
+#include "dlm_internal.h"
+#include "lowcomms.h"
+#include "midcomms.h"
+#include "config.h"
+
+struct cbuf {
+       unsigned int base;
+       unsigned int len;
+       unsigned int mask;
+};
+
+#define NODE_INCREMENT 32
+static void cbuf_add(struct cbuf *cb, int n)
+{
+       cb->len += n;
+}
+
+static int cbuf_data(struct cbuf *cb)
+{
+       return ((cb->base + cb->len) & cb->mask);
+}
+
+static void cbuf_init(struct cbuf *cb, int size)
+{
+       cb->base = cb->len = 0;
+       cb->mask = size-1;
+}
+
+static void cbuf_eat(struct cbuf *cb, int n)
+{
+       cb->len  -= n;
+       cb->base += n;
+       cb->base &= cb->mask;
+}
+
+static bool cbuf_empty(struct cbuf *cb)
+{
+       return cb->len == 0;
+}
+
+/* Maximum number of incoming messages to process before
+   doing a cond_resched()
+*/
+#define MAX_RX_MSG_COUNT 25
+
+struct connection {
+       struct socket *sock;    /* NULL if not connected */
+       uint32_t nodeid;        /* So we know who we are in the list */
+       struct rw_semaphore sock_sem; /* Stop connect races */
+       struct list_head read_list;   /* On this list when ready for reading */
+       struct list_head write_list;  /* On this list when ready for writing */
+       struct list_head state_list;  /* On this list when ready to connect */
+       unsigned long flags;    /* CF_xxx bits defined below */
+#define CF_READ_PENDING 1
+#define CF_WRITE_PENDING 2
+#define CF_CONNECT_PENDING 3
+#define CF_IS_OTHERCON 4
+       struct list_head writequeue;  /* List of outgoing writequeue_entries */
+       struct list_head listenlist;  /* List of allocated listening sockets */
+       spinlock_t writequeue_lock;
+       int (*rx_action) (struct connection *); /* What to do when active */
+       struct page *rx_page;
+       struct cbuf cb;
+       int retries;
+       atomic_t waiting_requests;
+#define MAX_CONNECT_RETRIES 3
+       struct connection *othercon;
+};
+#define sock2con(x) ((struct connection *)(x)->sk_user_data)
+
+/* An entry waiting to be sent */
+struct writequeue_entry {
+       struct list_head list;
+       struct page *page;
+       int offset;
+       int len;
+       int end;
+       int users;
+       struct connection *con;
+};
+
+static struct sockaddr_storage dlm_local_addr;
+
+/* Manage daemons */
+static struct task_struct *recv_task;
+static struct task_struct *send_task;
+
+static wait_queue_t lowcomms_send_waitq_head;
+static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq);
+static wait_queue_t lowcomms_recv_waitq_head;
+static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq);
+
+/* An array of pointers to connections, indexed by NODEID */
+static struct connection **connections;
+static DECLARE_MUTEX(connections_lock);
+static kmem_cache_t *con_cache;
+static int conn_array_size;
+
+/* List of sockets that have reads pending */
+static LIST_HEAD(read_sockets);
+static DEFINE_SPINLOCK(read_sockets_lock);
+
+/* List of sockets which have writes pending */
+static LIST_HEAD(write_sockets);
+static DEFINE_SPINLOCK(write_sockets_lock);
+
+/* List of sockets which have connects pending */
+static LIST_HEAD(state_sockets);
+static DEFINE_SPINLOCK(state_sockets_lock);
+
+static struct connection *nodeid2con(int nodeid, gfp_t allocation)
+{
+       struct connection *con = NULL;
+
+       down(&connections_lock);
+       if (nodeid >= conn_array_size) {
+               int new_size = nodeid + NODE_INCREMENT;
+               struct connection **new_conns;
+
+               new_conns = kzalloc(sizeof(struct connection *) *
+                                   new_size, allocation);
+               if (!new_conns)
+                       goto finish;
+
+               memcpy(new_conns, connections,
+                      sizeof(struct connection *) * conn_array_size);
+               conn_array_size = new_size;
+               kfree(connections);
+               connections = new_conns;
+
+       }
+
+       con = connections[nodeid];
+       if (con == NULL && allocation) {
+               con = kmem_cache_zalloc(con_cache, allocation);
+               if (!con)
+                       goto finish;
+
+               con->nodeid = nodeid;
+               init_rwsem(&con->sock_sem);
+               INIT_LIST_HEAD(&con->writequeue);
+               spin_lock_init(&con->writequeue_lock);
+
+               connections[nodeid] = con;
+       }
+
+finish:
+       up(&connections_lock);
+       return con;
+}
+
+/* Data available on socket or listen socket received a connect */
+static void lowcomms_data_ready(struct sock *sk, int count_unused)
+{
+       struct connection *con = sock2con(sk);
+
+       atomic_inc(&con->waiting_requests);
+       if (test_and_set_bit(CF_READ_PENDING, &con->flags))
+               return;
+
+       spin_lock_bh(&read_sockets_lock);
+       list_add_tail(&con->read_list, &read_sockets);
+       spin_unlock_bh(&read_sockets_lock);
+
+       wake_up_interruptible(&lowcomms_recv_waitq);
+}
+
+static void lowcomms_write_space(struct sock *sk)
+{
+       struct connection *con = sock2con(sk);
+
+       if (test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+               return;
+
+       spin_lock_bh(&write_sockets_lock);
+       list_add_tail(&con->write_list, &write_sockets);
+       spin_unlock_bh(&write_sockets_lock);
+
+       wake_up_interruptible(&lowcomms_send_waitq);
+}
+
+static inline void lowcomms_connect_sock(struct connection *con)
+{
+       if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
+               return;
+
+       spin_lock_bh(&state_sockets_lock);
+       list_add_tail(&con->state_list, &state_sockets);
+       spin_unlock_bh(&state_sockets_lock);
+
+       wake_up_interruptible(&lowcomms_send_waitq);
+}
+
+static void lowcomms_state_change(struct sock *sk)
+{
+       if (sk->sk_state == TCP_ESTABLISHED)
+               lowcomms_write_space(sk);
+}
+
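
The three callbacks above all follow the same pattern: mark the connection pending with test_and_set_bit() so it is queued at most once, add it to the relevant list under a bottom-half spinlock, and wake the worker thread that will drain the list. A minimal userspace sketch of that pattern (illustrative only, not part of the patch; a mutex and condition variable stand in for the spinlock and wait queue):

/* Userspace sketch of the "set a pending bit, queue the connection once,
 * wake the worker" pattern used by lowcomms_data_ready()/write_space();
 * illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
        bool read_pending;      /* CF_READ_PENDING equivalent */
        struct conn *next;      /* singly linked pending list */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work = PTHREAD_COND_INITIALIZER;
static struct conn *pending;

/* Called from the "data ready" callback: queue at most once per burst. */
static void data_ready(struct conn *c)
{
        pthread_mutex_lock(&list_lock);
        if (!c->read_pending) {         /* test_and_set_bit() in the kernel */
                c->read_pending = true;
                c->next = pending;
                pending = c;
                pthread_cond_signal(&work);     /* wake the receive worker */
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        struct conn c = { false, NULL };

        data_ready(&c);
        data_ready(&c);         /* second burst: already queued, no-op */
        printf("queued once: %d\n", pending == &c && c.next == NULL);
        return 0;
}

Queueing at most once per pending bit keeps the read/write/state lists short even when the socket callbacks fire repeatedly for the same connection.
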
+/* Make a socket active */
+static int add_sock(struct socket *sock, struct connection *con)
+{
+       con->sock = sock;
+
+       /* Install a data_ready callback */
+       con->sock->sk->sk_data_ready = lowcomms_data_ready;
+       con->sock->sk->sk_write_space = lowcomms_write_space;
+       con->sock->sk->sk_state_change = lowcomms_state_change;
+
+       return 0;
+}
+
+/* Add the port number to an IP6 or 4 sockaddr and return the address
+   length */
+static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
+                         int *addr_len)
+{
+       saddr->ss_family =  dlm_local_addr.ss_family;
+       if (saddr->ss_family == AF_INET) {
+               struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
+               in4_addr->sin_port = cpu_to_be16(port);
+               *addr_len = sizeof(struct sockaddr_in);
+       } else {
+               struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
+               in6_addr->sin6_port = cpu_to_be16(port);
+               *addr_len = sizeof(struct sockaddr_in6);
+       }
+}
+
+/* Close a remote connection and tidy up */
+static void close_connection(struct connection *con, bool and_other)
+{
+       down_write(&con->sock_sem);
+
+       if (con->sock) {
+               sock_release(con->sock);
+               con->sock = NULL;
+       }
+       if (con->othercon && and_other) {
+               /* Will only re-enter once. */
+               close_connection(con->othercon, false);
+       }
+       if (con->rx_page) {
+               __free_page(con->rx_page);
+               con->rx_page = NULL;
+       }
+       con->retries = 0;
+       up_write(&con->sock_sem);
+}
+
+/* Data received from remote end */
+static int receive_from_sock(struct connection *con)
+{
+       int ret = 0;
+       struct msghdr msg;
+       struct iovec iov[2];
+       mm_segment_t fs;
+       unsigned len;
+       int r;
+       int call_again_soon = 0;
+
+       down_read(&con->sock_sem);
+
+       if (con->sock == NULL)
+               goto out;
+       if (con->rx_page == NULL) {
+               /*
+                * This doesn't need to be atomic, but I think it should
+                * improve performance if it is.
+                */
+               con->rx_page = alloc_page(GFP_ATOMIC);
+               if (con->rx_page == NULL)
+                       goto out_resched;
+               cbuf_init(&con->cb, PAGE_CACHE_SIZE);
+       }
+
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_iovlen = 1;
+       msg.msg_iov = iov;
+       msg.msg_name = NULL;
+       msg.msg_namelen = 0;
+       msg.msg_flags = 0;
+
+       /*
+        * iov[0] is the bit of the circular buffer between the current end
+        * point (cb.base + cb.len) and the end of the buffer.
+        */
+       iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
+       iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
+       iov[1].iov_len = 0;
+
+       /*
+        * iov[1] is the bit of the circular buffer between the start of the
+        * buffer and the start of the currently used section (cb.base)
+        */
+       if (cbuf_data(&con->cb) >= con->cb.base) {
+               iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
+               iov[1].iov_len = con->cb.base;
+               iov[1].iov_base = page_address(con->rx_page);
+               msg.msg_iovlen = 2;
+       }
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       fs = get_fs();
+       set_fs(get_ds());
+       r = ret = sock_recvmsg(con->sock, &msg, len,
+                              MSG_DONTWAIT | MSG_NOSIGNAL);
+       set_fs(fs);
+
+       if (ret <= 0)
+               goto out_close;
+       if (ret == len)
+               call_again_soon = 1;
+       cbuf_add(&con->cb, ret);
+       ret = dlm_process_incoming_buffer(con->nodeid,
+                                         page_address(con->rx_page),
+                                         con->cb.base, con->cb.len,
+                                         PAGE_CACHE_SIZE);
+       if (ret == -EBADMSG) {
+               printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, "
+                      "iov_len=%u, iov_base[0]=%p, read=%d\n",
+                      page_address(con->rx_page), con->cb.base, con->cb.len,
+                      len, iov[0].iov_base, r);
+       }
+       if (ret < 0)
+               goto out_close;
+       cbuf_eat(&con->cb, ret);
+
+       if (cbuf_empty(&con->cb) && !call_again_soon) {
+               __free_page(con->rx_page);
+               con->rx_page = NULL;
+       }
+
+out:
+       if (call_again_soon)
+               goto out_resched;
+       up_read(&con->sock_sem);
+       return 0;
+
+out_resched:
+       lowcomms_data_ready(con->sock->sk, 0);
+       up_read(&con->sock_sem);
+       cond_resched();
+       return 0;
+
+out_close:
+       up_read(&con->sock_sem);
+       if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
+               close_connection(con, false);
+               /* Reconnect when there is something to send */
+       }
+
+       return ret;
+}
+
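
receive_from_sock() treats the rx_page as a ring buffer: cb.base is the offset of unprocessed data, cb.len its length, and cbuf_data() gives the wrapped write offset, so the free space forms at most two contiguous segments which become iov[0] and iov[1]. A small userspace sketch of the same arithmetic (illustrative only, not part of the patch):

/* Userspace sketch of the dlm cbuf ring-buffer arithmetic; illustrative only. */
#include <assert.h>
#include <stdio.h>

struct cbuf { unsigned int base, len, mask; };

static void cbuf_init(struct cbuf *cb, unsigned int size)  /* size: power of two */
{
        cb->base = cb->len = 0;
        cb->mask = size - 1;
}

static unsigned int cbuf_data(struct cbuf *cb)  /* next write offset, wrapped */
{
        return (cb->base + cb->len) & cb->mask;
}

static void cbuf_add(struct cbuf *cb, unsigned int n) { cb->len += n; }

static void cbuf_eat(struct cbuf *cb, unsigned int n)   /* consume n bytes */
{
        cb->len -= n;
        cb->base = (cb->base + n) & cb->mask;
}

int main(void)
{
        struct cbuf cb;
        unsigned int size = 4096, seg0, seg1;

        cbuf_init(&cb, size);
        cbuf_add(&cb, 3000);    /* producer wrote 3000 bytes at offset 0 */
        cbuf_eat(&cb, 2500);    /* consumer handled 2500, base is now 2500 */

        /* Free space wraps: one segment from the write offset to the end of
           the page, one from the start of the page up to base. This mirrors
           the iov[0]/iov[1] split in receive_from_sock(). */
        if (cbuf_data(&cb) >= cb.base) {
                seg0 = size - cbuf_data(&cb);
                seg1 = cb.base;
        } else {
                seg0 = cb.base - cbuf_data(&cb);
                seg1 = 0;
        }
        printf("write offset %u, free segments %u + %u\n",
               cbuf_data(&cb), seg0, seg1);
        assert(seg0 + seg1 + cb.len == size);
        return 0;
}
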
+/* Listening socket is busy, accept a connection */
+static int accept_from_sock(struct connection *con)
+{
+       int result;
+       struct sockaddr_storage peeraddr;
+       struct socket *newsock;
+       int len;
+       int nodeid;
+       struct connection *newcon;
+
+       memset(&peeraddr, 0, sizeof(peeraddr));
+       result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
+                                 IPPROTO_TCP, &newsock);
+       if (result < 0)
+               return -ENOMEM;
+
+       down_read(&con->sock_sem);
+
+       result = -ENOTCONN;
+       if (con->sock == NULL)
+               goto accept_err;
+
+       newsock->type = con->sock->type;
+       newsock->ops = con->sock->ops;
+
+       result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
+       if (result < 0)
+               goto accept_err;
+
+       /* Get the connected socket's peer */
+       memset(&peeraddr, 0, sizeof(peeraddr));
+       if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
+                                 &len, 2)) {
+               result = -ECONNABORTED;
+               goto accept_err;
+       }
+
+       /* Get the new node's NODEID */
+       make_sockaddr(&peeraddr, 0, &len);
+       if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
+               printk("dlm: connect from non cluster node\n");
+               sock_release(newsock);
+               up_read(&con->sock_sem);
+               return -1;
+       }
+
+       log_print("got connection from %d", nodeid);
+
+       /*  Check to see if we already have a connection to this node. This
+        *  could happen if the two nodes initiate a connection at roughly
+        *  the same time and the connections cross on the wire.
+        * TEMPORARY FIX:
+        *  In this case we store the incoming one in "othercon"
+        */
+       newcon = nodeid2con(nodeid, GFP_KERNEL);
+       if (!newcon) {
+               result = -ENOMEM;
+               goto accept_err;
+       }
+       down_write(&newcon->sock_sem);
+       if (newcon->sock) {
+               struct connection *othercon = newcon->othercon;
+
+               if (!othercon) {
+                       othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
+                       if (!othercon) {
+                               printk("dlm: failed to allocate incoming socket\n");
+                               up_write(&newcon->sock_sem);
+                               result = -ENOMEM;
+                               goto accept_err;
+                       }
+                       othercon->nodeid = nodeid;
+                       othercon->rx_action = receive_from_sock;
+                       init_rwsem(&othercon->sock_sem);
+                       set_bit(CF_IS_OTHERCON, &othercon->flags);
+                       newcon->othercon = othercon;
+               }
+               othercon->sock = newsock;
+               newsock->sk->sk_user_data = othercon;
+               add_sock(newsock, othercon);
+       }
+       else {
+               newsock->sk->sk_user_data = newcon;
+               newcon->rx_action = receive_from_sock;
+               add_sock(newsock, newcon);
+
+       }
+
+       up_write(&newcon->sock_sem);
+
+       /*
+        * Add it to the active queue in case we got data
+        * between processing the accept and adding the socket
+        * to the read_sockets list
+        */
+       lowcomms_data_ready(newsock->sk, 0);
+       up_read(&con->sock_sem);
+
+       return 0;
+
+accept_err:
+       up_read(&con->sock_sem);
+       sock_release(newsock);
+
+       if (result != -EAGAIN)
+               printk("dlm: error accepting connection from node: %d\n", result);
+       return result;
+}
+
+/* Connect a new socket to its peer */
+static void connect_to_sock(struct connection *con)
+{
+       int result = -EHOSTUNREACH;
+       struct sockaddr_storage saddr;
+       int addr_len;
+       struct socket *sock;
+
+       if (con->nodeid == 0) {
+               log_print("attempt to connect sock 0 foiled");
+               return;
+       }
+
+       down_write(&con->sock_sem);
+       if (con->retries++ > MAX_CONNECT_RETRIES)
+               goto out;
+
+       /* Some odd races can cause double-connects, ignore them */
+       if (con->sock) {
+               result = 0;
+               goto out;
+       }
+
+       /* Create a socket to communicate with */
+       result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
+                                 IPPROTO_TCP, &sock);
+       if (result < 0)
+               goto out_err;
+
+       memset(&saddr, 0, sizeof(saddr));
+       if (dlm_nodeid_to_addr(con->nodeid, &saddr))
+               goto out_err;
+
+       sock->sk->sk_user_data = con;
+       con->rx_action = receive_from_sock;
+
+       make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len);
+
+       add_sock(sock, con);
+
+       log_print("connecting to %d", con->nodeid);
+       result =
+               sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
+                                  O_NONBLOCK);
+       if (result == -EINPROGRESS)
+               result = 0;
+       if (result == 0)
+               goto out;
+
+out_err:
+       if (con->sock) {
+               sock_release(con->sock);
+               con->sock = NULL;
+       }
+       /*
+        * Some errors are fatal and this list might need adjusting. For other
+        * errors we try again until the max number of retries is reached.
+        */
+       if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
+           result != -ENETDOWN && result != -EINVAL &&
+           result != -EPROTONOSUPPORT) {
+               lowcomms_connect_sock(con);
+               result = 0;
+       }
+out:
+       up_write(&con->sock_sem);
+       return;
+}
+
+static struct socket *create_listen_sock(struct connection *con,
+                                        struct sockaddr_storage *saddr)
+{
+       struct socket *sock = NULL;
+       mm_segment_t fs;
+       int result = 0;
+       int one = 1;
+       int addr_len;
+
+       if (dlm_local_addr.ss_family == AF_INET)
+               addr_len = sizeof(struct sockaddr_in);
+       else
+               addr_len = sizeof(struct sockaddr_in6);
+
+       /* Create a socket to communicate with */
+       result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
+       if (result < 0) {
+               printk("dlm: Can't create listening comms socket\n");
+               goto create_out;
+       }
+
+       fs = get_fs();
+       set_fs(get_ds());
+       result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+                                (char *)&one, sizeof(one));
+       set_fs(fs);
+       if (result < 0) {
+               printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n",
+                      result);
+       }
+       sock->sk->sk_user_data = con;
+       con->rx_action = accept_from_sock;
+       con->sock = sock;
+
+       /* Bind to our port */
+       make_sockaddr(saddr, dlm_config.tcp_port, &addr_len);
+       result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
+       if (result < 0) {
+               printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port);
+               sock_release(sock);
+               sock = NULL;
+               con->sock = NULL;
+               goto create_out;
+       }
+
+       fs = get_fs();
+       set_fs(get_ds());
+
+       result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+                                (char *)&one, sizeof(one));
+       set_fs(fs);
+       if (result < 0) {
+               printk("dlm: Set keepalive failed: %d\n", result);
+       }
+
+       result = sock->ops->listen(sock, 5);
+       if (result < 0) {
+               printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port);
+               sock_release(sock);
+               sock = NULL;
+               goto create_out;
+       }
+
+create_out:
+       return sock;
+}
+
+
+/* Listen on all interfaces */
+static int listen_for_all(void)
+{
+       struct socket *sock = NULL;
+       struct connection *con = nodeid2con(0, GFP_KERNEL);
+       int result = -EINVAL;
+
+       /* We don't support multi-homed hosts */
+       set_bit(CF_IS_OTHERCON, &con->flags);
+
+       sock = create_listen_sock(con, &dlm_local_addr);
+       if (sock) {
+               add_sock(sock, con);
+               result = 0;
+       }
+       else {
+               result = -EADDRINUSE;
+       }
+
+       return result;
+}
+
+
+
+static struct writequeue_entry *new_writequeue_entry(struct connection *con,
+                                                    gfp_t allocation)
+{
+       struct writequeue_entry *entry;
+
+       entry = kmalloc(sizeof(struct writequeue_entry), allocation);
+       if (!entry)
+               return NULL;
+
+       entry->page = alloc_page(allocation);
+       if (!entry->page) {
+               kfree(entry);
+               return NULL;
+       }
+
+       entry->offset = 0;
+       entry->len = 0;
+       entry->end = 0;
+       entry->users = 0;
+       entry->con = con;
+
+       return entry;
+}
+
+void *dlm_lowcomms_get_buffer(int nodeid, int len,
+                             gfp_t allocation, char **ppc)
+{
+       struct connection *con;
+       struct writequeue_entry *e;
+       int offset = 0;
+       int users = 0;
+
+       con = nodeid2con(nodeid, allocation);
+       if (!con)
+               return NULL;
+
+       spin_lock(&con->writequeue_lock);
+       e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
+       if ((&e->list == &con->writequeue) ||
+           (PAGE_CACHE_SIZE - e->end < len)) {
+               e = NULL;
+       } else {
+               offset = e->end;
+               e->end += len;
+               users = e->users++;
+       }
+       spin_unlock(&con->writequeue_lock);
+
+       if (e) {
+       got_one:
+               if (users == 0)
+                       kmap(e->page);
+               *ppc = page_address(e->page) + offset;
+               return e;
+       }
+
+       e = new_writequeue_entry(con, allocation);
+       if (e) {
+               spin_lock(&con->writequeue_lock);
+               offset = e->end;
+               e->end += len;
+               users = e->users++;
+               list_add_tail(&e->list, &con->writequeue);
+               spin_unlock(&con->writequeue_lock);
+               goto got_one;
+       }
+       return NULL;
+}
+
+void dlm_lowcomms_commit_buffer(void *mh)
+{
+       struct writequeue_entry *e = (struct writequeue_entry *)mh;
+       struct connection *con = e->con;
+       int users;
+
+       spin_lock(&con->writequeue_lock);
+       users = --e->users;
+       if (users)
+               goto out;
+       e->len = e->end - e->offset;
+       kunmap(e->page);
+       spin_unlock(&con->writequeue_lock);
+
+       if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) {
+               spin_lock_bh(&write_sockets_lock);
+               list_add_tail(&con->write_list, &write_sockets);
+               spin_unlock_bh(&write_sockets_lock);
+
+               wake_up_interruptible(&lowcomms_send_waitq);
+       }
+       return;
+
+out:
+       spin_unlock(&con->writequeue_lock);
+       return;
+}
+
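
dlm_lowcomms_get_buffer() and dlm_lowcomms_commit_buffer() let several callers pack messages into one page-sized writequeue entry: get reserves [end, end + len) and raises the users count, commit lowers it, and only when the count returns to zero is the entry's final length fixed and left for the send thread. The sketch below models just that accounting in userspace (hypothetical names; the kernel code additionally holds con->writequeue_lock, kmaps the page for the first user, and queues the connection for writing):

/* Userspace sketch of the writequeue reservation/commit accounting.
 * Illustrative only: the kernel version locks con->writequeue_lock around
 * this bookkeeping and uses a real page per entry. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct entry {
        char page[PAGE_SIZE];
        int offset;     /* start of data not yet handed to sendpage() */
        int end;        /* end of all reservations so far */
        int len;        /* finalised length, set when the last user commits */
        int users;      /* callers holding an uncommitted reservation */
};

/* Reserve len bytes in e; returns a pointer the caller fills in. */
static char *get_buffer(struct entry *e, int len)
{
        char *p;

        if (PAGE_SIZE - e->end < len)
                return NULL;    /* caller would allocate a fresh entry */
        p = e->page + e->end;
        e->end += len;
        e->users++;
        return p;
}

/* Caller finished filling its reservation. */
static void commit_buffer(struct entry *e)
{
        if (--e->users)
                return;                 /* other writers still busy */
        e->len = e->end - e->offset;    /* ready for the send thread */
}

int main(void)
{
        struct entry e = { .offset = 0 };
        char *a = get_buffer(&e, 100);
        char *b = get_buffer(&e, 200);

        memset(a, 'A', 100);
        commit_buffer(&e);      /* users 2 -> 1, not sendable yet */
        memset(b, 'B', 200);
        commit_buffer(&e);      /* users 1 -> 0, len = 300 */
        printf("sendable bytes: %d (users %d)\n", e.len, e.users);
        return 0;
}

lowcomms_send_message() further down in this file is the simplest real caller of this get/commit pair.
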
+static void free_entry(struct writequeue_entry *e)
+{
+       __free_page(e->page);
+       kfree(e);
+}
+
+/* Send a message */
+static void send_to_sock(struct connection *con)
+{
+       int ret = 0;
+       ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
+       const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+       struct writequeue_entry *e;
+       int len, offset;
+
+       down_read(&con->sock_sem);
+       if (con->sock == NULL)
+               goto out_connect;
+
+       sendpage = con->sock->ops->sendpage;
+
+       spin_lock(&con->writequeue_lock);
+       for (;;) {
+               e = list_entry(con->writequeue.next, struct writequeue_entry,
+                              list);
+               if ((struct list_head *) e == &con->writequeue)
+                       break;
+
+               len = e->len;
+               offset = e->offset;
+               BUG_ON(len == 0 && e->users == 0);
+               spin_unlock(&con->writequeue_lock);
+
+               ret = 0;
+               if (len) {
+                       ret = sendpage(con->sock, e->page, offset, len,
+                                      msg_flags);
+                       if (ret == -EAGAIN || ret == 0)
+                               goto out;
+                       if (ret <= 0)
+                               goto send_error;
+               }
+               else {
+                       /* Don't starve people filling buffers */
+                       cond_resched();
+               }
+
+               spin_lock(&con->writequeue_lock);
+               e->offset += ret;
+               e->len -= ret;
+
+               if (e->len == 0 && e->users == 0) {
+                       list_del(&e->list);
+                       kunmap(e->page);
+                       free_entry(e);
+                       continue;
+               }
+       }
+       spin_unlock(&con->writequeue_lock);
+out:
+       up_read(&con->sock_sem);
+       return;
+
+send_error:
+       up_read(&con->sock_sem);
+       close_connection(con, false);
+       lowcomms_connect_sock(con);
+       return;
+
+out_connect:
+       up_read(&con->sock_sem);
+       lowcomms_connect_sock(con);
+       return;
+}
+
+static void clean_one_writequeue(struct connection *con)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       spin_lock(&con->writequeue_lock);
+       list_for_each_safe(list, temp, &con->writequeue) {
+               struct writequeue_entry *e =
+                       list_entry(list, struct writequeue_entry, list);
+               list_del(&e->list);
+               free_entry(e);
+       }
+       spin_unlock(&con->writequeue_lock);
+}
+
+/* Called from recovery when it knows that a node has
+   left the cluster */
+int dlm_lowcomms_close(int nodeid)
+{
+       struct connection *con;
+
+       if (!connections)
+               goto out;
+
+       log_print("closing connection to node %d", nodeid);
+       con = nodeid2con(nodeid, 0);
+       if (con) {
+               clean_one_writequeue(con);
+               close_connection(con, true);
+               atomic_set(&con->waiting_requests, 0);
+       }
+       return 0;
+
+out:
+       return -1;
+}
+
+/* API send message call, may queue the request */
+/* N.B. This is the old interface - use the new one for new calls */
+int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation)
+{
+       struct writequeue_entry *e;
+       char *b;
+
+       e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b);
+       if (e) {
+               memcpy(b, buf, len);
+               dlm_lowcomms_commit_buffer(e);
+               return 0;
+       }
+       return -ENOBUFS;
+}
+
+/* Look for activity on active sockets */
+static void process_sockets(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+       int count = 0;
+
+       spin_lock_bh(&read_sockets_lock);
+       list_for_each_safe(list, temp, &read_sockets) {
+
+               struct connection *con =
+                       list_entry(list, struct connection, read_list);
+               list_del(&con->read_list);
+               clear_bit(CF_READ_PENDING, &con->flags);
+
+               spin_unlock_bh(&read_sockets_lock);
+
+               /* This can reach zero if we are processing requests
+                * as they come in.
+                */
+               if (atomic_read(&con->waiting_requests) == 0) {
+                       spin_lock_bh(&read_sockets_lock);
+                       continue;
+               }
+
+               do {
+                       con->rx_action(con);
+
+                       /* Don't starve out everyone else */
+                       if (++count >= MAX_RX_MSG_COUNT) {
+                               cond_resched();
+                               count = 0;
+                       }
+
+               } while (!atomic_dec_and_test(&con->waiting_requests) &&
+                        !kthread_should_stop());
+
+               spin_lock_bh(&read_sockets_lock);
+       }
+       spin_unlock_bh(&read_sockets_lock);
+}
+
+/* Try to send any messages that are pending
+ */
+static void process_output_queue(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       spin_lock_bh(&write_sockets_lock);
+       list_for_each_safe(list, temp, &write_sockets) {
+               struct connection *con =
+                       list_entry(list, struct connection, write_list);
+               clear_bit(CF_WRITE_PENDING, &con->flags);
+               list_del(&con->write_list);
+
+               spin_unlock_bh(&write_sockets_lock);
+               send_to_sock(con);
+               spin_lock_bh(&write_sockets_lock);
+       }
+       spin_unlock_bh(&write_sockets_lock);
+}
+
+static void process_state_queue(void)
+{
+       struct list_head *list;
+       struct list_head *temp;
+
+       spin_lock_bh(&state_sockets_lock);
+       list_for_each_safe(list, temp, &state_sockets) {
+               struct connection *con =
+                       list_entry(list, struct connection, state_list);
+               list_del(&con->state_list);
+               clear_bit(CF_CONNECT_PENDING, &con->flags);
+               spin_unlock_bh(&state_sockets_lock);
+
+               connect_to_sock(con);
+               spin_lock_bh(&state_sockets_lock);
+       }
+       spin_unlock_bh(&state_sockets_lock);
+}
+
+
+/* Discard all entries on the write queues */
+static void clean_writequeues(void)
+{
+       int nodeid;
+
+       for (nodeid = 1; nodeid < conn_array_size; nodeid++) {
+               struct connection *con = nodeid2con(nodeid, 0);
+
+               if (con)
+                       clean_one_writequeue(con);
+       }
+}
+
+static int read_list_empty(void)
+{
+       int status;
+
+       spin_lock_bh(&read_sockets_lock);
+       status = list_empty(&read_sockets);
+       spin_unlock_bh(&read_sockets_lock);
+
+       return status;
+}
+
+/* DLM Transport comms receive daemon */
+static int dlm_recvd(void *data)
+{
+       init_waitqueue_entry(&lowcomms_recv_waitq_head, current);
+       add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head);
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (read_list_empty())
+                       cond_resched();
+               set_current_state(TASK_RUNNING);
+
+               process_sockets();
+       }
+
+       return 0;
+}
+
+static int write_and_state_lists_empty(void)
+{
+       int status;
+
+       spin_lock_bh(&write_sockets_lock);
+       status = list_empty(&write_sockets);
+       spin_unlock_bh(&write_sockets_lock);
+
+       spin_lock_bh(&state_sockets_lock);
+       if (list_empty(&state_sockets) == 0)
+               status = 0;
+       spin_unlock_bh(&state_sockets_lock);
+
+       return status;
+}
+
+/* DLM Transport send daemon */
+static int dlm_sendd(void *data)
+{
+       init_waitqueue_entry(&lowcomms_send_waitq_head, current);
+       add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head);
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (write_and_state_lists_empty())
+                       cond_resched();
+               set_current_state(TASK_RUNNING);
+
+               process_state_queue();
+               process_output_queue();
+       }
+
+       return 0;
+}
+
+static void daemons_stop(void)
+{
+       kthread_stop(recv_task);
+       kthread_stop(send_task);
+}
+
+static int daemons_start(void)
+{
+       struct task_struct *p;
+       int error;
+
+       p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
+       error = IS_ERR(p);
+       if (error) {
+               log_print("can't start dlm_recvd %d", error);
+               return error;
+       }
+       recv_task = p;
+
+       p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
+       error = IS_ERR(p);
+       if (error) {
+               log_print("can't start dlm_sendd %d", error);
+               kthread_stop(recv_task);
+               return error;
+       }
+       send_task = p;
+
+       return 0;
+}
+
+/*
+ * Return the largest buffer size we can cope with.
+ */
+int lowcomms_max_buffer_size(void)
+{
+       return PAGE_CACHE_SIZE;
+}
+
+void dlm_lowcomms_stop(void)
+{
+       int i;
+
+       /* Set all the flags to prevent any socket activity */
+       for (i = 0; i < conn_array_size; i++) {
+               if (connections[i])
+                       connections[i]->flags |= 0xFF;
+       }
+
+       daemons_stop();
+       clean_writequeues();
+
+       for (i = 0; i < conn_array_size; i++) {
+               if (connections[i]) {
+                       close_connection(connections[i], true);
+                       if (connections[i]->othercon)
+                               kmem_cache_free(con_cache, connections[i]->othercon);
+                       kmem_cache_free(con_cache, connections[i]);
+               }
+       }
+
+       kfree(connections);
+       connections = NULL;
+
+       kmem_cache_destroy(con_cache);
+}
+
+/* This is quite likely to sleep... */
+int dlm_lowcomms_start(void)
+{
+       int error = 0;
+
+       error = -ENOMEM;
+       connections = kzalloc(sizeof(struct connection *) *
+                             NODE_INCREMENT, GFP_KERNEL);
+       if (!connections)
+               goto out;
+
+       conn_array_size = NODE_INCREMENT;
+
+       if (dlm_our_addr(&dlm_local_addr, 0)) {
+               log_print("no local IP address has been set");
+               goto fail_free_conn;
+       }
+       if (!dlm_our_addr(&dlm_local_addr, 1)) {
+               log_print("This dlm comms module does not support multi-homed clustering");
+               goto fail_free_conn;
+       }
+
+       con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
+                                     __alignof__(struct connection), 0,
+                                     NULL, NULL);
+       if (!con_cache)
+               goto fail_free_conn;
+
+
+       /* Start listening */
+       error = listen_for_all();
+       if (error)
+               goto fail_unlisten;
+
+       error = daemons_start();
+       if (error)
+               goto fail_unlisten;
+
+       return 0;
+
+fail_unlisten:
+       close_connection(connections[0], false);
+       kmem_cache_free(con_cache, connections[0]);
+       kmem_cache_destroy(con_cache);
+
+fail_free_conn:
+       kfree(connections);
+
+out:
+       return error;
+}
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only.  This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
index 2d045e0daae1f6a0d2c07e8e82b74b2004ed5602..a9a9618c0d3f2bcea8063a7217a7e2e036ed76fe 100644 (file)
@@ -14,8 +14,6 @@
 #ifndef __LOWCOMMS_DOT_H__
 #define __LOWCOMMS_DOT_H__
 
-int dlm_lowcomms_init(void);
-void dlm_lowcomms_exit(void);
 int dlm_lowcomms_start(void);
 void dlm_lowcomms_stop(void);
 int dlm_lowcomms_close(int nodeid);
index a8da8dc36b2eee898551cc0dc9a6492dfa56fca8..162fbae58fe556df3150f96a8423718e47261d9a 100644 (file)
@@ -16,7 +16,6 @@
 #include "lock.h"
 #include "user.h"
 #include "memory.h"
-#include "lowcomms.h"
 #include "config.h"
 
 #ifdef CONFIG_DLM_DEBUG
@@ -47,20 +46,14 @@ static int __init init_dlm(void)
        if (error)
                goto out_config;
 
-       error = dlm_lowcomms_init();
-       if (error)
-               goto out_debug;
-
        error = dlm_user_init();
        if (error)
-               goto out_lowcomms;
+               goto out_debug;
 
        printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
 
        return 0;
 
- out_lowcomms:
-       dlm_lowcomms_exit();
  out_debug:
        dlm_unregister_debugfs();
  out_config:
@@ -76,7 +69,6 @@ static int __init init_dlm(void)
 static void __exit exit_dlm(void)
 {
        dlm_user_exit();
-       dlm_lowcomms_exit();
        dlm_config_exit();
        dlm_memory_exit();
        dlm_lockspace_exit();
index a3f7de7f3a8f969b9500875203297f4b8d6872ef..85e2897bd7400fc4155948fc8eb1c81cb1ff8e01 100644 (file)
@@ -186,6 +186,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
        struct dlm_member *memb, *safe;
        int i, error, found, pos = 0, neg = 0, low = -1;
 
+       /* previously removed members that we've not finished removing need to
+          count as a negative change so the "neg" recovery steps will happen */
+
+       list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
+               log_debug(ls, "prev removed member %d", memb->nodeid);
+               neg++;
+       }
+
        /* move departed members from ls_nodes to ls_nodes_gone */
 
        list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
index 989b608fd83603dfc193f540c91fc452be4cabc9..5352b03ff5aa81f9af31c081965f7ae8dd81f90e 100644 (file)
@@ -15,7 +15,7 @@
 #include "config.h"
 #include "memory.h"
 
-static kmem_cache_t *lkb_cache;
+static struct kmem_cache *lkb_cache;
 
 
 int dlm_memory_init(void)
index 518239a8b1e90f00dc6c33c3b834c004d402e5dd..4cc31be9cd9d849c69e986397451f54387c03236 100644 (file)
@@ -90,13 +90,28 @@ static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
        return 0;
 }
 
+static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
+{
+       spin_lock(&ls->ls_rcom_spin);
+       *new_seq = ++ls->ls_rcom_seq;
+       set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
+       spin_unlock(&ls->ls_rcom_spin);
+}
+
+static void disallow_sync_reply(struct dlm_ls *ls)
+{
+       spin_lock(&ls->ls_rcom_spin);
+       clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
+       clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
+       spin_unlock(&ls->ls_rcom_spin);
+}
+
 int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
 {
        struct dlm_rcom *rc;
        struct dlm_mhandle *mh;
        int error = 0;
 
-       memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
        ls->ls_recover_nodeid = nodeid;
 
        if (nodeid == dlm_our_nodeid()) {
@@ -108,12 +123,14 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
        error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
        if (error)
                goto out;
-       rc->rc_id = ++ls->ls_rcom_seq;
+
+       allow_sync_reply(ls, &rc->rc_id);
+       memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
 
        send_rcom(ls, mh, rc);
 
        error = dlm_wait_function(ls, &rcom_response);
-       clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
+       disallow_sync_reply(ls);
        if (error)
                goto out;
 
@@ -150,14 +167,21 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
 
 static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
 {
-       if (rc_in->rc_id != ls->ls_rcom_seq) {
-               log_debug(ls, "reject old reply %d got %llx wanted %llx",
-                         rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq);
-               return;
+       spin_lock(&ls->ls_rcom_spin);
+       if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
+           rc_in->rc_id != ls->ls_rcom_seq) {
+               log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
+                         rc_in->rc_type, rc_in->rc_header.h_nodeid,
+                         (unsigned long long)rc_in->rc_id,
+                         (unsigned long long)ls->ls_rcom_seq);
+               goto out;
        }
        memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
        set_bit(LSFL_RCOM_READY, &ls->ls_flags);
+       clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
        wake_up(&ls->ls_wait_general);
+ out:
+       spin_unlock(&ls->ls_rcom_spin);
 }
 
 static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
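
allow_sync_reply()/disallow_sync_reply() and the stricter test in receive_sync_reply() close a window in which a late reply to an earlier status or names request could still be copied into ls_recover_buf: a reply is now accepted only while LSFL_RCOM_WAIT is set and only if its rc_id matches the sequence number issued for the outstanding request, all under ls_rcom_spin. A compact userspace sketch of that gate (hypothetical names; a pthread mutex stands in for the spinlock):

/* Userspace sketch of the sequence-gated sync-reply window; illustrative only. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sync_gate {
        pthread_mutex_t lock;
        uint64_t seq;           /* id of the outstanding request */
        bool waiting;           /* LSFL_RCOM_WAIT equivalent */
};

/* Open the window and return the id to put in the outgoing request. */
static uint64_t allow_sync_reply(struct sync_gate *g)
{
        uint64_t id;

        pthread_mutex_lock(&g->lock);
        id = ++g->seq;
        g->waiting = true;
        pthread_mutex_unlock(&g->lock);
        return id;
}

/* Close the window once the waiter has finished (or timed out). */
static void disallow_sync_reply(struct sync_gate *g)
{
        pthread_mutex_lock(&g->lock);
        g->waiting = false;
        pthread_mutex_unlock(&g->lock);
}

/* Accept a reply only if the window is open and the id is current. */
static bool receive_sync_reply(struct sync_gate *g, uint64_t id)
{
        bool ok;

        pthread_mutex_lock(&g->lock);
        ok = g->waiting && id == g->seq;
        if (ok)
                g->waiting = false;     /* one reply per request */
        pthread_mutex_unlock(&g->lock);
        return ok;
}

int main(void)
{
        struct sync_gate g = { PTHREAD_MUTEX_INITIALIZER, 0, false };
        uint64_t old = allow_sync_reply(&g);

        disallow_sync_reply(&g);                /* waiter gave up */
        uint64_t cur = allow_sync_reply(&g);    /* new request */
        printf("stale reply accepted: %d\n", receive_sync_reply(&g, old));
        printf("current reply accepted: %d\n", receive_sync_reply(&g, cur));
        return 0;
}
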
@@ -171,7 +195,6 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
        struct dlm_mhandle *mh;
        int error = 0, len = sizeof(struct dlm_rcom);
 
-       memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
        ls->ls_recover_nodeid = nodeid;
 
        if (nodeid == dlm_our_nodeid()) {
@@ -185,12 +208,14 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
        if (error)
                goto out;
        memcpy(rc->rc_buf, last_name, last_len);
-       rc->rc_id = ++ls->ls_rcom_seq;
+
+       allow_sync_reply(ls, &rc->rc_id);
+       memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
 
        send_rcom(ls, mh, rc);
 
        error = dlm_wait_function(ls, &rcom_response);
-       clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
+       disallow_sync_reply(ls);
  out:
        return error;
 }
@@ -370,9 +395,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
 static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
 {
        struct dlm_rcom *rc;
+       struct rcom_config *rf;
        struct dlm_mhandle *mh;
        char *mb;
-       int mb_len = sizeof(struct dlm_rcom);
+       int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
 
        mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
        if (!mh)
@@ -391,6 +417,9 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
        rc->rc_id = rc_in->rc_id;
        rc->rc_result = -ESRCH;
 
+       rf = (struct rcom_config *) rc->rc_buf;
+       rf->rf_lvblen = -1;
+
        dlm_rcom_out(rc);
        dlm_lowcomms_commit_buffer(mh);
 
@@ -412,9 +441,10 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
 
        ls = dlm_find_lockspace_global(hd->h_lockspace);
        if (!ls) {
-               log_print("lockspace %x from %d not found",
-                         hd->h_lockspace, nodeid);
-               send_ls_not_ready(nodeid, rc);
+               log_print("lockspace %x from %d type %x not found",
+                         hd->h_lockspace, nodeid, rc->rc_type);
+               if (rc->rc_type == DLM_RCOM_STATUS)
+                       send_ls_not_ready(nodeid, rc);
                return;
        }
 
index a5e6d184872e0576301b72acb2f765aba3bac24e..cf9f6831bab57c34e9c733e4328a0c1a38ea534f 100644 (file)
@@ -252,6 +252,7 @@ static void recover_list_clear(struct dlm_ls *ls)
        spin_lock(&ls->ls_recover_list_lock);
        list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
                list_del_init(&r->res_recover_list);
+               r->res_recover_locks_count = 0;
                dlm_put_rsb(r);
                ls->ls_recover_list_count--;
        }
index 362e3eff4dc9afef2e8bd02777ed87591636fcfe..650536aa513930948a1c5b5eaf6250aa68c837a2 100644 (file)
@@ -45,7 +45,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
        unsigned long start;
        int error, neg = 0;
 
-       log_debug(ls, "recover %llx", rv->seq);
+       log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
 
        mutex_lock(&ls->ls_recoverd_active);
 
@@ -93,14 +93,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
                goto fail;
        }
 
-       /*
-        * Purge directory-related requests that are saved in requestqueue.
-        * All dir requests from before recovery are invalid now due to the dir
-        * rebuild and will be resent by the requesting nodes.
-        */
-
-       dlm_purge_requestqueue(ls);
-
        /*
         * Wait for all nodes to complete directory rebuild.
         */
@@ -164,10 +156,31 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
                 */
 
                dlm_recover_rsbs(ls);
+       } else {
+               /*
+                * Other lockspace members may be going through the "neg" steps
+                * while also adding us to the lockspace, in which case they'll
+                * be doing the recover_locks (RS_LOCKS) barrier.
+                */
+               dlm_set_recover_status(ls, DLM_RS_LOCKS);
+
+               error = dlm_recover_locks_wait(ls);
+               if (error) {
+                       log_error(ls, "recover_locks_wait failed %d", error);
+                       goto fail;
+               }
        }
 
        dlm_release_root_list(ls);
 
+       /*
+        * Purge directory-related requests that are saved in requestqueue.
+        * All dir requests from before recovery are invalid now due to the dir
+        * rebuild and will be resent by the requesting nodes.
+        */
+
+       dlm_purge_requestqueue(ls);
+
        dlm_set_recover_status(ls, DLM_RS_DONE);
        error = dlm_recover_done_wait(ls);
        if (error) {
@@ -199,7 +212,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
        dlm_astd_wake();
 
-       log_debug(ls, "recover %llx done: %u ms", rv->seq,
+       log_debug(ls, "recover %llx done: %u ms",
+                 (unsigned long long)rv->seq,
                  jiffies_to_msecs(jiffies - start));
        mutex_unlock(&ls->ls_recoverd_active);
 
@@ -207,11 +221,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
  fail:
        dlm_release_root_list(ls);
-       log_debug(ls, "recover %llx error %d", rv->seq, error);
+       log_debug(ls, "recover %llx error %d",
+                 (unsigned long long)rv->seq, error);
        mutex_unlock(&ls->ls_recoverd_active);
        return error;
 }
 
+/* The dlm_ls_start() that created the rv we take here may already have been
+   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
+   flag set. */
+
 static void do_ls_recovery(struct dlm_ls *ls)
 {
        struct dlm_recover *rv = NULL;
@@ -219,7 +238,8 @@ static void do_ls_recovery(struct dlm_ls *ls)
        spin_lock(&ls->ls_recover_lock);
        rv = ls->ls_recover_args;
        ls->ls_recover_args = NULL;
-       clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
+       if (rv && ls->ls_recover_seq == rv->seq)
+               clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
        spin_unlock(&ls->ls_recover_lock);
 
        if (rv) {
index 7b2b089634a2df67d7253a0b316a97294a6f86c6..65008d79c96d2e651091dbd7bcbe47f1e17221ec 100644 (file)
@@ -30,26 +30,36 @@ struct rq_entry {
  * lockspace is enabled on some while still suspended on others.
  */
 
-void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
+int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
 {
        struct rq_entry *e;
        int length = hd->h_length;
-
-       if (dlm_is_removed(ls, nodeid))
-               return;
+       int rv = 0;
 
        e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
        if (!e) {
                log_print("dlm_add_requestqueue: out of memory\n");
-               return;
+               return 0;
        }
 
        e->nodeid = nodeid;
        memcpy(e->request, hd, length);
 
+       /* We need to check dlm_locking_stopped() after taking the mutex to
+          avoid a race where dlm_recoverd enables locking and runs
+          process_requestqueue between our earlier dlm_locking_stopped check
+          and this addition to the requestqueue. */
+
        mutex_lock(&ls->ls_requestqueue_mutex);
-       list_add_tail(&e->list, &ls->ls_requestqueue);
+       if (dlm_locking_stopped(ls))
+               list_add_tail(&e->list, &ls->ls_requestqueue);
+       else {
+               log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
+               kfree(e);
+               rv = -EAGAIN;
+       }
        mutex_unlock(&ls->ls_requestqueue_mutex);
+       return rv;
 }
 
 int dlm_process_requestqueue(struct dlm_ls *ls)
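
The reworked dlm_add_requestqueue() re-checks dlm_locking_stopped() only after taking ls_requestqueue_mutex, so a message cannot be added to the requestqueue after dlm_recoverd has re-enabled locking and drained it; returning -EAGAIN tells the caller to process the message directly instead. A minimal userspace sketch of that check-under-lock pattern (hypothetical names, a pthread mutex in place of the kernel mutex):

/* Userspace sketch of the "re-check the stopped flag under the queue
 * mutex" pattern used by dlm_add_requestqueue(); illustrative only. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
        bool stopped;           /* dlm_locking_stopped() equivalent */
        int queued;             /* stand-in for the list of saved requests */
};

/* Save a request for later only if locking is still stopped; otherwise
 * tell the caller to handle it directly. */
static int add_request(struct queue *q)
{
        int rv = 0;

        pthread_mutex_lock(&q->lock);
        if (q->stopped)
                q->queued++;            /* safe: draining also takes q->lock */
        else
                rv = -EAGAIN;           /* locking already re-enabled */
        pthread_mutex_unlock(&q->lock);
        return rv;
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

        printf("while stopped: %d\n", add_request(&q)); /* 0, queued */
        q.stopped = false;                              /* recovery finished */
        printf("after restart: %d\n", add_request(&q)); /* -EAGAIN */
        return 0;
}
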
@@ -120,6 +130,10 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
 {
        uint32_t type = ms->m_type;
 
+       /* the ls is being cleaned up and freed by release_lockspace */
+       if (!ls->ls_count)
+               return 1;
+
        if (dlm_is_removed(ls, nodeid))
                return 1;
 
index 349f0d292d95ec22688d525cfa3d9301dc903148..6a53ea03335dae50b9d22f1c0c156ff01dfd581d 100644 (file)
@@ -13,7 +13,7 @@
 #ifndef __REQUESTQUEUE_DOT_H__
 #define __REQUESTQUEUE_DOT_H__
 
-void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
+int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
 int dlm_process_requestqueue(struct dlm_ls *ls);
 void dlm_wait_requestqueue(struct dlm_ls *ls);
 void dlm_purge_requestqueue(struct dlm_ls *ls);
index 2b0442db67e05cb7b0a275dafd0f7c762d86e050..1f26a2b9eee13f0bd8c1a394d157e78ff5d41728 100644 (file)
@@ -23,7 +23,7 @@
 
 int dir_notify_enable __read_mostly = 1;
 
-static kmem_cache_t *dn_cache __read_mostly;
+static struct kmem_cache *dn_cache __read_mostly;
 
 static void redo_inode_mask(struct inode *inode)
 {
@@ -77,7 +77,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
        inode = filp->f_dentry->d_inode;
        if (!S_ISDIR(inode->i_mode))
                return -ENOTDIR;
-       dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL);
+       dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
        if (dn == NULL)
                return -ENOMEM;
        spin_lock(&inode->i_lock);
index 9af789567e513b4575c9066711b8b6f595c949c7..f9cd5e23ebdf2aaa9e91ca71e3cda95dbe60560a 100644 (file)
@@ -131,7 +131,7 @@ static struct quota_format_type *quota_formats;     /* List of registered formats */
 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
 
 /* SLAB cache for dquot structures */
-static kmem_cache_t *dquot_cachep;
+static struct kmem_cache *dquot_cachep;
 
 int register_quota_format(struct quota_format_type *fmt)
 {
@@ -600,7 +600,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
 {
        struct dquot *dquot;
 
-       dquot = kmem_cache_alloc(dquot_cachep, SLAB_NOFS);
+       dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS);
        if(!dquot)
                return NODQUOT;
 
index f63a7755fe8697ea7469182b1baa487fd1b48c7a..7196f50fe152f630f8d5b839fdfa32e3d2c4a5fd 100644 (file)
@@ -628,7 +628,7 @@ int ecryptfs_decrypt_page(struct file *file, struct page *page)
        num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
        base_extent = (page->index * num_extents_per_page);
        lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache,
-                                          SLAB_KERNEL);
+                                          GFP_KERNEL);
        if (!lower_page_virt) {
                rc = -ENOMEM;
                ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
@@ -1334,7 +1334,7 @@ int ecryptfs_write_headers(struct dentry *ecryptfs_dentry,
                goto out;
        }
        /* Released in this function */
-       page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER);
+       page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER);
        if (!page_virt) {
                ecryptfs_printk(KERN_ERR, "Out of memory\n");
                rc = -ENOMEM;
@@ -1493,7 +1493,7 @@ int ecryptfs_read_headers(struct dentry *ecryptfs_dentry,
            &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
 
        /* Read the first page from the underlying file */
-       page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER);
+       page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER);
        if (!page_virt) {
                rc = -ENOMEM;
                ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n");
index a92ef05eff8f3d1ba52bdf481d14c728f9591eb1..42099e779a568d5ed5ccfc6cc1e8dd8c086cfeea 100644 (file)
@@ -250,7 +250,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
        int lower_flags;
 
        /* Released in ecryptfs_release or end of function if failure */
-       file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL);
+       file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL);
        ecryptfs_set_file_private(file, file_info);
        if (!file_info) {
                ecryptfs_printk(KERN_ERR,
index dfcc68484f47068ccb3d256283cfea67857518f6..8a1945a84c3649e2eea7d5ad46cab53ea6d0dca8 100644 (file)
@@ -369,7 +369,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
        BUG_ON(!atomic_read(&lower_dentry->d_count));
        ecryptfs_set_dentry_private(dentry,
                                    kmem_cache_alloc(ecryptfs_dentry_info_cache,
-                                                    SLAB_KERNEL));
+                                                    GFP_KERNEL));
        if (!ecryptfs_dentry_to_private(dentry)) {
                rc = -ENOMEM;
                ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
@@ -404,7 +404,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
        /* Released in this function */
        page_virt =
            (char *)kmem_cache_alloc(ecryptfs_header_cache_2,
-                                    SLAB_USER);
+                                    GFP_USER);
        if (!page_virt) {
                rc = -ENOMEM;
                ecryptfs_printk(KERN_ERR,
@@ -795,7 +795,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
        /* Released at out_free: label */
        ecryptfs_set_file_private(&fake_ecryptfs_file,
                                  kmem_cache_alloc(ecryptfs_file_info_cache,
-                                                  SLAB_KERNEL));
+                                                  GFP_KERNEL));
        if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
                rc = -ENOMEM;
                goto out;
index c3746f56d1627ab2246d5086731103353f9537d3..745c0f1bfbbdcbd1a2bd4ff2db8848bc572df1fe 100644 (file)
@@ -207,7 +207,7 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
        /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
         * at end of function upon failure */
        auth_tok_list_item =
-           kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL);
+           kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL);
        if (!auth_tok_list_item) {
                ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
                rc = -ENOMEM;
index a78d87d14bafb2256313753cc8aeaeeb6c0b381f..3ede12b259336a4851fabb9873bf615fa2b5d2cb 100644 (file)
@@ -378,7 +378,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
        /* Released in ecryptfs_put_super() */
        ecryptfs_set_superblock_private(sb,
                                        kmem_cache_alloc(ecryptfs_sb_info_cache,
-                                                        SLAB_KERNEL));
+                                                        GFP_KERNEL));
        if (!ecryptfs_superblock_to_private(sb)) {
                ecryptfs_printk(KERN_WARNING, "Out of memory\n");
                rc = -ENOMEM;
@@ -402,7 +402,7 @@ ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
        /* through deactivate_super(sb) from get_sb_nodev() */
        ecryptfs_set_dentry_private(sb->s_root,
                                    kmem_cache_alloc(ecryptfs_dentry_info_cache,
-                                                    SLAB_KERNEL));
+                                                    GFP_KERNEL));
        if (!ecryptfs_dentry_to_private(sb->s_root)) {
                ecryptfs_printk(KERN_ERR,
                                "dentry_info_cache alloc failed\n");
@@ -546,7 +546,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
 }
 
 static struct ecryptfs_cache_info {
-       kmem_cache_t **cache;
+       struct kmem_cache **cache;
        const char *name;
        size_t size;
        void (*ctor)(void*, struct kmem_cache *, unsigned long);
@@ -691,7 +691,7 @@ static ssize_t version_show(struct ecryptfs_obj *obj, char *buff)
 
 static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version);
 
-struct ecryptfs_version_str_map_elem {
+static struct ecryptfs_version_str_map_elem {
        u32 flag;
        char *str;
 } ecryptfs_version_str_map[] = {
index 825757ae48676d6a488f64413316a7429b55fbbc..eaa5daaf106eec9aebb1271a1e3ae554a813c0a1 100644 (file)
@@ -50,7 +50,7 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
        struct inode *inode = NULL;
 
        ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache,
-                                         SLAB_KERNEL);
+                                         GFP_KERNEL);
        if (unlikely(!ecryptfs_inode))
                goto out;
        ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
index b3f50651eb6bd1a979a2d2442c4294b48141780b..dfebf21289f4904a7ae784e1ab4c57ce97bafd6e 100644 (file)
@@ -52,12 +52,12 @@ static struct pt_types sgi_pt_types[] = {
 };
 
 
-static kmem_cache_t * efs_inode_cachep;
+static struct kmem_cache * efs_inode_cachep;
 
 static struct inode *efs_alloc_inode(struct super_block *sb)
 {
        struct efs_inode_info *ei;
-       ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL);
+       ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -68,7 +68,7 @@ static void efs_destroy_inode(struct inode *inode)
        kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct efs_inode_info *ei = (struct efs_inode_info *) foo;
 
index ae228ec54e948a63b25bb39f34577baa22fd06ed..88a6f8d0b88e2bda091e687a3213bd3107714b84 100644 (file)
@@ -283,10 +283,10 @@ static struct mutex epmutex;
 static struct poll_safewake psw;
 
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache __read_mostly;
+static struct kmem_cache *epi_cache __read_mostly;
 
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache __read_mostly;
+static struct kmem_cache *pwq_cache __read_mostly;
 
 /* Virtual fs used to allocate inodes for eventpoll files */
 static struct vfsmount *eventpoll_mnt __read_mostly;
@@ -961,7 +961,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
        struct epitem *epi = ep_item_from_epqueue(pt);
        struct eppoll_entry *pwq;
 
-       if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) {
+       if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
                init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
                pwq->whead = whead;
                pwq->base = epi;
@@ -1004,7 +1004,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
        struct ep_pqueue epq;
 
        error = -ENOMEM;
-       if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL)))
+       if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
                goto eexit_1;
 
        /* Item initialization follow here ... */
index d993ea1a81aee7b60cdc11ffc9ee18dd09a8658d..add0e03c3ea908a6baf81d3b16b1065716c5a8ff 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -404,7 +404,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
                bprm->loader += stack_base;
        bprm->exec += stack_base;
 
-       mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!mpnt)
                return -ENOMEM;
 
@@ -1515,7 +1515,8 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
                ispipe = 1;
        } else
                file = filp_open(corename,
-                                O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE, 0600);
+                                O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+                                0600);
        if (IS_ERR(file))
                goto fail_unlock;
        inode = file->f_dentry->d_inode;
index 1dfba77eab10dc348e0ac3c4fa4e23d2448ac281..e3cf8c81507f64ef58c99a54c382e626051f1264 100644 (file)
@@ -44,6 +44,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
                if (!S_ISDIR(inode->i_mode))
                        flags &= ~EXT2_DIRSYNC_FL;
 
+               mutex_lock(&inode->i_mutex);
                oldflags = ei->i_flags;
 
                /*
@@ -53,13 +54,16 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
                 * This test looks nicer. Thanks to Pauline Middelink
                 */
                if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
-                       if (!capable(CAP_LINUX_IMMUTABLE))
+                       if (!capable(CAP_LINUX_IMMUTABLE)) {
+                               mutex_unlock(&inode->i_mutex);
                                return -EPERM;
+                       }
                }
 
                flags = flags & EXT2_FL_USER_MODIFIABLE;
                flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
                ei->i_flags = flags;
+               mutex_unlock(&inode->i_mutex);
 
                ext2_set_inode_flags(inode);
                inode->i_ctime = CURRENT_TIME_SEC;
index d8b9abd95d07e4bf2fa81020856cd4026b766790..255cef5f7420f7d10fe4282c47e25bf9e597342e 100644 (file)
@@ -135,12 +135,12 @@ static void ext2_put_super (struct super_block * sb)
        return;
 }
 
-static kmem_cache_t * ext2_inode_cachep;
+static struct kmem_cache * ext2_inode_cachep;
 
 static struct inode *ext2_alloc_inode(struct super_block *sb)
 {
        struct ext2_inode_info *ei;
-       ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL);
+       ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
 #ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -156,7 +156,7 @@ static void ext2_destroy_inode(struct inode *inode)
        kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
 
@@ -1090,8 +1090,10 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
 {
        struct super_block *sb = dentry->d_sb;
        struct ext2_sb_info *sbi = EXT2_SB(sb);
+       struct ext2_super_block *es = sbi->s_es;
        unsigned long overhead;
        int i;
+       u64 fsid;
 
        if (test_opt (sb, MINIX_DF))
                overhead = 0;
@@ -1104,7 +1106,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
                 * All of the blocks before first_data_block are
                 * overhead
                 */
-               overhead = le32_to_cpu(sbi->s_es->s_first_data_block);
+               overhead = le32_to_cpu(es->s_first_data_block);
 
                /*
                 * Add the overhead attributed to the superblock and
@@ -1125,14 +1127,18 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
 
        buf->f_type = EXT2_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_blocks = le32_to_cpu(sbi->s_es->s_blocks_count) - overhead;
+       buf->f_blocks = le32_to_cpu(es->s_blocks_count) - overhead;
        buf->f_bfree = ext2_count_free_blocks(sb);
-       buf->f_bavail = buf->f_bfree - le32_to_cpu(sbi->s_es->s_r_blocks_count);
-       if (buf->f_bfree < le32_to_cpu(sbi->s_es->s_r_blocks_count))
+       buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
+       if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
                buf->f_bavail = 0;
-       buf->f_files = le32_to_cpu(sbi->s_es->s_inodes_count);
-       buf->f_ffree = ext2_count_free_inodes (sb);
+       buf->f_files = le32_to_cpu(es->s_inodes_count);
+       buf->f_ffree = ext2_count_free_inodes(sb);
        buf->f_namelen = EXT2_NAME_LEN;
+       fsid = le64_to_cpup((void *)es->s_uuid) ^
+              le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+       buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+       buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
        return 0;
 }
 
index af52a7f8b291d0baf3542246fb90703032cfbecc..247efd0b51d694e1206709672d8234e40b5405ed 100644 (file)
@@ -342,12 +342,9 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
        if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
                return;
 
-       lock_super(sb);
-       EXT2_SB(sb)->s_es->s_feature_compat |=
-               cpu_to_le32(EXT2_FEATURE_COMPAT_EXT_ATTR);
+       EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
        sb->s_dirt = 1;
        mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
-       unlock_super(sb);
 }
 
 /*
index 704cd44a40c256e10ccc097aae36f35dd32309ab..e77766a8b3f07a8f3e37f7f66ce917b5e79ea061 100644 (file)
@@ -5,7 +5,7 @@
 obj-$(CONFIG_EXT3_FS) += ext3.o
 
 ext3-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-          ioctl.o namei.o super.o symlink.o hash.o resize.o
+          ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
 
 ext3-$(CONFIG_EXT3_FS_XATTR)    += xattr.o xattr_user.o xattr_trusted.o
 ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
index b41a7d7e20f006bc7e85a6e4b2315cf38ee2fdc9..22161740ba295e7e1e650c571544cb9e6c2b5f3b 100644 (file)
@@ -144,7 +144,7 @@ restart:
 
        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
-               rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
+               rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start:  %lu, end:  %lu\n",
@@ -730,7 +730,7 @@ find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
                here = 0;
 
        p = ((char *)bh->b_data) + (here >> 3);
-       r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+       r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
        next = (r - ((char *)bh->b_data)) << 3;
 
        if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
@@ -949,7 +949,7 @@ static int find_next_reservable_window(
 
                prev = rsv;
                next = rb_next(&rsv->rsv_node);
-               rsv = list_entry(next,struct ext3_reserve_window_node,rsv_node);
+               rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
 
                /*
                 * Reached the last reservation, we can just append to the
@@ -1148,7 +1148,7 @@ retry:
         * check if the first free block is within the
         * free space we just reserved
         */
-       if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+       if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
                return 0;               /* success */
        /*
         * if the first free bit we found is out of the reservable space
@@ -1193,7 +1193,7 @@ static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
        if (!next)
                my_rsv->rsv_end += size;
        else {
-               next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+               next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
 
                if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
                        my_rsv->rsv_end += size;
@@ -1271,7 +1271,7 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
        }
        /*
         * grp_goal is a group relative block number (if there is a goal)
-        * 0 < grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
+        * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
         * first block is a filesystem wide block number
         * first block is the block number of the first block in this group
         */
@@ -1307,10 +1307,14 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
                        if (!goal_in_my_reservation(&my_rsv->rsv_window,
                                                        grp_goal, group, sb))
                                grp_goal = -1;
-               } else if (grp_goal > 0 &&
-                         (my_rsv->rsv_end-grp_goal+1) < *count)
-                       try_to_extend_reservation(my_rsv, sb,
-                                       *count-my_rsv->rsv_end + grp_goal - 1);
+               } else if (grp_goal >= 0) {
+                       int curr = my_rsv->rsv_end -
+                                       (grp_goal + group_first_block) + 1;
+
+                       if (curr < *count)
+                               try_to_extend_reservation(my_rsv, sb,
+                                                       *count - curr);
+               }
 
                if ((my_rsv->rsv_start > group_last_block) ||
                                (my_rsv->rsv_end < group_first_block)) {
@@ -1511,10 +1515,8 @@ retry_alloc:
                if (group_no >= ngroups)
                        group_no = 0;
                gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-               if (!gdp) {
-                       *errp = -EIO;
-                       goto out;
-               }
+               if (!gdp)
+                       goto io_error;
                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
                /*
                 * skip this group if the number of
@@ -1548,6 +1550,7 @@ retry_alloc:
         */
        if (my_rsv) {
                my_rsv = NULL;
+               windowsz = 0;
                group_no = goal_group;
                goto retry_alloc;
        }
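On the list_entry() to rb_entry() switches in this file (and the matching ext4 hunks below): both macros expand to container_of(), so the change is behaviour-neutral, but rb_entry() is the type-appropriate accessor because rsv_node is a struct rb_node embedded in the reservation window, not a list_head. Roughly, per <linux/list.h> and <linux/rbtree.h>:

    #define list_entry(ptr, type, member)  container_of(ptr, type, member)
    #define rb_entry(ptr, type, member)    container_of(ptr, type, member)

    rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);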
index d0b54f30b914e5304f367252e5f6a3bd96ac8d38..5a9313ecd4ef7771a133fbd70d032784dd9b7dfd 100644 (file)
@@ -154,6 +154,9 @@ static int ext3_readdir(struct file * filp,
                        ext3_error (sb, "ext3_readdir",
                                "directory #%lu contains a hole at offset %lu",
                                inode->i_ino, (unsigned long)filp->f_pos);
+                       /* corrupt size?  Maybe no more blocks to read */
+                       if (filp->f_pos > inode->i_blocks << 9)
+                               break;
                        filp->f_pos += sb->s_blocksize - offset;
                        continue;
                }
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
new file mode 100644 (file)
index 0000000..e1f91fd
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext3 and JBD
+ */
+
+#include <linux/ext3_jbd.h>
+
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = journal_get_undo_access(handle, bh);
+       if (err)
+               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = journal_get_write_access(handle, bh);
+       if (err)
+               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext3_journal_forget(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = journal_forget(handle, bh);
+       if (err)
+               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+                               unsigned long blocknr, struct buffer_head *bh)
+{
+       int err = journal_revoke(handle, blocknr, bh);
+       if (err)
+               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext3_journal_get_create_access(const char *where,
+                               handle_t *handle, struct buffer_head *bh)
+{
+       int err = journal_get_create_access(handle, bh);
+       if (err)
+               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext3_journal_dirty_metadata(const char *where,
+                               handle_t *handle, struct buffer_head *bh)
+{
+       int err = journal_dirty_metadata(handle, bh);
+       if (err)
+               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
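These out-of-line journalling helpers are built through the ext3_jbd.o object added to the ext3 Makefile earlier in this commit; each forwards to the corresponding JBD call and, on error, aborts the handle while recording the caller's location. Presumably the ext3_jbd.h header (not shown in this diff) now maps the public wrappers onto them along these lines:

    #define ext3_journal_get_write_access(handle, bh) \
            __ext3_journal_get_write_access(__FUNCTION__, (handle), (bh))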
index 03ba5bcab18633725372096cd94c9e52b6aa83cc..beaf25f5112fd9f427a393bac3b9f12b634e14de 100644 (file)
@@ -1148,37 +1148,102 @@ static int do_journal_get_write_access(handle_t *handle,
        return ext3_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea of this helper function is the following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext3_prepare_failure(struct file *file, struct page *page,
+                               unsigned from, unsigned to)
+{
+       struct address_space *mapping;
+       struct buffer_head *bh, *head, *next;
+       unsigned block_start, block_end;
+       unsigned blocksize;
+       int ret;
+       handle_t *handle = ext3_journal_current_handle();
+
+       mapping = page->mapping;
+       if (ext3_should_writeback_data(mapping->host)) {
+               /* optimization: no constraints about data */
+skip:
+               return ext3_journal_stop(handle);
+       }
+
+       head = page_buffers(page);
+       blocksize = head->b_size;
+       for (   bh = head, block_start = 0;
+               bh != head || !block_start;
+               block_start = block_end, bh = next)
+       {
+               next = bh->b_this_page;
+               block_end = block_start + blocksize;
+               if (block_end <= from)
+                       continue;
+               if (block_start >= to) {
+                       block_start = to;
+                       break;
+               }
+               if (!buffer_mapped(bh))
+               /* prepare_write failed on this bh */
+                       break;
+               if (ext3_should_journal_data(mapping->host)) {
+                       ret = do_journal_get_write_access(handle, bh);
+                       if (ret) {
+                               ext3_journal_stop(handle);
+                               return ret;
+                       }
+               }
+       /*
+        * block_start here becomes the first block where the current iteration
+        * of prepare_write failed.
+        */
+       }
+       if (block_start <= from)
+               goto skip;
+
+       /* commit allocated and zeroed buffers */
+       return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext3_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
 {
        struct inode *inode = page->mapping->host;
-       int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
+       int ret, ret2;
+       int needed_blocks = ext3_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;
 
 retry:
        handle = ext3_journal_start(inode, needed_blocks);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
-       }
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
        if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
                ret = nobh_prepare_write(page, from, to, ext3_get_block);
        else
                ret = block_prepare_write(page, from, to, ext3_get_block);
        if (ret)
-               goto prepare_write_failed;
+               goto failure;
 
        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
+               if (ret)
+                       /* fatal error, just put the handle and return */
+                       journal_stop(handle);
        }
-prepare_write_failed:
-       if (ret)
-               ext3_journal_stop(handle);
+       return ret;
+
+failure:
+       ret2 = ext3_prepare_failure(file, page, from, to);
+       if (ret2 < 0)
+               return ret2;
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
-out:
+       /* retry number exceeded, or other error like -EDQUOT */
        return ret;
 }
 
index 906731a20f1ae37c4f7be8999d72d7986cbabadd..60d2f9dbdb002b3bcc8eb277ec2358f6c6040368 100644 (file)
@@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file,
                                           dir->i_sb->s_blocksize -
                                           EXT3_DIR_REC_LEN(0));
        for (; de < top; de = ext3_next_entry(de)) {
+               if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+                                       (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
+                                               +((char *)de - bh->b_data))) {
+                       /* On error, skip the f_pos to the next block. */
+                       dir_file->f_pos = (dir_file->f_pos |
+                                       (dir->i_sb->s_blocksize - 1)) + 1;
+                       brelse (bh);
+                       return count;
+               }
                ext3fs_dirhash(de->name, de->name_len, hinfo);
                if ((hinfo->hash < start_hash) ||
                    ((hinfo->hash == start_hash) &&
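The error path added here advances f_pos to the next directory block by rounding up: OR-ing f_pos with (blocksize - 1) sets all the low bits, and adding 1 then lands exactly on the following block boundary. For example, with a 4096-byte block and f_pos = 5000, (5000 | 4095) + 1 = 8192, the start of the next block.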
index afc2d4f42d7782800f6d65dfc49c6aebc1925169..580b8a6ca979a67b2412a9b8b2f16aaa4e232b92 100644 (file)
@@ -436,7 +436,7 @@ static void ext3_put_super (struct super_block * sb)
        return;
 }
 
-static kmem_cache_t *ext3_inode_cachep;
+static struct kmem_cache *ext3_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -445,7 +445,7 @@ static struct inode *ext3_alloc_inode(struct super_block *sb)
 {
        struct ext3_inode_info *ei;
 
-       ei = kmem_cache_alloc(ext3_inode_cachep, SLAB_NOFS);
+       ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
 #ifdef CONFIG_EXT3_FS_POSIX_ACL
@@ -462,7 +462,7 @@ static void ext3_destroy_inode(struct inode *inode)
        kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
 
@@ -1264,6 +1264,12 @@ static void ext3_orphan_cleanup (struct super_block * sb,
                return;
        }
 
+       if (bdev_read_only(sb->s_bdev)) {
+               printk(KERN_ERR "EXT3-fs: write access "
+                       "unavailable, skipping orphan cleanup.\n");
+               return;
+       }
+
        if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
                if (es->s_last_orphan)
                        jbd_debug(1, "Errors on filesystem, "
@@ -2387,6 +2393,7 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
        struct ext3_super_block *es = sbi->s_es;
        ext3_fsblk_t overhead;
        int i;
+       u64 fsid;
 
        if (test_opt (sb, MINIX_DF))
                overhead = 0;
@@ -2433,6 +2440,10 @@ static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
        buf->f_files = le32_to_cpu(es->s_inodes_count);
        buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
        buf->f_namelen = EXT3_NAME_LEN;
+       fsid = le64_to_cpup((void *)es->s_uuid) ^
+              le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+       buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+       buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
        return 0;
 }
 
index f86f2482f01df3a87b8a25ef9941fcc5a3a68b93..99857a400f4be217dcf92e5f16ac6c62783b62e2 100644 (file)
@@ -459,14 +459,11 @@ static void ext3_xattr_update_super_block(handle_t *handle,
        if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
                return;
 
-       lock_super(sb);
        if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
-               EXT3_SB(sb)->s_es->s_feature_compat |=
-                       cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
+               EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
                sb->s_dirt = 1;
                ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
        }
-       unlock_super(sb);
 }
 
 /*
index a6acb96ebeb9b004324632cb867412850f75938d..ae6e7e502ac9c0d5585370ba0c051410ab766ad8 100644 (file)
@@ -5,7 +5,8 @@
 obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o
 
 ext4dev-y      := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-          ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o
+                  ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
+                  ext4_jbd2.o
 
 ext4dev-$(CONFIG_EXT4DEV_FS_XATTR)     += xattr.o xattr_user.o xattr_trusted.o
 ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL) += acl.o
index 5d45582f9517e35bacb67405d737190aaef128be..c4dd1103ccf1033f70af475bbf26ff3a2c107cc8 100644 (file)
@@ -165,7 +165,7 @@ restart:
 
        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
-               rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node);
+               rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start:  %llu, end:  %llu\n",
@@ -747,7 +747,7 @@ find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
                here = 0;
 
        p = ((char *)bh->b_data) + (here >> 3);
-       r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+       r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
        next = (r - ((char *)bh->b_data)) << 3;
 
        if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
@@ -966,7 +966,7 @@ static int find_next_reservable_window(
 
                prev = rsv;
                next = rb_next(&rsv->rsv_node);
-               rsv = list_entry(next,struct ext4_reserve_window_node,rsv_node);
+               rsv = rb_entry(next,struct ext4_reserve_window_node,rsv_node);
 
                /*
                 * Reached the last reservation, we can just append to the
@@ -1165,7 +1165,7 @@ retry:
         * check if the first free block is within the
         * free space we just reserved
         */
-       if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+       if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
                return 0;               /* success */
        /*
         * if the first free bit we found is out of the reservable space
@@ -1210,7 +1210,7 @@ static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
        if (!next)
                my_rsv->rsv_end += size;
        else {
-               next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);
+               next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
 
                if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
                        my_rsv->rsv_end += size;
@@ -1288,7 +1288,7 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
        }
        /*
         * grp_goal is a group relative block number (if there is a goal)
-        * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
+        * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb)
         * first block is a filesystem wide block number
         * first block is the block number of the first block in this group
         */
@@ -1324,10 +1324,14 @@ ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
                        if (!goal_in_my_reservation(&my_rsv->rsv_window,
                                                        grp_goal, group, sb))
                                grp_goal = -1;
-               } else if (grp_goal > 0 &&
-                         (my_rsv->rsv_end-grp_goal+1) < *count)
-                       try_to_extend_reservation(my_rsv, sb,
-                                       *count-my_rsv->rsv_end + grp_goal - 1);
+               } else if (grp_goal >= 0) {
+                       int curr = my_rsv->rsv_end -
+                                       (grp_goal + group_first_block) + 1;
+
+                       if (curr < *count)
+                               try_to_extend_reservation(my_rsv, sb,
+                                                       *count - curr);
+               }
 
                if ((my_rsv->rsv_start > group_last_block) ||
                                (my_rsv->rsv_end < group_first_block)) {
@@ -1525,10 +1529,8 @@ retry_alloc:
                if (group_no >= ngroups)
                        group_no = 0;
                gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
-               if (!gdp) {
-                       *errp = -EIO;
-                       goto out;
-               }
+               if (!gdp)
+                       goto io_error;
                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
                /*
                 * skip this group if the number of
@@ -1562,6 +1564,7 @@ retry_alloc:
         */
        if (my_rsv) {
                my_rsv = NULL;
+               windowsz = 0;
                group_no = goal_group;
                goto retry_alloc;
        }
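A note on the memscan() length change repeated in the ext3 and ext4 hunks: p already points at the byte containing bit `here`, so the scan must cover every byte from there through the byte holding bit maxblocks - 1, i.e. ((maxblocks + 7) >> 3) - (here >> 3) bytes. The old expression (maxblocks - here + 7) >> 3 under-counts when `here` is not byte-aligned; with maxblocks = 17 and here = 7, for instance, it yields 2 bytes (covering bits 0..15) and can miss a free block at bit 16, while the new form yields 3.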
index f8595787a70e0d70fb570f05fd887e70fa91027a..f2ed3e7fb9f5045566ba212c3238bf1e2c3a707f 100644 (file)
@@ -153,6 +153,9 @@ static int ext4_readdir(struct file * filp,
                        ext4_error (sb, "ext4_readdir",
                                "directory #%lu contains a hole at offset %lu",
                                inode->i_ino, (unsigned long)filp->f_pos);
+                       /* corrupt size?  Maybe no more blocks to read */
+                       if (filp->f_pos > inode->i_blocks << 9)
+                               break;
                        filp->f_pos += sb->s_blocksize - offset;
                        continue;
                }
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
new file mode 100644 (file)
index 0000000..d6afe4e
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Interface between ext4 and JBD
+ */
+
+#include <linux/ext4_jbd2.h>
+
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_undo_access(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_write_access(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext4_journal_forget(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = jbd2_journal_forget(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+                               ext4_fsblk_t blocknr, struct buffer_head *bh)
+{
+       int err = jbd2_journal_revoke(handle, blocknr, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext4_journal_get_create_access(const char *where,
+                               handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_create_access(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+int __ext4_journal_dirty_metadata(const char *where,
+                               handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_dirty_metadata(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
index 2608dce18f3e7f50461e1fe84225a24555b1da26..dc2724fa7622b2b1492d0d547425e6dcc1e7e35c 100644 (file)
@@ -48,7 +48,7 @@
  * ext_pblock:
  * combine low and high parts of physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
+static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
 {
        ext4_fsblk_t block;
 
@@ -61,7 +61,7 @@ static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
  * idx_pblock:
  * combine low and high parts of a leaf physical block number into ext4_fsblk_t
  */
-static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
+static ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
 {
        ext4_fsblk_t block;
 
@@ -75,7 +75,7 @@ static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
  * stores a large physical block number into an extent struct,
  * breaking it into parts
  */
-static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
+static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
 {
        ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -86,7 +86,7 @@ static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb
  * stores a large physical block number into an index struct,
  * breaking it into parts
  */
-static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
+static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
 {
        ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
@@ -186,7 +186,8 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                depth = path->p_depth;
 
                /* try to predict block placement */
-               if ((ex = path[depth].p_ext))
+               ex = path[depth].p_ext;
+               if (ex)
                        return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
 
                /* it looks like index is empty;
@@ -215,7 +216,7 @@ ext4_ext_new_block(handle_t *handle, struct inode *inode,
        return newblock;
 }
 
-static inline int ext4_ext_space_block(struct inode *inode)
+static int ext4_ext_space_block(struct inode *inode)
 {
        int size;
 
@@ -228,7 +229,7 @@ static inline int ext4_ext_space_block(struct inode *inode)
        return size;
 }
 
-static inline int ext4_ext_space_block_idx(struct inode *inode)
+static int ext4_ext_space_block_idx(struct inode *inode)
 {
        int size;
 
@@ -241,7 +242,7 @@ static inline int ext4_ext_space_block_idx(struct inode *inode)
        return size;
 }
 
-static inline int ext4_ext_space_root(struct inode *inode)
+static int ext4_ext_space_root(struct inode *inode)
 {
        int size;
 
@@ -255,7 +256,7 @@ static inline int ext4_ext_space_root(struct inode *inode)
        return size;
 }
 
-static inline int ext4_ext_space_root_idx(struct inode *inode)
+static int ext4_ext_space_root_idx(struct inode *inode)
 {
        int size;
 
@@ -476,13 +477,12 @@ ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
 
        /* account possible depth increase */
        if (!path) {
-               path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
+               path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
-       memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
        path[0].p_hdr = eh;
 
        /* walk through the tree */
@@ -543,7 +543,8 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
        struct ext4_extent_idx *ix;
        int len, err;
 
-       if ((err = ext4_ext_get_access(handle, inode, curp)))
+       err = ext4_ext_get_access(handle, inode, curp);
+       if (err)
                return err;
 
        BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
@@ -641,10 +642,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
         * We need this to handle errors and free blocks
         * upon them.
         */
-       ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
+       ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;
-       memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
 
        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
@@ -665,7 +665,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        }
        lock_buffer(bh);
 
-       if ((err = ext4_journal_get_create_access(handle, bh)))
+       err = ext4_journal_get_create_access(handle, bh);
+       if (err)
                goto cleanup;
 
        neh = ext_block_hdr(bh);
@@ -702,18 +703,21 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
-       if ((err = ext4_journal_dirty_metadata(handle, bh)))
+       err = ext4_journal_dirty_metadata(handle, bh);
+       if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;
 
        /* correct old leaf */
        if (m) {
-               if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+               err = ext4_ext_get_access(handle, inode, path + depth);
+               if (err)
                        goto cleanup;
                path[depth].p_hdr->eh_entries =
                     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
-               if ((err = ext4_ext_dirty(handle, inode, path + depth)))
+               err = ext4_ext_dirty(handle, inode, path + depth);
+               if (err)
                        goto cleanup;
 
        }
@@ -736,7 +740,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                }
                lock_buffer(bh);
 
-               if ((err = ext4_journal_get_create_access(handle, bh)))
+               err = ext4_journal_get_create_access(handle, bh);
+               if (err)
                        goto cleanup;
 
                neh = ext_block_hdr(bh);
@@ -780,7 +785,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
 
-               if ((err = ext4_journal_dirty_metadata(handle, bh)))
+               err = ext4_journal_dirty_metadata(handle, bh);
+               if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;
@@ -800,9 +806,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        }
 
        /* insert new index */
-       if (err)
-               goto cleanup;
-
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);
 
@@ -857,7 +860,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        }
        lock_buffer(bh);
 
-       if ((err = ext4_journal_get_create_access(handle, bh))) {
+       err = ext4_journal_get_create_access(handle, bh);
+       if (err) {
                unlock_buffer(bh);
                goto out;
        }
@@ -877,11 +881,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
-       if ((err = ext4_journal_dirty_metadata(handle, bh)))
+       err = ext4_journal_dirty_metadata(handle, bh);
+       if (err)
                goto out;
 
        /* create index in new top-level index: num,max,pointer */
-       if ((err = ext4_ext_get_access(handle, inode, curp)))
+       err = ext4_ext_get_access(handle, inode, curp);
+       if (err)
                goto out;
 
        curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
@@ -1073,27 +1079,31 @@ int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
         */
        k = depth - 1;
        border = path[depth].p_ext->ee_block;
-       if ((err = ext4_ext_get_access(handle, inode, path + k)))
+       err = ext4_ext_get_access(handle, inode, path + k);
+       if (err)
                return err;
        path[k].p_idx->ei_block = border;
-       if ((err = ext4_ext_dirty(handle, inode, path + k)))
+       err = ext4_ext_dirty(handle, inode, path + k);
+       if (err)
                return err;
 
        while (k--) {
                /* change all left-side indexes */
                if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
                        break;
-               if ((err = ext4_ext_get_access(handle, inode, path + k)))
+               err = ext4_ext_get_access(handle, inode, path + k);
+               if (err)
                        break;
                path[k].p_idx->ei_block = border;
-               if ((err = ext4_ext_dirty(handle, inode, path + k)))
+               err = ext4_ext_dirty(handle, inode, path + k);
+               if (err)
                        break;
        }
 
        return err;
 }
 
-static int inline
+static int
 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
                                struct ext4_extent *ex2)
 {
@@ -1145,7 +1155,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
                                le16_to_cpu(newext->ee_len),
                                le32_to_cpu(ex->ee_block),
                                le16_to_cpu(ex->ee_len), ext_pblock(ex));
-               if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+               err = ext4_ext_get_access(handle, inode, path + depth);
+               if (err)
                        return err;
                ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
                                         + le16_to_cpu(newext->ee_len));
@@ -1195,7 +1206,8 @@ repeat:
 has_space:
        nearex = path[depth].p_ext;
 
-       if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+       err = ext4_ext_get_access(handle, inode, path + depth);
+       if (err)
                goto cleanup;
 
        if (!nearex) {
@@ -1383,7 +1395,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
        return err;
 }
 
-static inline void
+static void
 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
                        __u32 len, __u32 start, int type)
 {
@@ -1401,7 +1413,7 @@ ext4_ext_put_in_cache(struct inode *inode, __u32 block,
  * calculate boundaries of the gap that the requested block fits into
  * and cache this gap
  */
-static inline void
+static void
 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                unsigned long block)
 {
@@ -1442,7 +1454,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
        ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
 }
 
-static inline int
+static int
 ext4_ext_in_cache(struct inode *inode, unsigned long block,
                        struct ext4_extent *ex)
 {
@@ -1489,10 +1501,12 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
        path--;
        leaf = idx_pblock(path->p_idx);
        BUG_ON(path->p_hdr->eh_entries == 0);
-       if ((err = ext4_ext_get_access(handle, inode, path)))
+       err = ext4_ext_get_access(handle, inode, path);
+       if (err)
                return err;
        path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
-       if ((err = ext4_ext_dirty(handle, inode, path)))
+       err = ext4_ext_dirty(handle, inode, path);
+       if (err)
                return err;
        ext_debug("index is empty, remove it, free block %llu\n", leaf);
        bh = sb_find_get_block(inode->i_sb, leaf);
@@ -1509,7 +1523,7 @@ int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
  * the caller should calculate credits under truncate_mutex and
  * pass the actual path.
  */
-int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
+int ext4_ext_calc_credits_for_insert(struct inode *inode,
                                                struct ext4_ext_path *path)
 {
        int depth, needed;
@@ -1534,16 +1548,17 @@ int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
 
        /*
         * tree can be full, so it would need to grow in depth:
-        * allocation + old root + new root
+        * we need one credit to modify old root, credits for
+        * new root will be added in split accounting
         */
-       needed += 2 + 1 + 1;
+       needed += 1;
 
        /*
         * Index split can happen, we would need:
         *    allocate intermediate indexes (bitmap + group)
         *  + change two blocks at each level, but root (already included)
         */
-       needed = (depth * 2) + (depth * 2);
+       needed += (depth * 2) + (depth * 2);
 
        /* any allocation modifies superblock */
        needed += 1;
@@ -1718,7 +1733,7 @@ out:
  * ext4_ext_more_to_rm:
  * returns 1 if current index has to be freed (even partial)
  */
-static int inline
+static int
 ext4_ext_more_to_rm(struct ext4_ext_path *path)
 {
        BUG_ON(path->p_idx == NULL);
@@ -1756,12 +1771,11 @@ int ext4_ext_remove_space(struct inode *inode, unsigned long start)
         * We start scanning from right side, freeing all the blocks
         * after i_size and walking into the tree depth-wise.
         */
-       path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
+       path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
        if (path == NULL) {
                ext4_journal_stop(handle);
                return -ENOMEM;
        }
-       memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
        path[0].p_hdr = ext_inode_hdr(inode);
        if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
                err = -EIO;
@@ -1932,7 +1946,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
        mutex_lock(&EXT4_I(inode)->truncate_mutex);
 
        /* check in cache */
-       if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
+       goal = ext4_ext_in_cache(inode, iblock, &newex);
+       if (goal) {
                if (goal == EXT4_EXT_CACHE_GAP) {
                        if (!create) {
                                /* block isn't allocated yet and
@@ -1971,7 +1986,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
         */
        BUG_ON(path[depth].p_ext == NULL && depth != 0);
 
-       if ((ex = path[depth].p_ext)) {
+       ex = path[depth].p_ext;
+       if (ex) {
                unsigned long ee_block = le32_to_cpu(ex->ee_block);
                ext4_fsblk_t ee_start = ext_pblock(ex);
                unsigned short ee_len  = le16_to_cpu(ex->ee_len);
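The kmalloc()+memset() to kzalloc() conversions above are intended to be behaviour-preserving, with one subtlety visible in ext4_ext_find_extent(): the old code allocated depth + 2 path entries but memset() cleared only depth + 1, whereas kzalloc() zeroes the whole allocation. A minimal sketch of the equivalence:

    /* before: allocate, then clear (note the mismatched lengths) */
    path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS);
    memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));

    /* after: one call returns zeroed memory for the full size */
    path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), GFP_NOFS);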
index 0a60ec5a16dbc4bb92645dfafcd644e895d05161..1d85d4ec9598e063968894e1cc9aa592e65dc849 100644 (file)
@@ -1147,37 +1147,102 @@ static int do_journal_get_write_access(handle_t *handle,
        return ext4_journal_get_write_access(handle, bh);
 }
 
+/*
+ * The idea of this helper function is the following:
+ * if prepare_write has allocated some blocks, but not all of them, the
+ * transaction must include the content of the newly allocated blocks.
+ * This content is expected to be set to zeroes by block_prepare_write().
+ * 2006/10/14  SAW
+ */
+static int ext4_prepare_failure(struct file *file, struct page *page,
+                               unsigned from, unsigned to)
+{
+       struct address_space *mapping;
+       struct buffer_head *bh, *head, *next;
+       unsigned block_start, block_end;
+       unsigned blocksize;
+       int ret;
+       handle_t *handle = ext4_journal_current_handle();
+
+       mapping = page->mapping;
+       if (ext4_should_writeback_data(mapping->host)) {
+               /* optimization: no constraints about data */
+skip:
+               return ext4_journal_stop(handle);
+       }
+
+       head = page_buffers(page);
+       blocksize = head->b_size;
+       for (   bh = head, block_start = 0;
+               bh != head || !block_start;
+               block_start = block_end, bh = next)
+       {
+               next = bh->b_this_page;
+               block_end = block_start + blocksize;
+               if (block_end <= from)
+                       continue;
+               if (block_start >= to) {
+                       block_start = to;
+                       break;
+               }
+               if (!buffer_mapped(bh))
+               /* prepare_write failed on this bh */
+                       break;
+               if (ext4_should_journal_data(mapping->host)) {
+                       ret = do_journal_get_write_access(handle, bh);
+                       if (ret) {
+                               ext4_journal_stop(handle);
+                               return ret;
+                       }
+               }
+       /*
+        * block_start here becomes the first block where the current iteration
+        * of prepare_write failed.
+        */
+       }
+       if (block_start <= from)
+               goto skip;
+
+       /* commit allocated and zeroed buffers */
+       return mapping->a_ops->commit_write(file, page, from, block_start);
+}
+
 static int ext4_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
 {
        struct inode *inode = page->mapping->host;
-       int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+       int ret, ret2;
+       int needed_blocks = ext4_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;
 
 retry:
        handle = ext4_journal_start(inode, needed_blocks);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
-       }
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
        if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
                ret = nobh_prepare_write(page, from, to, ext4_get_block);
        else
                ret = block_prepare_write(page, from, to, ext4_get_block);
        if (ret)
-               goto prepare_write_failed;
+               goto failure;
 
        if (ext4_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
+               if (ret)
+                       /* fatal error, just put the handle and return */
+                       journal_stop(handle);
        }
-prepare_write_failed:
-       if (ret)
-               ext4_journal_stop(handle);
+       return ret;
+
+failure:
+       ret2 = ext4_prepare_failure(file, page, from, to);
+       if (ret2 < 0)
+               return ret2;
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
-out:
+       /* retry number exceeded, or other error like -EDQUOT */
        return ret;
 }
 
index 8b1bd03d20f5d6dee84a89001d653810ef3e6706..859990eac50471dc4825e52779800fa133537f08 100644 (file)
@@ -552,6 +552,15 @@ static int htree_dirblock_to_tree(struct file *dir_file,
                                           dir->i_sb->s_blocksize -
                                           EXT4_DIR_REC_LEN(0));
        for (; de < top; de = ext4_next_entry(de)) {
+               if (!ext4_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
+                                       (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+                                               +((char *)de - bh->b_data))) {
+                       /* On error, skip the f_pos to the next block. */
+                       dir_file->f_pos = (dir_file->f_pos |
+                                       (dir->i_sb->s_blocksize - 1)) + 1;
+                       brelse (bh);
+                       return count;
+               }
                ext4fs_dirhash(de->name, de->name_len, hinfo);
                if ((hinfo->hash < start_hash) ||
                    ((hinfo->hash == start_hash) &&
index b4b022aa2bc26c23d923df3fa3641a9665c55ba8..486a641ca71b94fa0ed745a246d57285d01611e1 100644 (file)
@@ -486,7 +486,7 @@ static void ext4_put_super (struct super_block * sb)
        return;
 }
 
-static kmem_cache_t *ext4_inode_cachep;
+static struct kmem_cache *ext4_inode_cachep;
 
 /*
  * Called inside transaction, so use GFP_NOFS
@@ -495,7 +495,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
 {
        struct ext4_inode_info *ei;
 
-       ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS);
+       ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
@@ -513,7 +513,7 @@ static void ext4_destroy_inode(struct inode *inode)
        kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
@@ -1321,6 +1321,12 @@ static void ext4_orphan_cleanup (struct super_block * sb,
                return;
        }
 
+       if (bdev_read_only(sb->s_bdev)) {
+               printk(KERN_ERR "EXT4-fs: write access "
+                       "unavailable, skipping orphan cleanup.\n");
+               return;
+       }
+
        if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
                if (es->s_last_orphan)
                        jbd_debug(1, "Errors on filesystem, "
@@ -2460,6 +2466,7 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
        struct ext4_super_block *es = sbi->s_es;
        ext4_fsblk_t overhead;
        int i;
+       u64 fsid;
 
        if (test_opt (sb, MINIX_DF))
                overhead = 0;
@@ -2506,6 +2513,10 @@ static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
        buf->f_files = le32_to_cpu(es->s_inodes_count);
        buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
        buf->f_namelen = EXT4_NAME_LEN;
+       fsid = le64_to_cpup((void *)es->s_uuid) ^
+              le64_to_cpup((void *)es->s_uuid + sizeof(u64));
+       buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+       buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
        return 0;
 }
 
index 63233cd946a7333bc8079795a1f3d7c32c1b2485..dc969c357aa1d4ef9e207054e506053b8efdc0c6 100644 (file)
@@ -459,14 +459,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
        if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
                return;
 
-       lock_super(sb);
        if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
-               EXT4_SB(sb)->s_es->s_feature_compat |=
-                       cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR);
+               EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
                sb->s_dirt = 1;
                ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
        }
-       unlock_super(sb);
 }
 
 /*
index 82cc4f59e3bae3302abd291b392eccb90f4e1be4..05c2941c74f28e98073a531d9524aa9bf0664d69 100644 (file)
@@ -34,9 +34,9 @@ static inline int fat_max_cache(struct inode *inode)
        return FAT_MAX_CACHE;
 }
 
-static kmem_cache_t *fat_cache_cachep;
+static struct kmem_cache *fat_cache_cachep;
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
        struct fat_cache *cache = (struct fat_cache *)foo;
 
@@ -63,7 +63,7 @@ void fat_cache_destroy(void)
 
 static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
 {
-       return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL);
+       return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL);
 }
 
 static inline void fat_cache_free(struct fat_cache *cache)
index 78945b53b0f827fed88da656666ec1dd6fa9aaa9..a9e4688582a2599a7c1df7f3579aa15fe4632a87 100644 (file)
@@ -477,12 +477,12 @@ static void fat_put_super(struct super_block *sb)
        kfree(sbi);
 }
 
-static kmem_cache_t *fat_inode_cachep;
+static struct kmem_cache *fat_inode_cachep;
 
 static struct inode *fat_alloc_inode(struct super_block *sb)
 {
        struct msdos_inode_info *ei;
-       ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL);
+       ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -493,7 +493,7 @@ static void fat_destroy_inode(struct inode *inode)
        kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;
 
index e4f26165f12a22567526a74d2f463aa6d6798547..4740d35e52cd72849ef18cd1879bc452085f8c7f 100644 (file)
@@ -553,7 +553,7 @@ int send_sigurg(struct fown_struct *fown)
 }
 
 static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache __read_mostly;
+static struct kmem_cache *fasync_cache __read_mostly;
 
 /*
  * fasync_helper() is used by some character device drivers (mainly mice)
@@ -567,7 +567,7 @@ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fap
        int result = 0;
 
        if (on) {
-               new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL);
+               new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
                if (!new)
                        return -ENOMEM;
        }
index 8e81775c5dc818bd3b9d66e3fd895524294f5c78..51aef675470fb64984733fe9a710adc7cb6b980a 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -21,7 +21,6 @@
 struct fdtable_defer {
        spinlock_t lock;
        struct work_struct wq;
-       struct timer_list timer;
        struct fdtable *next;
 };
 
@@ -75,24 +74,10 @@ static void __free_fdtable(struct fdtable *fdt)
        kfree(fdt);
 }
 
-static void fdtable_timer(unsigned long data)
-{
-       struct fdtable_defer *fddef = (struct fdtable_defer *)data;
-
-       spin_lock(&fddef->lock);
-       /*
-        * If someone already emptied the queue return.
-        */
-       if (!fddef->next)
-               goto out;
-       if (!schedule_work(&fddef->wq))
-               mod_timer(&fddef->timer, 5);
-out:
-       spin_unlock(&fddef->lock);
-}
-
-static void free_fdtable_work(struct fdtable_defer *f)
+static void free_fdtable_work(struct work_struct *work)
 {
+       struct fdtable_defer *f =
+               container_of(work, struct fdtable_defer, wq);
        struct fdtable *fdt;
 
        spin_lock_bh(&f->lock);
@@ -142,13 +127,8 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
                spin_lock(&fddef->lock);
                fdt->next = fddef->next;
                fddef->next = fdt;
-               /*
-                * vmallocs are handled from the workqueue context.
-                * If the per-cpu workqueue is running, then we
-                * defer work scheduling through a timer.
-                */
-               if (!schedule_work(&fddef->wq))
-                       mod_timer(&fddef->timer, 5);
+               /* vmallocs are handled from the workqueue context */
+               schedule_work(&fddef->wq);
                spin_unlock(&fddef->lock);
                put_cpu_var(fdtable_defer_list);
        }
@@ -351,10 +331,7 @@ static void __devinit fdtable_defer_list_init(int cpu)
 {
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
        spin_lock_init(&fddef->lock);
-       INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef);
-       init_timer(&fddef->timer);
-       fddef->timer.data = (unsigned long)fddef;
-       fddef->timer.function = fdtable_timer;
+       INIT_WORK(&fddef->wq, free_fdtable_work);
        fddef->next = NULL;
 }
 
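The fs/file.c hunks above move fdtable freeing to the work_struct callback convention in which the handler receives the work item itself: the work stays embedded in struct fdtable_defer, the handler recovers the enclosing structure with container_of(), and the old timer fallback plus the extra data argument to INIT_WORK() disappear. The standalone sketch below shows just that idiom; the *_demo types are stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };            /* stand-in for the kernel type */

struct fdtable_defer_demo {
        int lock;                               /* stand-in for spinlock_t */
        struct work_struct wq;                  /* embedded work item */
        void *next;
};

static void free_fdtable_work_demo(struct work_struct *work)
{
        /* recover the per-cpu structure from the embedded member */
        struct fdtable_defer_demo *f =
                container_of(work, struct fdtable_defer_demo, wq);
        printf("recovered enclosing struct at %p\n", (void *)f);
}

int main(void)
{
        struct fdtable_defer_demo fddef = { 0 };
        free_fdtable_work_demo(&fddef.wq);      /* what schedule_work() leads to */
        return 0;
}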
index 4786d51ad3bd59faa1a7a93c7e944f2b6463fa71..0b7ae897cb781ab9c55beff17bda465dec9b6a48 100644 (file)
@@ -46,7 +46,7 @@ extern const struct address_space_operations vxfs_immed_aops;
 
 extern struct inode_operations vxfs_immed_symlink_iops;
 
-kmem_cache_t           *vxfs_inode_cachep;
+struct kmem_cache              *vxfs_inode_cachep;
 
 
 #ifdef DIAGNOSTIC
@@ -103,7 +103,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
                struct vxfs_inode_info  *vip;
                struct vxfs_dinode      *dip;
 
-               if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+               if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
                        goto fail;
                dip = (struct vxfs_dinode *)(bp->b_data + offset);
                memcpy(vip, dip, sizeof(*vip));
@@ -145,7 +145,7 @@ __vxfs_iget(ino_t ino, struct inode *ilistp)
                struct vxfs_dinode      *dip;
                caddr_t                 kaddr = (char *)page_address(pp);
 
-               if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL)))
+               if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
                        goto fail;
                dip = (struct vxfs_dinode *)(kaddr + offset);
                memcpy(vip, dip, sizeof(*vip));
index 66571eafbb1eb88afd8acc0e3625bdc0342207df..357764d85ff1e0cc81723f4006f78479e5164fc8 100644 (file)
@@ -19,7 +19,7 @@
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 
-static kmem_cache_t *fuse_req_cachep;
+static struct kmem_cache *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
@@ -41,7 +41,7 @@ static void fuse_request_init(struct fuse_req *req)
 
 struct fuse_req *fuse_request_alloc(void)
 {
-       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
+       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
index c71a6c092ad9d4f372553dac847c1af91355d048..1cabdb229adb85f3b575c8249f1b09e857aa4246 100644 (file)
@@ -141,9 +141,6 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
                struct fuse_req *forget_req;
                struct dentry *parent;
 
-               /* Doesn't hurt to "reset" the validity timeout */
-               fuse_invalidate_entry_cache(entry);
-
                /* For negative dentries, always do a fresh lookup */
                if (!inode)
                        return 0;
@@ -1027,6 +1024,8 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
        if (attr->ia_valid & ATTR_SIZE) {
                unsigned long limit;
                is_truncate = 1;
+               if (IS_SWAPFILE(inode))
+                       return -ETXTBSY;
                limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
                if (limit != RLIM_INFINITY && attr->ia_size > (loff_t) limit) {
                        send_sig(SIGXFSZ, current, 0);
index 763a50daf1c0a7cc7c336a0d2ba99a64e7baeb1c..128f79c40803fb43381da3d56370c74b3a5bff48 100644 (file)
@@ -754,6 +754,42 @@ static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
        return err;
 }
 
+static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
+{
+       struct inode *inode = mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_req *req;
+       struct fuse_bmap_in inarg;
+       struct fuse_bmap_out outarg;
+       int err;
+
+       if (!inode->i_sb->s_bdev || fc->no_bmap)
+               return 0;
+
+       req = fuse_get_req(fc);
+       if (IS_ERR(req))
+               return 0;
+
+       memset(&inarg, 0, sizeof(inarg));
+       inarg.block = block;
+       inarg.blocksize = inode->i_sb->s_blocksize;
+       req->in.h.opcode = FUSE_BMAP;
+       req->in.h.nodeid = get_node_id(inode);
+       req->in.numargs = 1;
+       req->in.args[0].size = sizeof(inarg);
+       req->in.args[0].value = &inarg;
+       req->out.numargs = 1;
+       req->out.args[0].size = sizeof(outarg);
+       req->out.args[0].value = &outarg;
+       request_send(fc, req);
+       err = req->out.h.error;
+       fuse_put_request(fc, req);
+       if (err == -ENOSYS)
+               fc->no_bmap = 1;
+
+       return err ? 0 : outarg.block;
+}
+
 static const struct file_operations fuse_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
@@ -787,6 +823,7 @@ static const struct address_space_operations fuse_file_aops  = {
        .commit_write   = fuse_commit_write,
        .readpages      = fuse_readpages,
        .set_page_dirty = fuse_set_page_dirty,
+       .bmap           = fuse_bmap,
 };
 
 void fuse_init_file_inode(struct inode *inode)
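fuse_bmap() above uses the probe-and-remember pattern the fuse_conn flags exist for: if the userspace filesystem answers FUSE_BMAP with -ENOSYS, the connection sets no_bmap and every later call short-circuits to "unsupported" (block 0 is returned). A self-contained sketch of that fallback logic follows, with a fake transport standing in for request_send() and illustrative names throughout.

#include <errno.h>
#include <stdio.h>

struct conn_demo { unsigned no_bmap : 1; };

/* pretend userspace server that does not implement BMAP */
static int send_bmap_request(unsigned long block, unsigned long *out)
{
        (void)block; (void)out;
        return -ENOSYS;
}

static unsigned long bmap_demo(struct conn_demo *fc, unsigned long block)
{
        unsigned long out = 0;
        int err;

        if (fc->no_bmap)
                return 0;                       /* already known unsupported */

        err = send_bmap_request(block, &out);
        if (err == -ENOSYS)
                fc->no_bmap = 1;                /* remember for future calls */

        return err ? 0 : out;
}

int main(void)
{
        struct conn_demo fc = { 0 };
        printf("%lu\n", bmap_demo(&fc, 10));    /* probes, caches -ENOSYS */
        printf("%lu\n", bmap_demo(&fc, 11));    /* short-circuits */
        return 0;
}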
index 91edb8932d905890a342c1f66cbfe5dbdd04f828..b98b20de740562bd32dc81fcf8af415779496f97 100644 (file)
@@ -298,6 +298,9 @@ struct fuse_conn {
            reply, before any other request, and never cleared */
        unsigned conn_error : 1;
 
+       /** Connection successful.  Only set in INIT */
+       unsigned conn_init : 1;
+
        /** Do readpages asynchronously?  Only set in INIT */
        unsigned async_read : 1;
 
@@ -339,6 +342,9 @@ struct fuse_conn {
        /** Is interrupt not implemented by fs? */
        unsigned no_interrupt : 1;
 
+       /** Is bmap not implemented by fs? */
+       unsigned no_bmap : 1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
@@ -365,6 +371,9 @@ struct fuse_conn {
 
        /** Key for lock owner ID scrambling */
        u32 scramble_key[4];
+
+       /** Reserved request for the DESTROY message */
+       struct fuse_req *destroy_req;
 };
 
 static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
index fc42035703702813266c14c66d784ee1ec7e1c20..12450d2b320e771b13b7191403e9c4dd1f8dbf82 100644 (file)
@@ -22,7 +22,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
 MODULE_DESCRIPTION("Filesystem in Userspace");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t *fuse_inode_cachep;
+static struct kmem_cache *fuse_inode_cachep;
 struct list_head fuse_conn_list;
 DEFINE_MUTEX(fuse_mutex);
 
@@ -39,6 +39,7 @@ struct fuse_mount_data {
        unsigned group_id_present : 1;
        unsigned flags;
        unsigned max_read;
+       unsigned blksize;
 };
 
 static struct inode *fuse_alloc_inode(struct super_block *sb)
@@ -46,7 +47,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        struct inode *inode;
        struct fuse_inode *fi;
 
-       inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL);
+       inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
        if (!inode)
                return NULL;
 
@@ -205,10 +206,23 @@ static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags)
                fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
 }
 
+static void fuse_send_destroy(struct fuse_conn *fc)
+{
+       struct fuse_req *req = fc->destroy_req;
+       if (req && fc->conn_init) {
+               fc->destroy_req = NULL;
+               req->in.h.opcode = FUSE_DESTROY;
+               req->force = 1;
+               request_send(fc, req);
+               fuse_put_request(fc, req);
+       }
+}
+
 static void fuse_put_super(struct super_block *sb)
 {
        struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+       fuse_send_destroy(fc);
        spin_lock(&fc->lock);
        fc->connected = 0;
        fc->blocked = 0;
@@ -274,6 +288,7 @@ enum {
        OPT_DEFAULT_PERMISSIONS,
        OPT_ALLOW_OTHER,
        OPT_MAX_READ,
+       OPT_BLKSIZE,
        OPT_ERR
 };
 
@@ -285,14 +300,16 @@ static match_table_t tokens = {
        {OPT_DEFAULT_PERMISSIONS,       "default_permissions"},
        {OPT_ALLOW_OTHER,               "allow_other"},
        {OPT_MAX_READ,                  "max_read=%u"},
+       {OPT_BLKSIZE,                   "blksize=%u"},
        {OPT_ERR,                       NULL}
 };
 
-static int parse_fuse_opt(char *opt, struct fuse_mount_data *d)
+static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
        char *p;
        memset(d, 0, sizeof(struct fuse_mount_data));
        d->max_read = ~0;
+       d->blksize = 512;
 
        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
@@ -345,6 +362,12 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d)
                        d->max_read = value;
                        break;
 
+               case OPT_BLKSIZE:
+                       if (!is_bdev || match_int(&args[0], &value))
+                               return 0;
+                       d->blksize = value;
+                       break;
+
                default:
                        return 0;
                }
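The parse_fuse_opt() changes above add a blksize=%u token that defaults to 512 and is only honoured for block-device ("fuseblk") mounts. The kernel uses match_token()/match_int(); the sketch below reproduces only the strsep() loop and the is_bdev check in plain C, with sscanf() standing in for the token matcher.

#define _DEFAULT_SOURCE                         /* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

struct mount_opts_demo { unsigned max_read; unsigned blksize; };

static int parse_opts_demo(char *opt, struct mount_opts_demo *d, int is_bdev)
{
        char *p;

        memset(d, 0, sizeof(*d));
        d->max_read = ~0u;
        d->blksize = 512;                       /* default, as in the patch */

        while ((p = strsep(&opt, ",")) != NULL) {
                if (!*p)
                        continue;
                if (sscanf(p, "max_read=%u", &d->max_read) == 1)
                        continue;
                if (sscanf(p, "blksize=%u", &d->blksize) == 1) {
                        if (!is_bdev)
                                return 0;       /* blksize only valid on fuseblk */
                        continue;
                }
                return 0;                       /* unknown option */
        }
        return 1;
}

int main(void)
{
        char opts[] = "max_read=65536,blksize=4096";
        struct mount_opts_demo d;

        printf("ok=%d blksize=%u\n", parse_opts_demo(opts, &d, 1), d.blksize);
        return 0;
}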
@@ -400,6 +423,8 @@ static struct fuse_conn *new_conn(void)
 void fuse_conn_put(struct fuse_conn *fc)
 {
        if (atomic_dec_and_test(&fc->count)) {
+               if (fc->destroy_req)
+                       fuse_request_free(fc->destroy_req);
                mutex_destroy(&fc->inst_mutex);
                kfree(fc);
        }
@@ -456,6 +481,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
+               fc->conn_init = 1;
        }
        fuse_put_request(fc, req);
        fc->blocked = 0;
@@ -500,15 +526,23 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        struct dentry *root_dentry;
        struct fuse_req *init_req;
        int err;
+       int is_bdev = sb->s_bdev != NULL;
 
        if (sb->s_flags & MS_MANDLOCK)
                return -EINVAL;
 
-       if (!parse_fuse_opt((char *) data, &d))
+       if (!parse_fuse_opt((char *) data, &d, is_bdev))
                return -EINVAL;
 
-       sb->s_blocksize = PAGE_CACHE_SIZE;
-       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       if (is_bdev) {
+#ifdef CONFIG_BLOCK
+               if (!sb_set_blocksize(sb, d.blksize))
+                       return -EINVAL;
+#endif
+       } else {
+               sb->s_blocksize = PAGE_CACHE_SIZE;
+               sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       }
        sb->s_magic = FUSE_SUPER_MAGIC;
        sb->s_op = &fuse_super_operations;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -547,6 +581,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (!init_req)
                goto err_put_root;
 
+       if (is_bdev) {
+               fc->destroy_req = fuse_request_alloc();
+               if (!fc->destroy_req)
+                       goto err_put_root;
+       }
+
        mutex_lock(&fuse_mutex);
        err = -EINVAL;
        if (file->private_data)
@@ -598,10 +638,47 @@ static struct file_system_type fuse_fs_type = {
        .kill_sb        = kill_anon_super,
 };
 
+#ifdef CONFIG_BLOCK
+static int fuse_get_sb_blk(struct file_system_type *fs_type,
+                          int flags, const char *dev_name,
+                          void *raw_data, struct vfsmount *mnt)
+{
+       return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
+                          mnt);
+}
+
+static struct file_system_type fuseblk_fs_type = {
+       .owner          = THIS_MODULE,
+       .name           = "fuseblk",
+       .get_sb         = fuse_get_sb_blk,
+       .kill_sb        = kill_block_super,
+       .fs_flags       = FS_REQUIRES_DEV,
+};
+
+static inline int register_fuseblk(void)
+{
+       return register_filesystem(&fuseblk_fs_type);
+}
+
+static inline void unregister_fuseblk(void)
+{
+       unregister_filesystem(&fuseblk_fs_type);
+}
+#else
+static inline int register_fuseblk(void)
+{
+       return 0;
+}
+
+static inline void unregister_fuseblk(void)
+{
+}
+#endif
+
 static decl_subsys(fuse, NULL, NULL);
 static decl_subsys(connections, NULL, NULL);
 
-static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
+static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
                                 unsigned long flags)
 {
        struct inode * inode = foo;
@@ -617,24 +694,34 @@ static int __init fuse_fs_init(void)
 
        err = register_filesystem(&fuse_fs_type);
        if (err)
-               printk("fuse: failed to register filesystem\n");
-       else {
-               fuse_inode_cachep = kmem_cache_create("fuse_inode",
-                                                     sizeof(struct fuse_inode),
-                                                     0, SLAB_HWCACHE_ALIGN,
-                                                     fuse_inode_init_once, NULL);
-               if (!fuse_inode_cachep) {
-                       unregister_filesystem(&fuse_fs_type);
-                       err = -ENOMEM;
-               }
-       }
+               goto out;
 
+       err = register_fuseblk();
+       if (err)
+               goto out_unreg;
+
+       fuse_inode_cachep = kmem_cache_create("fuse_inode",
+                                             sizeof(struct fuse_inode),
+                                             0, SLAB_HWCACHE_ALIGN,
+                                             fuse_inode_init_once, NULL);
+       err = -ENOMEM;
+       if (!fuse_inode_cachep)
+               goto out_unreg2;
+
+       return 0;
+
+ out_unreg2:
+       unregister_fuseblk();
+ out_unreg:
+       unregister_filesystem(&fuse_fs_type);
+ out:
        return err;
 }
 
 static void fuse_fs_cleanup(void)
 {
        unregister_filesystem(&fuse_fs_type);
+       unregister_fuseblk();
        kmem_cache_destroy(fuse_inode_cachep);
 }
 
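fuse_fs_init() above is restructured into the usual ladder of unwind labels: each registration step that succeeds gains a matching cleanup label, and a failure jumps to the label that undoes only what has already been done (out_unreg2 drops fuseblk, out_unreg drops the main filesystem type). The standalone sketch below mimics that control flow with dummy step/undo helpers; the step names are illustrative, not the real fuse calls.

#include <stdio.h>

static int step(const char *name, int fail) { printf("do %s\n", name); return fail ? -1 : 0; }
static void undo(const char *name)          { printf("undo %s\n", name); }

static int init_demo(void)
{
        int err;

        err = step("register main fs", 0);
        if (err)
                goto out;

        err = step("register blk fs", 0);
        if (err)
                goto out_unreg;

        err = step("create inode cache", 1);    /* force a failure here */
        if (err)
                goto out_unreg2;

        return 0;

out_unreg2:
        undo("register blk fs");
out_unreg:
        undo("register main fs");
out:
        return err;
}

int main(void)
{
        return init_demo() ? 1 : 0;
}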
index 8c27de8b95682134fcda8c33f7f1e687a4e8e28f..c0791cbacad91e614652be6e10a868099107098e 100644 (file)
@@ -2,6 +2,7 @@ config GFS2_FS
        tristate "GFS2 file system support"
        depends on EXPERIMENTAL
        select FS_POSIX_ACL
+       select CRC32
        help
        A cluster filesystem.
 
index 5f959b8ce4065dae9e240c3db24eb52f8ec3f589..6e80844367ee9b5edd81143bc659ab14834809a5 100644 (file)
@@ -74,11 +74,11 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
 {
        if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
                return -EOPNOTSUPP;
-       if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER))
+       if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
                return -EPERM;
-       if (S_ISLNK(ip->i_di.di_mode))
+       if (S_ISLNK(ip->i_inode.i_mode))
                return -EOPNOTSUPP;
-       if (!access && !S_ISDIR(ip->i_di.di_mode))
+       if (!access && !S_ISDIR(ip->i_inode.i_mode))
                return -EACCES;
 
        return 0;
@@ -145,14 +145,14 @@ out:
 }
 
 /**
- * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something
+ * gfs2_check_acl - Check an ACL to see if we're allowed to do something
  * @inode: the file we want to do something to
  * @mask: what we want to do
  *
  * Returns: errno
  */
 
-int gfs2_check_acl_locked(struct inode *inode, int mask)
+int gfs2_check_acl(struct inode *inode, int mask)
 {
        struct posix_acl *acl = NULL;
        int error;
@@ -170,21 +170,6 @@ int gfs2_check_acl_locked(struct inode *inode, int mask)
        return -EAGAIN;
 }
 
-int gfs2_check_acl(struct inode *inode, int mask)
-{
-       struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_holder i_gh;
-       int error;
-
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
-       if (!error) {
-               error = gfs2_check_acl_locked(inode, mask);
-               gfs2_glock_dq_uninit(&i_gh);
-       }
-
-       return error;
-}
-
 static int munge_mode(struct gfs2_inode *ip, mode_t mode)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -198,10 +183,10 @@ static int munge_mode(struct gfs2_inode *ip, mode_t mode)
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_assert_withdraw(sdp,
-                               (ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT));
-               ip->i_di.di_mode = mode;
+                               (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
+               ip->i_inode.i_mode = mode;
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -215,12 +200,12 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct posix_acl *acl = NULL, *clone;
        struct gfs2_ea_request er;
-       mode_t mode = ip->i_di.di_mode;
+       mode_t mode = ip->i_inode.i_mode;
        int error;
 
        if (!sdp->sd_args.ar_posix_acl)
                return 0;
-       if (S_ISLNK(ip->i_di.di_mode))
+       if (S_ISLNK(ip->i_inode.i_mode))
                return 0;
 
        memset(&er, 0, sizeof(struct gfs2_ea_request));
@@ -232,7 +217,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
                return error;
        if (!acl) {
                mode &= ~current->fs->umask;
-               if (mode != ip->i_di.di_mode)
+               if (mode != ip->i_inode.i_mode)
                        error = munge_mode(ip, mode);
                return error;
        }
@@ -244,7 +229,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
        posix_acl_release(acl);
        acl = clone;
 
-       if (S_ISDIR(ip->i_di.di_mode)) {
+       if (S_ISDIR(ip->i_inode.i_mode)) {
                er.er_name = GFS2_POSIX_ACL_DEFAULT;
                er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
                error = gfs2_system_eaops.eo_set(ip, &er);
index 05c294fe0d780f9c28a4cef0d8ebdcfa2a5deffc..6751930bfb648e809eb90e028c0caa8458e1e5e8 100644 (file)
@@ -31,7 +31,6 @@ int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
                          struct gfs2_ea_request *er,
                          int *remove, mode_t *mode);
 int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
-int gfs2_check_acl_locked(struct inode *inode, int mask);
 int gfs2_check_acl(struct inode *inode, int mask);
 int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
 int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
index 06e9a8cb45e959b89e744d64168da1dc4655f71b..8240c1ff94f4ef009151b7c1d9d4170c5a192553 100644 (file)
@@ -38,8 +38,8 @@ struct metapath {
 };
 
 typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh,
-                            struct buffer_head *bh, u64 *top,
-                            u64 *bottom, unsigned int height,
+                            struct buffer_head *bh, __be64 *top,
+                            __be64 *bottom, unsigned int height,
                             void *data);
 
 struct strip_mine {
@@ -163,6 +163,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
        if (ip->i_di.di_size) {
                *(__be64 *)(di + 1) = cpu_to_be64(block);
                ip->i_di.di_blocks++;
+               gfs2_set_inode_blocks(&ip->i_inode);
                di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
        }
 
@@ -230,7 +231,7 @@ static int build_height(struct inode *inode, unsigned height)
        struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
        struct gfs2_dinode *di;
        int error;
-       u64 *bp;
+       __be64 *bp;
        u64 bn;
        unsigned n;
 
@@ -255,7 +256,7 @@ static int build_height(struct inode *inode, unsigned height)
                                          GFS2_FORMAT_IN);
                        gfs2_buffer_clear_tail(blocks[n],
                                               sizeof(struct gfs2_meta_header));
-                       bp = (u64 *)(blocks[n]->b_data +
+                       bp = (__be64 *)(blocks[n]->b_data +
                                     sizeof(struct gfs2_meta_header));
                        *bp = cpu_to_be64(blocks[n+1]->b_blocknr);
                        brelse(blocks[n]);
@@ -272,6 +273,7 @@ static int build_height(struct inode *inode, unsigned height)
        *(__be64 *)(di + 1) = cpu_to_be64(bn);
        ip->i_di.di_height += new_height;
        ip->i_di.di_blocks += new_height;
+       gfs2_set_inode_blocks(&ip->i_inode);
        di->di_height = cpu_to_be16(ip->i_di.di_height);
        di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
        brelse(dibh);
@@ -360,15 +362,15 @@ static void find_metapath(struct gfs2_inode *ip, u64 block,
  * metadata tree.
  */
 
-static inline u64 *metapointer(struct buffer_head *bh, int *boundary,
+static inline __be64 *metapointer(struct buffer_head *bh, int *boundary,
                               unsigned int height, const struct metapath *mp)
 {
        unsigned int head_size = (height > 0) ?
                sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
-       u64 *ptr;
+       __be64 *ptr;
        *boundary = 0;
-       ptr = ((u64 *)(bh->b_data + head_size)) + mp->mp_list[height];
-       if (ptr + 1 == (u64 *)(bh->b_data + bh->b_size))
+       ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
+       if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
                *boundary = 1;
        return ptr;
 }
@@ -394,7 +396,7 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
                        int *new, u64 *block)
 {
        int boundary;
-       u64 *ptr = metapointer(bh, &boundary, height, mp);
+       __be64 *ptr = metapointer(bh, &boundary, height, mp);
 
        if (*ptr) {
                *block = be64_to_cpu(*ptr);
@@ -415,17 +417,35 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
 
        *ptr = cpu_to_be64(*block);
        ip->i_di.di_blocks++;
+       gfs2_set_inode_blocks(&ip->i_inode);
 
        *new = 1;
        return 0;
 }
 
+static inline void bmap_lock(struct inode *inode, int create)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       if (create)
+               down_write(&ip->i_rw_mutex);
+       else
+               down_read(&ip->i_rw_mutex);
+}
+
+static inline void bmap_unlock(struct inode *inode, int create)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       if (create)
+               up_write(&ip->i_rw_mutex);
+       else
+               up_read(&ip->i_rw_mutex);
+}
+
 /**
- * gfs2_block_pointers - Map a block from an inode to a disk block
+ * gfs2_block_map - Map a block from an inode to a disk block
  * @inode: The inode
  * @lblock: The logical block number
- * @map_bh: The bh to be mapped
- * @mp: metapath to use
+ * @bh_map: The bh to be mapped
  *
  * Find the block number on the current device which corresponds to an
  * inode's block. If the block had to be created, "new" will be set.
@@ -433,8 +453,8 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
  * Returns: errno
  */
 
-static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
-                              struct buffer_head *bh_map, struct metapath *mp)
+int gfs2_block_map(struct inode *inode, u64 lblock, int create,
+                  struct buffer_head *bh_map)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -448,57 +468,61 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
        u64 dblock = 0;
        int boundary;
        unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
+       struct metapath mp;
+       u64 size;
 
        BUG_ON(maxlen == 0);
 
        if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
                return 0;
 
+       bmap_lock(inode, create);
+       clear_buffer_mapped(bh_map);
+       clear_buffer_new(bh_map);
+       clear_buffer_boundary(bh_map);
        bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
-
-       height = calc_tree_height(ip, (lblock + 1) * bsize);
-       if (ip->i_di.di_height < height) {
-               if (!create)
-                       return 0;
-
-               error = build_height(inode, height);
-               if (error)
-                       return error;
+       size = (lblock + 1) * bsize;
+
+       if (size > ip->i_di.di_size) {
+               height = calc_tree_height(ip, size);
+               if (ip->i_di.di_height < height) {
+                       if (!create)
+                               goto out_ok;
+       
+                       error = build_height(inode, height);
+                       if (error)
+                               goto out_fail;
+               }
        }
 
-       find_metapath(ip, lblock, mp);
+       find_metapath(ip, lblock, &mp);
        end_of_metadata = ip->i_di.di_height - 1;
-
        error = gfs2_meta_inode_buffer(ip, &bh);
        if (error)
-               return error;
+               goto out_fail;
 
        for (x = 0; x < end_of_metadata; x++) {
-               lookup_block(ip, bh, x, mp, create, &new, &dblock);
+               lookup_block(ip, bh, x, &mp, create, &new, &dblock);
                brelse(bh);
                if (!dblock)
-                       return 0;
+                       goto out_ok;
 
                error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
                if (error)
-                       return error;
+                       goto out_fail;
        }
 
-       boundary = lookup_block(ip, bh, end_of_metadata, mp, create, &new, &dblock);
-       clear_buffer_mapped(bh_map);
-       clear_buffer_new(bh_map);
-       clear_buffer_boundary(bh_map);
-
+       boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock);
        if (dblock) {
                map_bh(bh_map, inode->i_sb, dblock);
                if (boundary)
-                       set_buffer_boundary(bh);
+                       set_buffer_boundary(bh_map);
                if (new) {
                        struct buffer_head *dibh;
                        error = gfs2_meta_inode_buffer(ip, &dibh);
                        if (!error) {
                                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-                               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+                               gfs2_dinode_out(ip, dibh->b_data);
                                brelse(dibh);
                        }
                        set_buffer_new(bh_map);
@@ -507,8 +531,8 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
                while(--maxlen && !buffer_boundary(bh_map)) {
                        u64 eblock;
 
-                       mp->mp_list[end_of_metadata]++;
-                       boundary = lookup_block(ip, bh, end_of_metadata, mp, 0, &new, &eblock);
+                       mp.mp_list[end_of_metadata]++;
+                       boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock);
                        if (eblock != ++dblock)
                                break;
                        bh_map->b_size += (1 << inode->i_blkbits);
@@ -518,43 +542,15 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
        }
 out_brelse:
        brelse(bh);
-       return 0;
-}
-
-
-static inline void bmap_lock(struct inode *inode, int create)
-{
-       struct gfs2_inode *ip = GFS2_I(inode);
-       if (create)
-               down_write(&ip->i_rw_mutex);
-       else
-               down_read(&ip->i_rw_mutex);
-}
-
-static inline void bmap_unlock(struct inode *inode, int create)
-{
-       struct gfs2_inode *ip = GFS2_I(inode);
-       if (create)
-               up_write(&ip->i_rw_mutex);
-       else
-               up_read(&ip->i_rw_mutex);
-}
-
-int gfs2_block_map(struct inode *inode, u64 lblock, int create,
-                  struct buffer_head *bh)
-{
-       struct metapath mp;
-       int ret;
-
-       bmap_lock(inode, create);
-       ret = gfs2_block_pointers(inode, lblock, create, bh, &mp);
+out_ok:
+       error = 0;
+out_fail:
        bmap_unlock(inode, create);
-       return ret;
+       return error;
 }
 
 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 {
-       struct metapath mp;
        struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
        int ret;
        int create = *new;
@@ -564,9 +560,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
        BUG_ON(!new);
 
        bh.b_size = 1 << (inode->i_blkbits + 5);
-       bmap_lock(inode, create);
-       ret = gfs2_block_pointers(inode, lblock, create, &bh, &mp);
-       bmap_unlock(inode, create);
+       ret = gfs2_block_map(inode, lblock, create, &bh);
        *extlen = bh.b_size >> inode->i_blkbits;
        *dblock = bh.b_blocknr;
        if (buffer_new(&bh))
@@ -600,7 +594,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *bh = NULL;
-       u64 *top, *bottom;
+       __be64 *top, *bottom;
        u64 bn;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);
@@ -611,17 +605,17 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
                        return error;
                dibh = bh;
 
-               top = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
-               bottom = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
+               top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
+               bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
        } else {
                error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
                if (error)
                        return error;
 
-               top = (u64 *)(bh->b_data + mh_size) +
+               top = (__be64 *)(bh->b_data + mh_size) +
                                  (first ? mp->mp_list[height] : 0);
 
-               bottom = (u64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
+               bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
        }
 
        error = bc(ip, dibh, bh, top, bottom, height, data);
@@ -660,7 +654,7 @@ out:
  */
 
 static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
-                   struct buffer_head *bh, u64 *top, u64 *bottom,
+                   struct buffer_head *bh, __be64 *top, __be64 *bottom,
                    unsigned int height, void *data)
 {
        struct strip_mine *sm = data;
@@ -668,7 +662,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
        struct gfs2_rgrp_list rlist;
        u64 bn, bstart;
        u32 blen;
-       u64 *p;
+       __be64 *p;
        unsigned int rg_blocks = 0;
        int metadata;
        unsigned int revokes = 0;
@@ -770,6 +764,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
+               gfs2_set_inode_blocks(&ip->i_inode);
        }
        if (bstart) {
                if (metadata)
@@ -778,9 +773,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
                        gfs2_free_data(ip, bstart, blen);
        }
 
-       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
 
        up_write(&ip->i_rw_mutex);
 
@@ -819,7 +814,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
        if (error)
                goto out;
 
-       error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+       error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
        if (error)
                goto out_gunlock_q;
 
@@ -853,14 +848,14 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
        }
 
        ip->i_di.di_size = size;
-       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto out_end_trans;
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
 
 out_end_trans:
@@ -968,9 +963,9 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 
        if (gfs2_is_stuffed(ip)) {
                ip->i_di.di_size = size;
-               ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+               ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
                error = 1;
 
@@ -980,10 +975,10 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
 
                if (!error) {
                        ip->i_di.di_size = size;
-                       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+                       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
                        ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
                        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-                       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+                       gfs2_dinode_out(ip, dibh->b_data);
                }
        }
 
@@ -1053,11 +1048,11 @@ static int trunc_end(struct gfs2_inode *ip)
                        ip->i_num.no_addr;
                gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
        }
-       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
        ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
 
 out:
@@ -1109,7 +1104,7 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
 {
        int error;
 
-       if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_di.di_mode)))
+       if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
                return -EINVAL;
 
        if (size > ip->i_di.di_size)
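The block-mapping hunks above (gfs2_block_map(), build_height(), metapointer() and friends) retype the indirect-pointer walks from u64 * to __be64 *, making explicit that on-disk GFS2 block numbers are big-endian and must go through cpu_to_be64()/be64_to_cpu(). A portable userspace sketch of that round trip, with manual byte packing standing in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* store a host value as 8 big-endian bytes, as the on-disk format requires */
static void put_be64(uint8_t *p, uint64_t v)
{
        for (int i = 7; i >= 0; i--) {
                p[i] = (uint8_t)(v & 0xff);
                v >>= 8;
        }
}

static uint64_t get_be64(const uint8_t *p)
{
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
                v = (v << 8) | p[i];
        return v;
}

int main(void)
{
        uint8_t ondisk[8];
        uint64_t blocknr = 0x123456789abcdef0ULL;

        put_be64(ondisk, blocknr);              /* analogue of *bp = cpu_to_be64(bn) */
        printf("round trip ok: %d\n", get_be64(ondisk) == blocknr);
        return 0;
}

Annotating the pointers as __be64 lets sparse flag any place where a raw big-endian value would be used without conversion.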
index cab1f68d468518ad9ed37f56b378c170bbab3710..683cb5bda870fc372e79500c105ddf16c9ab93b9 100644 (file)
@@ -112,6 +112,7 @@ int gfs2_logd(void *data)
        struct gfs2_sbd *sdp = data;
        struct gfs2_holder ji_gh;
        unsigned long t;
+       int need_flush;
 
        while (!kthread_should_stop()) {
                /* Advance the log tail */
@@ -120,8 +121,10 @@ int gfs2_logd(void *data)
                    gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
 
                gfs2_ail1_empty(sdp, DIO_ALL);
-
-               if (time_after_eq(jiffies, t)) {
+               gfs2_log_lock(sdp);
+               need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
+               gfs2_log_unlock(sdp);
+               if (need_flush || time_after_eq(jiffies, t)) {
                        gfs2_log_flush(sdp, NULL);
                        sdp->sd_log_flush_time = jiffies;
                }
index e24af28b1a121e556ce19770682c4c9ae4df5fe1..0fdcb7713cd9378c1fe723b61b6eda9e7a1711ee 100644 (file)
@@ -131,8 +131,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
        memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
        if (ip->i_di.di_size < offset + size)
                ip->i_di.di_size = offset + size;
-       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
+       gfs2_dinode_out(ip, dibh->b_data);
 
        brelse(dibh);
 
@@ -229,10 +229,10 @@ out:
 
        if (ip->i_di.di_size < offset + copied)
                ip->i_di.di_size = offset + copied;
-       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
+       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
 
        return copied;
@@ -340,10 +340,15 @@ fail:
        return (copied) ? copied : error;
 }
 
+static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
+{
+       return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
+}
+
 static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
                                     const struct qstr *name, int ret)
 {
-       if (dent->de_inum.no_addr != 0 &&
+       if (!gfs2_dirent_sentinel(dent) &&
            be32_to_cpu(dent->de_hash) == name->hash &&
            be16_to_cpu(dent->de_name_len) == name->len &&
            memcmp(dent+1, name->name, name->len) == 0)
@@ -388,7 +393,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
        unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
        unsigned totlen = be16_to_cpu(dent->de_rec_len);
 
-       if (!dent->de_inum.no_addr)
+       if (gfs2_dirent_sentinel(dent))
                actual = GFS2_DIRENT_SIZE(0);
        if (totlen - actual >= required)
                return 1;
@@ -405,7 +410,7 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
                              void *opaque)
 {
        struct dirent_gather *g = opaque;
-       if (dent->de_inum.no_addr) {
+       if (!gfs2_dirent_sentinel(dent)) {
                g->pdent[g->offset++] = dent;
        }
        return 0;
@@ -433,10 +438,10 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
        if (unlikely(offset + size > len))
                goto error;
        msg = "zero inode number";
-       if (unlikely(!first && !dent->de_inum.no_addr))
+       if (unlikely(!first && gfs2_dirent_sentinel(dent)))
                goto error;
        msg = "name length is greater than space in dirent";
-       if (dent->de_inum.no_addr &&
+       if (!gfs2_dirent_sentinel(dent) &&
            unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
                     size))
                goto error;
@@ -598,7 +603,7 @@ static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
                return ret;
 
         /* Only the first dent could ever have de_inum.no_addr == 0 */
-       if (!tmp->de_inum.no_addr) {
+       if (gfs2_dirent_sentinel(tmp)) {
                gfs2_consist_inode(dip);
                return -EIO;
        }
@@ -621,7 +626,7 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
 {
        u16 cur_rec_len, prev_rec_len;
 
-       if (!cur->de_inum.no_addr) {
+       if (gfs2_dirent_sentinel(cur)) {
                gfs2_consist_inode(dip);
                return;
        }
@@ -633,7 +638,8 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
           out the inode number and return.  */
 
        if (!prev) {
-               cur->de_inum.no_addr = 0;       /* No endianess worries */
+               cur->de_inum.no_addr = 0;
+               cur->de_inum.no_formal_ino = 0;
                return;
        }
 
@@ -664,7 +670,7 @@ static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
        struct gfs2_dirent *ndent;
        unsigned offset = 0, totlen;
 
-       if (dent->de_inum.no_addr)
+       if (!gfs2_dirent_sentinel(dent))
                offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
        totlen = be16_to_cpu(dent->de_rec_len);
        BUG_ON(offset + name->len > totlen);
@@ -713,12 +719,12 @@ static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
 static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
                       u64 *leaf_out)
 {
-       u64 leaf_no;
+       __be64 leaf_no;
        int error;
 
        error = gfs2_dir_read_data(dip, (char *)&leaf_no,
-                                   index * sizeof(u64),
-                                   sizeof(u64), 0);
+                                   index * sizeof(__be64),
+                                   sizeof(__be64), 0);
        if (error != sizeof(u64))
                return (error < 0) ? error : -EIO;
 
@@ -837,7 +843,8 @@ static int dir_make_exhash(struct inode *inode)
        struct gfs2_leaf *leaf;
        int y;
        u32 x;
-       u64 *lp, bn;
+       __be64 *lp;
+       u64 bn;
        int error;
 
        error = gfs2_meta_inode_buffer(dip, &dibh);
@@ -893,20 +900,20 @@ static int dir_make_exhash(struct inode *inode)
        gfs2_trans_add_bh(dip->i_gl, dibh, 1);
        gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
-       lp = (u64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
+       lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
 
        for (x = sdp->sd_hash_ptrs; x--; lp++)
                *lp = cpu_to_be64(bn);
 
        dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
        dip->i_di.di_blocks++;
+       gfs2_set_inode_blocks(&dip->i_inode);
        dip->i_di.di_flags |= GFS2_DIF_EXHASH;
-       dip->i_di.di_payload_format = 0;
 
        for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
        dip->i_di.di_depth = y;
 
-       gfs2_dinode_out(&dip->i_di, dibh->b_data);
+       gfs2_dinode_out(dip, dibh->b_data);
 
        brelse(dibh);
 
@@ -929,7 +936,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
        struct gfs2_leaf *nleaf, *oleaf;
        struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
        u32 start, len, half_len, divider;
-       u64 bn, *lp, leaf_no;
+       u64 bn, leaf_no;
+       __be64 *lp;
        u32 index;
        int x, moved = 0;
        int error;
@@ -974,7 +982,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
        /* Change the pointers.
           Don't bother distinguishing stuffed from non-stuffed.
           This code is complicated enough already. */
-       lp = kmalloc(half_len * sizeof(u64), GFP_NOFS | __GFP_NOFAIL);
+       lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
        /*  Change the pointers  */
        for (x = 0; x < half_len; x++)
                lp[x] = cpu_to_be64(bn);
@@ -1000,7 +1008,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
                if (dirent_next(dip, obh, &next))
                        next = NULL;
 
-               if (dent->de_inum.no_addr &&
+               if (!gfs2_dirent_sentinel(dent) &&
                    be32_to_cpu(dent->de_hash) < divider) {
                        struct qstr str;
                        str.name = (char*)(dent+1);
@@ -1037,7 +1045,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
        error = gfs2_meta_inode_buffer(dip, &dibh);
        if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
                dip->i_di.di_blocks++;
-               gfs2_dinode_out(&dip->i_di, dibh->b_data);
+               gfs2_set_inode_blocks(&dip->i_inode);
+               gfs2_dinode_out(dip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -1117,7 +1126,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
        error = gfs2_meta_inode_buffer(dip, &dibh);
        if (!gfs2_assert_withdraw(sdp, !error)) {
                dip->i_di.di_depth++;
-               gfs2_dinode_out(&dip->i_di, dibh->b_data);
+               gfs2_dinode_out(dip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -1194,7 +1203,7 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
                           int *copied)
 {
        const struct gfs2_dirent *dent, *dent_next;
-       struct gfs2_inum inum;
+       struct gfs2_inum_host inum;
        u64 off, off_next;
        unsigned int x, y;
        int run = 0;
@@ -1341,7 +1350,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
        u32 hsize, len = 0;
        u32 ht_offset, lp_offset, ht_offset_cur = -1;
        u32 hash, index;
-       u64 *lp;
+       __be64 *lp;
        int copied = 0;
        int error = 0;
        unsigned depth = 0;
@@ -1365,7 +1374,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
 
                if (ht_offset_cur != ht_offset) {
                        error = gfs2_dir_read_data(dip, (char *)lp,
-                                               ht_offset * sizeof(u64),
+                                               ht_offset * sizeof(__be64),
                                                sdp->sd_hash_bsize, 1);
                        if (error != sdp->sd_hash_bsize) {
                                if (error >= 0)
@@ -1456,7 +1465,7 @@ out:
  */
 
 int gfs2_dir_search(struct inode *dir, const struct qstr *name,
-                   struct gfs2_inum *inum, unsigned int *type)
+                   struct gfs2_inum_host *inum, unsigned int *type)
 {
        struct buffer_head *bh;
        struct gfs2_dirent *dent;
@@ -1515,7 +1524,8 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
                return error;
        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        ip->i_di.di_blocks++;
-       gfs2_dinode_out(&ip->i_di, bh->b_data);
+       gfs2_set_inode_blocks(&ip->i_inode);
+       gfs2_dinode_out(ip, bh->b_data);
        brelse(bh);
        return 0;
 }
@@ -1531,7 +1541,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
  */
 
 int gfs2_dir_add(struct inode *inode, const struct qstr *name,
-                const struct gfs2_inum *inum, unsigned type)
+                const struct gfs2_inum_host *inum, unsigned type)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct buffer_head *bh;
@@ -1558,8 +1568,8 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
                                break;
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        ip->i_di.di_entries++;
-                       ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
-                       gfs2_dinode_out(&ip->i_di, bh->b_data);
+                       ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
+                       gfs2_dinode_out(ip, bh->b_data);
                        brelse(bh);
                        error = 0;
                        break;
@@ -1644,8 +1654,8 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
                gfs2_consist_inode(dip);
        gfs2_trans_add_bh(dip->i_gl, bh, 1);
        dip->i_di.di_entries--;
-       dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds();
-       gfs2_dinode_out(&dip->i_di, bh->b_data);
+       dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
+       gfs2_dinode_out(dip, bh->b_data);
        brelse(bh);
        mark_inode_dirty(&dip->i_inode);
 
@@ -1666,7 +1676,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
  */
 
 int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
-                  struct gfs2_inum *inum, unsigned int new_type)
+                  struct gfs2_inum_host *inum, unsigned int new_type)
 {
        struct buffer_head *bh;
        struct gfs2_dirent *dent;
@@ -1692,8 +1702,8 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
                gfs2_trans_add_bh(dip->i_gl, bh, 1);
        }
 
-       dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds();
-       gfs2_dinode_out(&dip->i_di, bh->b_data);
+       dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
+       gfs2_dinode_out(dip, bh->b_data);
        brelse(bh);
        return 0;
 }
@@ -1715,7 +1725,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
        u32 hsize, len;
        u32 ht_offset, lp_offset, ht_offset_cur = -1;
        u32 index = 0;
-       u64 *lp;
+       __be64 *lp;
        u64 leaf_no;
        int error = 0;
 
@@ -1735,7 +1745,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
 
                if (ht_offset_cur != ht_offset) {
                        error = gfs2_dir_read_data(dip, (char *)lp,
-                                               ht_offset * sizeof(u64),
+                                               ht_offset * sizeof(__be64),
                                                sdp->sd_hash_bsize, 1);
                        if (error != sdp->sd_hash_bsize) {
                                if (error >= 0)
@@ -1859,6 +1869,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
                if (!dip->i_di.di_blocks)
                        gfs2_consist_inode(dip);
                dip->i_di.di_blocks--;
+               gfs2_set_inode_blocks(&dip->i_inode);
        }
 
        error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
@@ -1873,7 +1884,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
                goto out_end_trans;
 
        gfs2_trans_add_bh(dip->i_gl, dibh, 1);
-       gfs2_dinode_out(&dip->i_di, dibh->b_data);
+       gfs2_dinode_out(dip, dibh->b_data);
        brelse(dibh);
 
 out_end_trans:
index 371233419b073026eb1a96e0066368d3bde6b0df..b21b33668a5bc4ee2ea29c7658c2f8f343232900 100644 (file)
@@ -31,17 +31,17 @@ struct gfs2_inum;
 typedef int (*gfs2_filldir_t) (void *opaque,
                              const char *name, unsigned int length,
                              u64 offset,
-                             struct gfs2_inum *inum, unsigned int type);
+                             struct gfs2_inum_host *inum, unsigned int type);
 
 int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
-                   struct gfs2_inum *inum, unsigned int *type);
+                   struct gfs2_inum_host *inum, unsigned int *type);
 int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
-                const struct gfs2_inum *inum, unsigned int type);
+                const struct gfs2_inum_host *inum, unsigned int type);
 int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
 int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque,
                  gfs2_filldir_t filldir);
 int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
-                  struct gfs2_inum *new_inum, unsigned int new_type);
+                  struct gfs2_inum_host *new_inum, unsigned int new_type);
 
 int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
 
index 92c54e9b0dc3b8592cec55b33c0ba61d185ac37f..cd747c00f670363db6f2dbe41164858e56b4778f 100644 (file)
@@ -120,7 +120,7 @@ static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
 
        if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
                if (!(er->er_flags & GFS2_ERF_MODE)) {
-                       er->er_mode = ip->i_di.di_mode;
+                       er->er_mode = ip->i_inode.i_mode;
                        er->er_flags |= GFS2_ERF_MODE;
                }
                error = gfs2_acl_validate_set(ip, 1, er,
index a65a4ccfd4dd172c156e786dfb7648180c1362fc..ebebbdcd7057cf2b2dd1b700eb1ac18aadab5a92 100644 (file)
@@ -112,7 +112,7 @@ fail:
 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
 {
        struct buffer_head *bh, *eabh;
-       u64 *eablk, *end;
+       __be64 *eablk, *end;
        int error;
 
        error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
@@ -129,7 +129,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
                goto out;
        }
 
-       eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
+       eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
 
        for (; eablk < end; eablk++) {
@@ -224,7 +224,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        struct buffer_head *dibh;
-       u64 *dataptrs, bn = 0;
+       __be64 *dataptrs;
+       u64 bn = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
@@ -280,6 +281,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
+               gfs2_set_inode_blocks(&ip->i_inode);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);
@@ -299,9 +301,9 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
-               ip->i_di.di_ctime = get_seconds();
+               ip->i_inode.i_ctime.tv_sec = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -444,7 +446,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
-       u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
+       __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;
 
@@ -597,6 +599,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
        ea->ea_num_ptrs = 0;
 
        ip->i_di.di_blocks++;
+       gfs2_set_inode_blocks(&ip->i_inode);
 
        return 0;
 }
@@ -629,7 +632,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
-               u64 *dataptr = GFS2_EA2DATAPTRS(ea);
+               __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
@@ -648,6 +651,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
 
                        ip->i_di.di_blocks++;
+                       gfs2_set_inode_blocks(&ip->i_inode);
 
                        copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
                                                           data_len;
@@ -686,7 +690,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
        if (error)
                goto out;
 
-       error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+       error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
        if (error)
                goto out_gunlock_q;
 
@@ -710,13 +714,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
        if (!error) {
                if (er->er_flags & GFS2_ERF_MODE) {
                        gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
-                                           (ip->i_di.di_mode & S_IFMT) ==
+                                           (ip->i_inode.i_mode & S_IFMT) ==
                                            (er->er_mode & S_IFMT));
-                       ip->i_di.di_mode = er->er_mode;
+                       ip->i_inode.i_mode = er->er_mode;
                }
-               ip->i_di.di_ctime = get_seconds();
+               ip->i_inode.i_ctime.tv_sec = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -846,12 +850,12 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
 
        if (er->er_flags & GFS2_ERF_MODE) {
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
-                       (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
-               ip->i_di.di_mode = er->er_mode;
+                       (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
+               ip->i_inode.i_mode = er->er_mode;
        }
-       ip->i_di.di_ctime = get_seconds();
+       ip->i_inode.i_ctime.tv_sec = get_seconds();
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
 out:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -931,12 +935,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *indbh, *newbh;
-       u64 *eablk;
+       __be64 *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);
 
        if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
-               u64 *end;
+               __be64 *end;
 
                error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
                                       &indbh);
@@ -948,7 +952,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        goto out;
                }
 
-               eablk = (u64 *)(indbh->b_data + mh_size);
+               eablk = (__be64 *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;
 
                for (; eablk < end; eablk++)
@@ -971,11 +975,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);
 
-               eablk = (u64 *)(indbh->b_data + mh_size);
+               eablk = (__be64 *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_di.di_eattr);
                ip->i_di.di_eattr = blk;
                ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
                ip->i_di.di_blocks++;
+               gfs2_set_inode_blocks(&ip->i_inode);
 
                eablk++;
        }
@@ -1129,9 +1134,9 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
-               ip->i_di.di_ctime = get_seconds();
+               ip->i_inode.i_ctime.tv_sec = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -1202,7 +1207,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
-       u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
+       __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error;
 
@@ -1284,9 +1289,8 @@ int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
-               gfs2_inode_attr_out(ip);
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -1300,7 +1304,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrp_list rlist;
        struct buffer_head *indbh, *dibh;
-       u64 *eablk, *end;
+       __be64 *eablk, *end;
        unsigned int rg_blocks = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
@@ -1319,7 +1323,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
                goto out;
        }
 
-       eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
+       eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;
 
        for (; eablk < end; eablk++) {
@@ -1363,7 +1367,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
 
        gfs2_trans_add_bh(ip->i_gl, indbh, 1);
 
-       eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
+       eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        blen = 0;
 
@@ -1387,6 +1391,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
                if (!ip->i_di.di_blocks)
                        gfs2_consist_inode(ip);
                ip->i_di.di_blocks--;
+               gfs2_set_inode_blocks(&ip->i_inode);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);
@@ -1396,7 +1401,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -1441,11 +1446,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
        if (!ip->i_di.di_blocks)
                gfs2_consist_inode(ip);
        ip->i_di.di_blocks--;
+       gfs2_set_inode_blocks(&ip->i_inode);
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
index ffa65947d6869435330031873fcd8c3210ab8012..c82dbe01d7137f07d7bc1a90b59bb2ac22f5dd30 100644 (file)
@@ -19,7 +19,7 @@ struct iattr;
 #define GFS2_EA_SIZE(ea) \
 ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
       ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
-                                  (sizeof(u64) * (ea)->ea_num_ptrs)), 8)
+                                  (sizeof(__be64) * (ea)->ea_num_ptrs)), 8)
 
 #define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
 #define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)
@@ -29,13 +29,13 @@ ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)
 
 #define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
 ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
-      sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
+      sizeof(__be64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
 
 #define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
 #define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)
 
 #define GFS2_EA2DATAPTRS(ea) \
-((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
+((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
 
 #define GFS2_EA2NEXT(ea) \
 ((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))
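For context on the u64 to __be64 conversions in this file: GFS2 stores block pointers on disk in big-endian form, and annotating them as __be64 lets sparse flag any access that skips the byte-swap helpers. A minimal sketch of the access pattern (not part of the patch; the helper name first_unstuffed_ptr is illustrative, and the usual buffer_head and GFS2 on-disk headers are assumed):

/* Read the first extended-attribute data pointer from a metadata buffer. */
static u64 first_unstuffed_ptr(const struct buffer_head *bh)
{
        const __be64 *eablk = (const __be64 *)(bh->b_data +
                                               sizeof(struct gfs2_meta_header));
        return be64_to_cpu(*eablk);     /* convert to host order before use */
}

Writing back goes the other way, e.g. *eablk = cpu_to_be64(blkno);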
index 78fe0fae23ff538d26af8f3d15a877dd61368e72..438146904b5839727ac2c378c5df87f574cfedaa 100644 (file)
@@ -35,7 +35,7 @@
 
 struct greedy {
        struct gfs2_holder gr_gh;
-       struct work_struct gr_work;
+       struct delayed_work gr_work;
 };
 
 struct gfs2_gl_hash_bucket {
@@ -96,7 +96,7 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
 }
 #else /* not SMP, so no spinlocks required */
-static inline rwlock_t *gl_lock_addr(x)
+static inline rwlock_t *gl_lock_addr(unsigned int x)
 {
        return NULL;
 }
@@ -769,7 +769,7 @@ restart:
        } else {
                spin_unlock(&gl->gl_spin);
 
-               new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
+               new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
                if (!new_gh)
                        return;
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
@@ -785,21 +785,6 @@ out:
                gfs2_holder_put(new_gh);
 }
 
-void gfs2_glock_inode_squish(struct inode *inode)
-{
-       struct gfs2_holder gh;
-       struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
-       gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
-       set_bit(HIF_DEMOTE, &gh.gh_iflags);
-       spin_lock(&gl->gl_spin);
-       gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
-       list_add_tail(&gh.gh_list, &gl->gl_waiters2);
-       run_queue(gl);
-       spin_unlock(&gl->gl_spin);
-       wait_for_completion(&gh.gh_wait);
-       gfs2_holder_uninit(&gh);
-}
-
 /**
  * state_change - record that the glock is now in a different state
  * @gl: the glock
@@ -847,12 +832,12 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 
        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
-                       glops->go_inval(gl, DIO_METADATA | DIO_DATA);
+                       glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
-                       glops->go_inval(gl, DIO_DATA);
+                       glops->go_inval(gl, 0);
        }
 
        /*  Deal with each possible exit condition  */
@@ -954,7 +939,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
        gfs2_assert_warn(sdp, state != gl->gl_state);
 
        if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
-               glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
+               glops->go_sync(gl);
 
        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;
@@ -995,7 +980,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
        state_change(gl, LM_ST_UNLOCKED);
 
        if (glops->go_inval)
-               glops->go_inval(gl, DIO_METADATA | DIO_DATA);
+               glops->go_inval(gl, DIO_METADATA);
 
        if (gh) {
                spin_lock(&gl->gl_spin);
@@ -1041,7 +1026,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl)
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
        if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
-               glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
+               glops->go_sync(gl);
 
        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;
@@ -1244,9 +1229,6 @@ restart:
 
        clear_bit(GLF_PREFETCH, &gl->gl_flags);
 
-       if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
-               dump_glock(gl);
-
        return error;
 }
 
@@ -1368,9 +1350,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
        glops->go_xmote_th(gl, state, flags);
 }
 
-static void greedy_work(void *data)
+static void greedy_work(struct work_struct *work)
 {
-       struct greedy *gr = data;
+       struct greedy *gr = container_of(work, struct greedy, gr_work.work);
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
@@ -1422,7 +1404,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
 
        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
-       INIT_WORK(&gr->gr_work, greedy_work, gr);
+       INIT_DELAYED_WORK(&gr->gr_work, greedy_work);
 
        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);
@@ -1923,7 +1905,7 @@ out:
 
 static void scan_glock(struct gfs2_glock *gl)
 {
-       if (gl->gl_ops == &gfs2_inode_glops)
+       if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;
 
        if (gfs2_glmutex_trylock(gl)) {
@@ -2078,7 +2060,7 @@ static int dump_inode(struct gfs2_inode *ip)
        printk(KERN_INFO "    num = %llu %llu\n",
                    (unsigned long long)ip->i_num.no_formal_ino,
                    (unsigned long long)ip->i_num.no_addr);
-       printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
+       printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
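The greedy_work changes in this file follow the workqueue API of that era: handlers receive a struct work_struct * rather than a void *data, so the containing object is recovered with container_of(), and a delayed_work embeds its work_struct in a .work member. A minimal standalone sketch of the pattern (the names are illustrative, not GFS2 code):

struct greedy_like {
        struct delayed_work dwork;      /* embedded delayed work item */
        int payload;
};

static void greedy_like_fn(struct work_struct *w)
{
        /* step from the inner work_struct out to the containing object */
        struct greedy_like *g = container_of(w, struct greedy_like, dwork.work);
        printk(KERN_INFO "payload = %d\n", g->payload);
}

/* setup, typically at init time:
 *      INIT_DELAYED_WORK(&g->dwork, greedy_like_fn);
 *      schedule_delayed_work(&g->dwork, HZ);
 */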
index 2b2a889ee2cc9b516bbf8b9d1ac93a5421172c4c..fb39108fc05c2e378f6d349775ab808a986e3594 100644 (file)
@@ -27,8 +27,6 @@
 #define GL_ATIME               0x00000200
 #define GL_NOCACHE             0x00000400
 #define GL_NOCANCEL            0x00001000
-#define GL_AOP                 0x00004000
-#define GL_DUMP                        0x00008000
 
 #define GLR_TRYFAILED          13
 #define GLR_CANCELED           14
@@ -108,7 +106,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
                             const struct gfs2_glock_operations *glops,
                             unsigned int state, int flags);
-void gfs2_glock_inode_squish(struct inode *inode);
 
 /**
  * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
index 41a6b6818a505e1087ba5040d50c7018af6c64b2..b068d10bcb6e75b981eef0b977cd4ca912743960 100644 (file)
@@ -92,7 +92,7 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
 
        ip = gl->gl_object;
        inode = &ip->i_inode;
-       if (!ip || !S_ISREG(ip->i_di.di_mode))
+       if (!ip || !S_ISREG(inode->i_mode))
                return;
 
        if (!test_bit(GIF_PAGED, &ip->i_flags))
@@ -106,90 +106,21 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
        clear_bit(GIF_SW_PAGED, &ip->i_flags);
 }
 
-/**
- * gfs2_page_inval - Invalidate all pages associated with a glock
- * @gl: the glock
- *
- */
-
-static void gfs2_page_inval(struct gfs2_glock *gl)
-{
-       struct gfs2_inode *ip;
-       struct inode *inode;
-
-       ip = gl->gl_object;
-       inode = &ip->i_inode;
-       if (!ip || !S_ISREG(ip->i_di.di_mode))
-               return;
-
-       truncate_inode_pages(inode->i_mapping, 0);
-       gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
-       clear_bit(GIF_PAGED, &ip->i_flags);
-}
-
-/**
- * gfs2_page_wait - Wait for writeback of data
- * @gl: the glock
- *
- * Syncs data (not metadata) for a regular file.
- * No-op for all other types.
- */
-
-static void gfs2_page_wait(struct gfs2_glock *gl)
-{
-       struct gfs2_inode *ip = gl->gl_object;
-       struct inode *inode = &ip->i_inode;
-       struct address_space *mapping = inode->i_mapping;
-       int error;
-
-       if (!S_ISREG(ip->i_di.di_mode))
-               return;
-
-       error = filemap_fdatawait(mapping);
-
-       /* Put back any errors cleared by filemap_fdatawait()
-          so they can be caught by someone who can pass them
-          up to user space. */
-
-       if (error == -ENOSPC)
-               set_bit(AS_ENOSPC, &mapping->flags);
-       else if (error)
-               set_bit(AS_EIO, &mapping->flags);
-
-}
-
-static void gfs2_page_writeback(struct gfs2_glock *gl)
-{
-       struct gfs2_inode *ip = gl->gl_object;
-       struct inode *inode = &ip->i_inode;
-       struct address_space *mapping = inode->i_mapping;
-
-       if (!S_ISREG(ip->i_di.di_mode))
-               return;
-
-       filemap_fdatawrite(mapping);
-}
-
 /**
  * meta_go_sync - sync out the metadata for this glock
  * @gl: the glock
- * @flags: DIO_*
  *
  * Called when demoting or unlocking an EX glock.  We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
  * return to caller to demote/unlock the glock until I/O is complete.
  */
 
-static void meta_go_sync(struct gfs2_glock *gl, int flags)
+static void meta_go_sync(struct gfs2_glock *gl)
 {
-       if (!(flags & DIO_METADATA))
-               return;
-
        if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
                gfs2_log_flush(gl->gl_sbd, gl);
                gfs2_meta_sync(gl);
-               if (flags & DIO_RELEASE)
-                       gfs2_ail_empty_gl(gl);
+               gfs2_ail_empty_gl(gl);
        }
 
 }
@@ -264,31 +195,31 @@ static void inode_go_drop_th(struct gfs2_glock *gl)
 /**
  * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
  * @gl: the glock protecting the inode
- * @flags:
  *
  */
 
-static void inode_go_sync(struct gfs2_glock *gl, int flags)
+static void inode_go_sync(struct gfs2_glock *gl)
 {
-       int meta = (flags & DIO_METADATA);
-       int data = (flags & DIO_DATA);
+       struct gfs2_inode *ip = gl->gl_object;
+
+       if (ip && !S_ISREG(ip->i_inode.i_mode))
+               ip = NULL;
 
        if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
-               if (meta && data) {
-                       gfs2_page_writeback(gl);
-                       gfs2_log_flush(gl->gl_sbd, gl);
-                       gfs2_meta_sync(gl);
-                       gfs2_page_wait(gl);
-                       clear_bit(GLF_DIRTY, &gl->gl_flags);
-               } else if (meta) {
-                       gfs2_log_flush(gl->gl_sbd, gl);
-                       gfs2_meta_sync(gl);
-               } else if (data) {
-                       gfs2_page_writeback(gl);
-                       gfs2_page_wait(gl);
+               gfs2_log_flush(gl->gl_sbd, gl);
+               if (ip)
+                       filemap_fdatawrite(ip->i_inode.i_mapping);
+               gfs2_meta_sync(gl);
+               if (ip) {
+                       struct address_space *mapping = ip->i_inode.i_mapping;
+                       int error = filemap_fdatawait(mapping);
+                       if (error == -ENOSPC)
+                               set_bit(AS_ENOSPC, &mapping->flags);
+                       else if (error)
+                               set_bit(AS_EIO, &mapping->flags);
                }
-               if (flags & DIO_RELEASE)
-                       gfs2_ail_empty_gl(gl);
+               clear_bit(GLF_DIRTY, &gl->gl_flags);
+               gfs2_ail_empty_gl(gl);
        }
 }
 
@@ -301,15 +232,20 @@ static void inode_go_sync(struct gfs2_glock *gl, int flags)
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
+       struct gfs2_inode *ip = gl->gl_object;
        int meta = (flags & DIO_METADATA);
-       int data = (flags & DIO_DATA);
 
        if (meta) {
                gfs2_meta_inval(gl);
-               gl->gl_vn++;
+               if (ip)
+                       set_bit(GIF_INVALID, &ip->i_flags);
+       }
+
+       if (ip && S_ISREG(ip->i_inode.i_mode)) {
+               truncate_inode_pages(ip->i_inode.i_mapping, 0);
+               gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages);
+               clear_bit(GIF_PAGED, &ip->i_flags);
        }
-       if (data)
-               gfs2_page_inval(gl);
 }
 
 /**
@@ -351,11 +287,10 @@ static int inode_go_lock(struct gfs2_holder *gh)
        if (!ip)
                return 0;
 
-       if (ip->i_vn != gl->gl_vn) {
+       if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        return error;
-               gfs2_inode_attr_in(ip);
        }
 
        if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
@@ -379,11 +314,8 @@ static void inode_go_unlock(struct gfs2_holder *gh)
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_inode *ip = gl->gl_object;
 
-       if (ip == NULL)
-               return;
-       if (test_bit(GLF_DIRTY, &gl->gl_flags))
-               gfs2_inode_attr_in(ip);
-       gfs2_meta_cache_flush(ip);
+       if (ip)
+               gfs2_meta_cache_flush(ip);
 }
 
 /**
@@ -491,13 +423,13 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
-       struct gfs2_log_header head;
+       struct gfs2_log_header_host head;
        int error;
 
        if (gl->gl_state != LM_ST_UNLOCKED &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
-               j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
+               j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
                error = gfs2_find_jhead(sdp->sd_jdesc, &head);
                if (error)
index 118dc693d1117d178883c3283c19ce9ad0faeb55..734421edae85a21ac02bf00e6735228208965919 100644 (file)
@@ -14,8 +14,6 @@
 
 #define DIO_WAIT       0x00000010
 #define DIO_METADATA   0x00000020
-#define DIO_DATA       0x00000040
-#define DIO_RELEASE    0x00000080
 #define DIO_ALL                0x00000100
 
 struct gfs2_log_operations;
@@ -41,7 +39,7 @@ struct gfs2_log_operations {
        void (*lo_before_commit) (struct gfs2_sbd *sdp);
        void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
        void (*lo_before_scan) (struct gfs2_jdesc *jd,
-                               struct gfs2_log_header *head, int pass);
+                               struct gfs2_log_header_host *head, int pass);
        int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
                                 struct gfs2_log_descriptor *ld, __be64 *ptr,
                                 int pass);
@@ -67,8 +65,8 @@ struct gfs2_rgrpd {
        struct list_head rd_list_mru;
        struct list_head rd_recent;     /* Recently used rgrps */
        struct gfs2_glock *rd_gl;       /* Glock for this rgrp */
-       struct gfs2_rindex rd_ri;
-       struct gfs2_rgrp rd_rg;
+       struct gfs2_rindex_host rd_ri;
+       struct gfs2_rgrp_host rd_rg;
        u64 rd_rg_vn;
        struct gfs2_bitmap *rd_bits;
        unsigned int rd_bh_count;
@@ -103,18 +101,17 @@ struct gfs2_bufdata {
 };
 
 struct gfs2_glock_operations {
-       void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
-                            int flags);
-       void (*go_xmote_bh) (struct gfs2_glock * gl);
-       void (*go_drop_th) (struct gfs2_glock * gl);
-       void (*go_drop_bh) (struct gfs2_glock * gl);
-       void (*go_sync) (struct gfs2_glock * gl, int flags);
-       void (*go_inval) (struct gfs2_glock * gl, int flags);
-       int (*go_demote_ok) (struct gfs2_glock * gl);
-       int (*go_lock) (struct gfs2_holder * gh);
-       void (*go_unlock) (struct gfs2_holder * gh);
-       void (*go_callback) (struct gfs2_glock * gl, unsigned int state);
-       void (*go_greedy) (struct gfs2_glock * gl);
+       void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags);
+       void (*go_xmote_bh) (struct gfs2_glock *gl);
+       void (*go_drop_th) (struct gfs2_glock *gl);
+       void (*go_drop_bh) (struct gfs2_glock *gl);
+       void (*go_sync) (struct gfs2_glock *gl);
+       void (*go_inval) (struct gfs2_glock *gl, int flags);
+       int (*go_demote_ok) (struct gfs2_glock *gl);
+       int (*go_lock) (struct gfs2_holder *gh);
+       void (*go_unlock) (struct gfs2_holder *gh);
+       void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
+       void (*go_greedy) (struct gfs2_glock *gl);
        const int go_type;
 };
 
@@ -217,6 +214,7 @@ struct gfs2_alloc {
 };
 
 enum {
+       GIF_INVALID             = 0,
        GIF_QD_LOCKED           = 1,
        GIF_PAGED               = 2,
        GIF_SW_PAGED            = 3,
@@ -224,12 +222,11 @@ enum {
 
 struct gfs2_inode {
        struct inode i_inode;
-       struct gfs2_inum i_num;
+       struct gfs2_inum_host i_num;
 
        unsigned long i_flags;          /* GIF_... */
 
-       u64 i_vn;
-       struct gfs2_dinode i_di; /* To be replaced by ref to block */
+       struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
 
        struct gfs2_glock *i_gl; /* Move into i_gh? */
        struct gfs2_holder i_iopen_gh;
@@ -450,7 +447,7 @@ struct gfs2_sbd {
        struct super_block *sd_vfs_meta;
        struct kobject sd_kobj;
        unsigned long sd_flags; /* SDF_... */
-       struct gfs2_sb sd_sb;
+       struct gfs2_sb_host sd_sb;
 
        /* Constants computed on mount */
 
@@ -503,8 +500,8 @@ struct gfs2_sbd {
 
        spinlock_t sd_statfs_spin;
        struct mutex sd_statfs_mutex;
-       struct gfs2_statfs_change sd_statfs_master;
-       struct gfs2_statfs_change sd_statfs_local;
+       struct gfs2_statfs_change_host sd_statfs_master;
+       struct gfs2_statfs_change_host sd_statfs_local;
        unsigned long sd_statfs_sync_time;
 
        /* Resource group stuff */
index d470e5286ecd7715e0804a7fc54a6e34015c7559..d122074c45e16d3163dd61d9e819b6c01ae652aa 100644 (file)
 #include "trans.h"
 #include "util.h"
 
-/**
- * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
- * @ip: The GFS2 inode (with embedded disk inode data)
- * @inode:  The Linux VFS inode
- *
- */
-
-void gfs2_inode_attr_in(struct gfs2_inode *ip)
-{
-       struct inode *inode = &ip->i_inode;
-       struct gfs2_dinode *di = &ip->i_di;
-
-       inode->i_ino = ip->i_num.no_addr;
-
-       switch (di->di_mode & S_IFMT) {
-       case S_IFBLK:
-       case S_IFCHR:
-               inode->i_rdev = MKDEV(di->di_major, di->di_minor);
-               break;
-       default:
-               inode->i_rdev = 0;
-               break;
-       };
-
-       inode->i_mode = di->di_mode;
-       inode->i_nlink = di->di_nlink;
-       inode->i_uid = di->di_uid;
-       inode->i_gid = di->di_gid;
-       i_size_write(inode, di->di_size);
-       inode->i_atime.tv_sec = di->di_atime;
-       inode->i_mtime.tv_sec = di->di_mtime;
-       inode->i_ctime.tv_sec = di->di_ctime;
-       inode->i_atime.tv_nsec = 0;
-       inode->i_mtime.tv_nsec = 0;
-       inode->i_ctime.tv_nsec = 0;
-       inode->i_blocks = di->di_blocks <<
-               (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
-
-       if (di->di_flags & GFS2_DIF_IMMUTABLE)
-               inode->i_flags |= S_IMMUTABLE;
-       else
-               inode->i_flags &= ~S_IMMUTABLE;
-
-       if (di->di_flags & GFS2_DIF_APPENDONLY)
-               inode->i_flags |= S_APPEND;
-       else
-               inode->i_flags &= ~S_APPEND;
-}
-
-/**
- * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
- * @ip: The GFS2 inode
- *
- * Only copy out the attributes that we want the VFS layer
- * to be able to modify.
- */
-
-void gfs2_inode_attr_out(struct gfs2_inode *ip)
-{
-       struct inode *inode = &ip->i_inode;
-       struct gfs2_dinode *di = &ip->i_di;
-       gfs2_assert_withdraw(GFS2_SB(inode),
-               (di->di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
-       di->di_mode = inode->i_mode;
-       di->di_uid = inode->i_uid;
-       di->di_gid = inode->i_gid;
-       di->di_atime = inode->i_atime.tv_sec;
-       di->di_mtime = inode->i_mtime.tv_sec;
-       di->di_ctime = inode->i_ctime.tv_sec;
-}
-
 static int iget_test(struct inode *inode, void *opaque)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_inum *inum = opaque;
+       struct gfs2_inum_host *inum = opaque;
 
-       if (ip && ip->i_num.no_addr == inum->no_addr)
+       if (ip->i_num.no_addr == inum->no_addr)
                return 1;
 
        return 0;
@@ -123,19 +52,20 @@ static int iget_test(struct inode *inode, void *opaque)
 static int iget_set(struct inode *inode, void *opaque)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_inum *inum = opaque;
+       struct gfs2_inum_host *inum = opaque;
 
        ip->i_num = *inum;
+       inode->i_ino = inum->no_addr;
        return 0;
 }
 
-struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum)
+struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
 {
        return ilookup5(sb, (unsigned long)inum->no_formal_ino,
                        iget_test, inum);
 }
 
-static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
+static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
 {
        return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
                     iget_test, iget_set, inum);
@@ -150,7 +80,7 @@ static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
  * Returns: A VFS inode, or an error
  */
 
-struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned int type)
+struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
 {
        struct inode *inode = gfs2_iget(sb, inum);
        struct gfs2_inode *ip = GFS2_I(inode);
@@ -188,7 +118,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum,
                if (unlikely(error))
                        goto fail_put;
 
-               ip->i_vn = ip->i_gl->gl_vn - 1;
+               set_bit(GIF_INVALID, &ip->i_flags);
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
                        goto fail_iopen;
@@ -208,6 +138,63 @@ fail:
        return ERR_PTR(error);
 }
 
+static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
+{
+       struct gfs2_dinode_host *di = &ip->i_di;
+       const struct gfs2_dinode *str = buf;
+
+       if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
+               if (gfs2_consist_inode(ip))
+                       gfs2_dinode_print(ip);
+               return -EIO;
+       }
+       if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
+               return -ESTALE;
+
+       ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
+       ip->i_inode.i_rdev = 0;
+       switch (ip->i_inode.i_mode & S_IFMT) {
+       case S_IFBLK:
+       case S_IFCHR:
+               ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
+                                          be32_to_cpu(str->di_minor));
+               break;
+       };
+
+       ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
+       ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
+       /*
+        * We will need to review setting the nlink count here in the
+        * light of the forthcoming ro bind mount work. This is a reminder
+        * to do that.
+        */
+       ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
+       di->di_size = be64_to_cpu(str->di_size);
+       i_size_write(&ip->i_inode, di->di_size);
+       di->di_blocks = be64_to_cpu(str->di_blocks);
+       gfs2_set_inode_blocks(&ip->i_inode);
+       ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
+       ip->i_inode.i_atime.tv_nsec = 0;
+       ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
+       ip->i_inode.i_mtime.tv_nsec = 0;
+       ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
+       ip->i_inode.i_ctime.tv_nsec = 0;
+
+       di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
+       di->di_goal_data = be64_to_cpu(str->di_goal_data);
+       di->di_generation = be64_to_cpu(str->di_generation);
+
+       di->di_flags = be32_to_cpu(str->di_flags);
+       gfs2_set_inode_flags(&ip->i_inode);
+       di->di_height = be16_to_cpu(str->di_height);
+
+       di->di_depth = be16_to_cpu(str->di_depth);
+       di->di_entries = be32_to_cpu(str->di_entries);
+
+       di->di_eattr = be64_to_cpu(str->di_eattr);
+       return 0;
+}
+
 /**
  * gfs2_inode_refresh - Refresh the incore copy of the dinode
  * @ip: The GFS2 inode
@@ -229,21 +216,11 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
                return -EIO;
        }
 
-       gfs2_dinode_in(&ip->i_di, dibh->b_data);
-
+       error = gfs2_dinode_in(ip, dibh->b_data);
        brelse(dibh);
+       clear_bit(GIF_INVALID, &ip->i_flags);
 
-       if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) {
-               if (gfs2_consist_inode(ip))
-                       gfs2_dinode_print(&ip->i_di);
-               return -EIO;
-       }
-       if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
-               return -ESTALE;
-
-       ip->i_vn = ip->i_gl->gl_vn;
-
-       return 0;
+       return error;
 }
 
 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
@@ -255,7 +232,7 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 
        if (ip->i_di.di_blocks != 1) {
                if (gfs2_consist_inode(ip))
-                       gfs2_dinode_print(&ip->i_di);
+                       gfs2_dinode_print(ip);
                return -EIO;
        }
 
@@ -318,14 +295,14 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
        u32 nlink;
        int error;
 
-       BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink);
-       nlink = ip->i_di.di_nlink + diff;
+       BUG_ON(diff != 1 && diff != -1);
+       nlink = ip->i_inode.i_nlink + diff;
 
        /* If we are reducing the nlink count, but the new value ends up being
           bigger than the old one, we must have underflowed. */
-       if (diff < 0 && nlink > ip->i_di.di_nlink) {
+       if (diff < 0 && nlink > ip->i_inode.i_nlink) {
                if (gfs2_consist_inode(ip))
-                       gfs2_dinode_print(&ip->i_di);
+                       gfs2_dinode_print(ip);
                return -EIO;
        }
 
@@ -333,16 +310,19 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
        if (error)
                return error;
 
-       ip->i_di.di_nlink = nlink;
-       ip->i_di.di_ctime = get_seconds();
-       ip->i_inode.i_nlink = nlink;
+       if (diff > 0)
+               inc_nlink(&ip->i_inode);
+       else
+               drop_nlink(&ip->i_inode);
+
+       ip->i_inode.i_ctime.tv_sec = get_seconds();
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        mark_inode_dirty(&ip->i_inode);
 
-       if (ip->i_di.di_nlink == 0) {
+       if (ip->i_inode.i_nlink == 0) {
                struct gfs2_rgrpd *rgd;
                struct gfs2_holder ri_gh, rg_gh;
 
@@ -357,7 +337,6 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
                if (error)
                        goto out_norgrp;
 
-               clear_nlink(&ip->i_inode);
                gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
                gfs2_glock_dq_uninit(&rg_gh);
 out_norgrp:
@@ -394,7 +373,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
        struct super_block *sb = dir->i_sb;
        struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_holder d_gh;
-       struct gfs2_inum inum;
+       struct gfs2_inum_host inum;
        unsigned int type;
        int error = 0;
        struct inode *inode = NULL;
@@ -436,7 +415,7 @@ static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
 {
        struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
        struct buffer_head *bh;
-       struct gfs2_inum_range ir;
+       struct gfs2_inum_range_host ir;
        int error;
 
        error = gfs2_trans_begin(sdp, RES_DINODE, 0);
@@ -479,7 +458,7 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
        struct gfs2_holder gh;
        struct buffer_head *bh;
-       struct gfs2_inum_range ir;
+       struct gfs2_inum_range_host ir;
        int error;
 
        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -500,21 +479,22 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
        if (!ir.ir_length) {
                struct buffer_head *m_bh;
                u64 x, y;
+               __be64 z;
 
                error = gfs2_meta_inode_buffer(m_ip, &m_bh);
                if (error)
                        goto out_brelse;
 
-               x = *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
-               x = y = be64_to_cpu(x);
+               z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
+               x = y = be64_to_cpu(z);
                ir.ir_start = x;
                ir.ir_length = GFS2_INUM_QUANTUM;
                x += GFS2_INUM_QUANTUM;
                if (x < y)
                        gfs2_consist_inode(m_ip);
-               x = cpu_to_be64(x);
+               z = cpu_to_be64(x);
                gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
-               *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x;
+               *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
 
                brelse(m_bh);
        }
@@ -567,7 +547,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
                return error;
 
        /*  Don't create entries in an unlinked directory  */
-       if (!dip->i_di.di_nlink)
+       if (!dip->i_inode.i_nlink)
                return -EPERM;
 
        error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
@@ -583,7 +563,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
 
        if (dip->i_di.di_entries == (u32)-1)
                return -EFBIG;
-       if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1)
+       if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
                return -EMLINK;
 
        return 0;
@@ -593,24 +573,24 @@ static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
                               unsigned int *uid, unsigned int *gid)
 {
        if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
-           (dip->i_di.di_mode & S_ISUID) && dip->i_di.di_uid) {
+           (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISUID;
-               else if (dip->i_di.di_uid != current->fsuid)
+               else if (dip->i_inode.i_uid != current->fsuid)
                        *mode &= ~07111;
-               *uid = dip->i_di.di_uid;
+               *uid = dip->i_inode.i_uid;
        } else
                *uid = current->fsuid;
 
-       if (dip->i_di.di_mode & S_ISGID) {
+       if (dip->i_inode.i_mode & S_ISGID) {
                if (S_ISDIR(*mode))
                        *mode |= S_ISGID;
-               *gid = dip->i_di.di_gid;
+               *gid = dip->i_inode.i_gid;
        } else
                *gid = current->fsgid;
 }
 
-static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum *inum,
+static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
                        u64 *generation)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
@@ -650,9 +630,9 @@ out:
  */
 
 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
-                       const struct gfs2_inum *inum, unsigned int mode,
+                       const struct gfs2_inum_host *inum, unsigned int mode,
                        unsigned int uid, unsigned int gid,
-                       const u64 *generation)
+                       const u64 *generation, dev_t dev)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        struct gfs2_dinode *di;
@@ -669,14 +649,15 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
        di->di_mode = cpu_to_be32(mode);
        di->di_uid = cpu_to_be32(uid);
        di->di_gid = cpu_to_be32(gid);
-       di->di_nlink = cpu_to_be32(0);
-       di->di_size = cpu_to_be64(0);
+       di->di_nlink = 0;
+       di->di_size = 0;
        di->di_blocks = cpu_to_be64(1);
        di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
-       di->di_major = di->di_minor = cpu_to_be32(0);
+       di->di_major = cpu_to_be32(MAJOR(dev));
+       di->di_minor = cpu_to_be32(MINOR(dev));
        di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
        di->di_generation = cpu_to_be64(*generation);
-       di->di_flags = cpu_to_be32(0);
+       di->di_flags = 0;
 
        if (S_ISREG(mode)) {
                if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
@@ -693,22 +674,22 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
        }
 
        di->__pad1 = 0;
-       di->di_payload_format = cpu_to_be32(0);
-       di->di_height = cpu_to_be32(0);
+       di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
+       di->di_height = 0;
        di->__pad2 = 0;
        di->__pad3 = 0;
-       di->di_depth = cpu_to_be16(0);
-       di->di_entries = cpu_to_be32(0);
+       di->di_depth = 0;
+       di->di_entries = 0;
        memset(&di->__pad4, 0, sizeof(di->__pad4));
-       di->di_eattr = cpu_to_be64(0);
+       di->di_eattr = 0;
        memset(&di->di_reserved, 0, sizeof(di->di_reserved));
 
        brelse(dibh);
 }
 
 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
-                      unsigned int mode, const struct gfs2_inum *inum,
-                      const u64 *generation)
+                      unsigned int mode, const struct gfs2_inum_host *inum,
+                      const u64 *generation, dev_t dev)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
        unsigned int uid, gid;
@@ -729,7 +710,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
        if (error)
                goto out_quota;
 
-       init_dinode(dip, gl, inum, mode, uid, gid, generation);
+       init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
        gfs2_quota_change(dip, +1, uid, gid);
        gfs2_trans_end(sdp);
 
@@ -759,8 +740,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
        if (alloc_required < 0)
                goto fail;
        if (alloc_required) {
-               error = gfs2_quota_check(dip, dip->i_di.di_uid,
-                                        dip->i_di.di_gid);
+               error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
                if (error)
                        goto fail_quota_locks;
 
@@ -782,16 +762,16 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                        goto fail_quota_locks;
        }
 
-       error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_di.di_mode));
+       error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
        if (error)
                goto fail_end_trans;
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                goto fail_end_trans;
-       ip->i_di.di_nlink = 1;
+       ip->i_inode.i_nlink = 1;
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
        return 0;
 
@@ -860,13 +840,13 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
  */
 
 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
-                          unsigned int mode)
+                          unsigned int mode, dev_t dev)
 {
        struct inode *inode;
        struct gfs2_inode *dip = ghs->gh_gl->gl_object;
        struct inode *dir = &dip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-       struct gfs2_inum inum;
+       struct gfs2_inum_host inum;
        int error;
        u64 generation;
 
@@ -890,35 +870,12 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
        if (error)
                goto fail_gunlock;
 
-       if (inum.no_addr < dip->i_num.no_addr) {
-               gfs2_glock_dq(ghs);
-
-               error = gfs2_glock_nq_num(sdp, inum.no_addr,
-                                         &gfs2_inode_glops, LM_ST_EXCLUSIVE,
-                                         GL_SKIP, ghs + 1);
-               if (error) {
-                       return ERR_PTR(error);
-               }
-
-               gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
-               error = gfs2_glock_nq(ghs);
-               if (error) {
-                       gfs2_glock_dq_uninit(ghs + 1);
-                       return ERR_PTR(error);
-               }
-
-               error = create_ok(dip, name, mode);
-               if (error)
-                       goto fail_gunlock2;
-       } else {
-               error = gfs2_glock_nq_num(sdp, inum.no_addr,
-                                         &gfs2_inode_glops, LM_ST_EXCLUSIVE,
-                                         GL_SKIP, ghs + 1);
-               if (error)
-                       goto fail_gunlock;
-       }
+       error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
+                                 LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       if (error)
+               goto fail_gunlock;
 
-       error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation);
+       error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
        if (error)
                goto fail_gunlock2;
 
@@ -975,7 +932,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
 
        if (ip->i_di.di_entries != 2) {
                if (gfs2_consist_inode(ip))
-                       gfs2_dinode_print(&ip->i_di);
+                       gfs2_dinode_print(ip);
                return -EIO;
        }
 
@@ -997,7 +954,12 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
        if (error)
                return error;
 
-       error = gfs2_change_nlink(ip, -2);
+       /* It looks odd, but it really should be done twice */
+       error = gfs2_change_nlink(ip, -1);
+       if (error)
+               return error;
+
+       error = gfs2_change_nlink(ip, -1);
        if (error)
                return error;
 
@@ -1018,16 +980,16 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
                   struct gfs2_inode *ip)
 {
-       struct gfs2_inum inum;
+       struct gfs2_inum_host inum;
        unsigned int type;
        int error;
 
        if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
                return -EPERM;
 
-       if ((dip->i_di.di_mode & S_ISVTX) &&
-           dip->i_di.di_uid != current->fsuid &&
-           ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER))
+       if ((dip->i_inode.i_mode & S_ISVTX) &&
+           dip->i_inode.i_uid != current->fsuid &&
+           ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
                return -EPERM;
 
        if (IS_APPEND(&dip->i_inode))
@@ -1044,7 +1006,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
        if (!gfs2_inum_equal(&inum, &ip->i_num))
                return -ENOENT;
 
-       if (IF2DT(ip->i_di.di_mode) != type) {
+       if (IF2DT(ip->i_inode.i_mode) != type) {
                gfs2_consist_inode(dip);
                return -EIO;
        }
@@ -1194,7 +1156,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
                return 0;
 
        curtime = get_seconds();
-       if (curtime - ip->i_di.di_atime >= quantum) {
+       if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
                gfs2_glock_dq(gh);
                gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
                                   gh);
@@ -1206,7 +1168,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
                   trying to get exclusive lock. */
 
                curtime = get_seconds();
-               if (curtime - ip->i_di.di_atime >= quantum) {
+               if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
                        struct buffer_head *dibh;
                        struct gfs2_dinode *di;
 
@@ -1220,11 +1182,11 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
                        if (error)
                                goto fail_end_trans;
 
-                       ip->i_di.di_atime = curtime;
+                       ip->i_inode.i_atime.tv_sec = curtime;
 
                        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
                        di = (struct gfs2_dinode *)dibh->b_data;
-                       di->di_atime = cpu_to_be64(ip->i_di.di_atime);
+                       di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
                        brelse(dibh);
 
                        gfs2_trans_end(sdp);
@@ -1249,92 +1211,6 @@ fail:
        return error;
 }
 
-/**
- * glock_compare_atime - Compare two struct gfs2_glock structures for sort
- * @arg_a: the first structure
- * @arg_b: the second structure
- *
- * Returns: 1 if A > B
- *         -1 if A < B
- *          0 if A == B
- */
-
-static int glock_compare_atime(const void *arg_a, const void *arg_b)
-{
-       const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
-       const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
-       const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
-       const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
-
-       if (a->ln_number > b->ln_number)
-               return 1;
-       if (a->ln_number < b->ln_number)
-               return -1;
-       if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
-               return 1;
-       if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
-               return 1;
-
-       return 0;
-}
-
-/**
- * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
- *      atime update
- * @num_gh: the number of structures
- * @ghs: an array of struct gfs2_holder structures
- *
- * Returns: 0 on success (all glocks acquired),
- *          errno on failure (no glocks acquired)
- */
-
-int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
-{
-       struct gfs2_holder **p;
-       unsigned int x;
-       int error = 0;
-
-       if (!num_gh)
-               return 0;
-
-       if (num_gh == 1) {
-               ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
-               if (ghs->gh_flags & GL_ATIME)
-                       error = gfs2_glock_nq_atime(ghs);
-               else
-                       error = gfs2_glock_nq(ghs);
-               return error;
-       }
-
-       p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
-       if (!p)
-               return -ENOMEM;
-
-       for (x = 0; x < num_gh; x++)
-               p[x] = &ghs[x];
-
-       sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL);
-
-       for (x = 0; x < num_gh; x++) {
-               p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
-
-               if (p[x]->gh_flags & GL_ATIME)
-                       error = gfs2_glock_nq_atime(p[x]);
-               else
-                       error = gfs2_glock_nq(p[x]);
-
-               if (error) {
-                       while (x--)
-                               gfs2_glock_dq(p[x]);
-                       break;
-               }
-       }
-
-       kfree(p);
-       return error;
-}
-
-
 static int
 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
 {
@@ -1345,10 +1221,8 @@ __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
        if (!error) {
                error = inode_setattr(&ip->i_inode, attr);
                gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
-               gfs2_inode_attr_out(ip);
-
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
        return error;
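Elsewhere in this file the per-inode version number (i_vn) is replaced by a GIF_INVALID flag: go_inval sets it when the cached dinode may be stale, and inode_go_lock calls gfs2_inode_refresh() only while it is set, which in turn clears the flag after re-reading the dinode. A minimal sketch of that invalidate/refresh handshake (helper names are hypothetical, not GFS2 code):

/* Mark the cached copy stale; a later lock acquisition will refresh it. */
static void cache_invalidate(unsigned long *flags)
{
        set_bit(0 /* INVALID */, flags);
}

/* Refresh on demand: only hit the disk when the flag says we must. */
static int cache_lock(unsigned long *flags, int (*refresh_from_disk)(void))
{
        if (test_bit(0 /* INVALID */, flags)) {
                int error = refresh_from_disk();
                if (error)
                        return error;
                clear_bit(0 /* INVALID */, flags);
        }
        return 0;
}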
index f5d8617605795be28aabdc1cdf11fa002780ebb4..b57f448b15bc42e09c18c2889fe866bdc2efacc6 100644 (file)
@@ -22,13 +22,19 @@ static inline int gfs2_is_jdata(struct gfs2_inode *ip)
 
 static inline int gfs2_is_dir(struct gfs2_inode *ip)
 {
-       return S_ISDIR(ip->i_di.di_mode);
+       return S_ISDIR(ip->i_inode.i_mode);
+}
+
+static inline void gfs2_set_inode_blocks(struct inode *inode)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       inode->i_blocks = ip->i_di.di_blocks <<
+               (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
 }
 
 void gfs2_inode_attr_in(struct gfs2_inode *ip);
-void gfs2_inode_attr_out(struct gfs2_inode *ip);
-struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type);
-struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);
+struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned type);
+struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum);
 
 int gfs2_inode_refresh(struct gfs2_inode *ip);
 
@@ -37,19 +43,15 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
                           int is_root, struct nameidata *nd);
 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
-                          unsigned int mode);
+                          unsigned int mode, dev_t dev);
 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
                struct gfs2_inode *ip);
 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
                   struct gfs2_inode *ip);
 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
-
 int gfs2_glock_nq_atime(struct gfs2_holder *gh);
-int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);
-
 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
-
 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 
 #endif /* __INODE_DOT_H__ */
index 0cace3da9dbb41795b6cacdbe97de52786d3678e..291415ddfe51cb291ccd66b23703e63a58b089d7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/lm_interface.h>
+#include <linux/delay.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -142,7 +143,7 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
        return list_empty(&ai->ai_ail1_list);
 }
 
-void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
+static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
 {
        struct list_head *head = &sdp->sd_ail1_list;
        u64 sync_gen;
@@ -261,6 +262,12 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
  * @sdp: The GFS2 superblock
  * @blks: The number of blocks to reserve
  *
+ * Note that we never give out the last 6 blocks of the journal. That's
+ * due to the fact that there are a small number of header blocks
+ * associated with each log flush. The exact number can't be known until
+ * flush time, so we ensure that we have just enough free blocks at all
+ * times to avoid running out during a log flush.
+ *
  * Returns: errno
  */
 
@@ -274,7 +281,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 
        mutex_lock(&sdp->sd_log_reserve_mutex);
        gfs2_log_lock(sdp);
-       while(sdp->sd_log_blks_free <= blks) {
+       while(sdp->sd_log_blks_free <= (blks + 6)) {
                gfs2_log_unlock(sdp);
                gfs2_ail1_empty(sdp, 0);
                gfs2_log_flush(sdp, NULL);
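
A minimal userspace sketch (not part of the patch) of the reservation rule described above: a request for blks journal blocks only proceeds once the free count exceeds blks plus a fixed headroom kept back for the log-flush header blocks, whose exact number is only known at flush time. The counters and flush_log() helper are illustrative stand-ins, not GFS2 APIs:

#include <stdio.h>

#define LOG_HEADROOM 6          /* never hand out the last 6 journal blocks */

static unsigned int free_blks = 8;

static void flush_log(void)
{
        free_blks += 16;        /* pretend a flush reclaimed journal space */
}

static void log_reserve(unsigned int blks)
{
        while (free_blks <= blks + LOG_HEADROOM)
                flush_log();
        free_blks -= blks;
        printf("reserved %u blocks, %u left\n", blks, free_blks);
}

int main(void)
{
        log_reserve(10);
        return 0;
}
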
@@ -319,7 +326,8 @@ static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
        bh_map.b_size = 1 << inode->i_blkbits;
        error = gfs2_block_map(inode, lbn, 0, &bh_map);
        if (error || !bh_map.b_blocknr)
-               printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn);
+               printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error,
+                      (unsigned long long)bh_map.b_blocknr, lbn);
        gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
 
        return bh_map.b_blocknr;
@@ -643,12 +651,9 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        up_read(&sdp->sd_log_flush_lock);
 
        gfs2_log_lock(sdp);
-       if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
-               gfs2_log_unlock(sdp);
-               gfs2_log_flush(sdp, NULL);
-       } else {
-               gfs2_log_unlock(sdp);
-       }
+       if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
+               wake_up_process(sdp->sd_logd_process);
+       gfs2_log_unlock(sdp);
 }
 
 /**
@@ -686,3 +691,21 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
        up_write(&sdp->sd_log_flush_lock);
 }
 
+
+/**
+ * gfs2_meta_syncfs - sync all the buffers in a filesystem
+ * @sdp: the filesystem
+ *
+ */
+
+void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
+{
+       gfs2_log_flush(sdp, NULL);
+       for (;;) {
+               gfs2_ail1_start(sdp, DIO_ALL);
+               if (gfs2_ail1_empty(sdp, DIO_ALL))
+                       break;
+               msleep(10);
+       }
+}
+
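
gfs2_meta_syncfs(), now living here, uses a flush-then-poll loop: flush the log, start AIL writeback, and re-check with a short sleep until everything has drained. A rough userspace sketch of the same loop, with usleep() standing in for msleep() and an illustrative work counter in place of the AIL lists:

#include <stdio.h>
#include <unistd.h>

static int pending = 3;         /* outstanding writeback items */

static void start_writeback(void)
{
        if (pending)
                pending--;      /* pretend one item completed */
}

static int all_done(void)
{
        return pending == 0;
}

int main(void)
{
        for (;;) {
                start_writeback();
                if (all_done())
                        break;
                usleep(10000);  /* roughly msleep(10) */
        }
        printf("all buffers synced\n");
        return 0;
}
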
index 7f5737d55612883e5de8bb922d34d9e9053245bc..8e7aa0f2910961fedd690f9641732bc9e7ed64a7 100644 (file)
@@ -48,7 +48,6 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                            unsigned int ssize);
 
-void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
 int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
 
 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
@@ -61,5 +60,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
 
 void gfs2_log_shutdown(struct gfs2_sbd *sdp);
+void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
index ab6d1115f95d5fd2cfdb407a7338e0535fc9c487..4d7f94d8c7bd2ca07162fed385efcc1ae65ea835 100644 (file)
@@ -182,7 +182,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 }
 
 static void buf_lo_before_scan(struct gfs2_jdesc *jd,
-                              struct gfs2_log_header *head, int pass)
+                              struct gfs2_log_header_host *head, int pass)
 {
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 
@@ -328,7 +328,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
 }
 
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
-                                 struct gfs2_log_header *head, int pass)
+                                 struct gfs2_log_header_host *head, int pass)
 {
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
 
@@ -509,7 +509,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
 {
        LIST_HEAD(started);
        struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
-       struct buffer_head *bh = NULL;
+       struct buffer_head *bh = NULL, *bh1 = NULL;
        unsigned int offset = sizeof(struct gfs2_log_descriptor);
        struct gfs2_log_descriptor *ld;
        unsigned int limit;
@@ -537,8 +537,13 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
                list_for_each_entry_safe_continue(bd1, bdt,
                                                  &sdp->sd_log_le_databuf,
                                                  bd_le.le_list) {
+                       /* store off the buffer head in a local ptr since
+                        * gfs2_bufdata might change when we drop the log lock
+                        */
+                       bh1 = bd1->bd_bh;
+
                        /* An ordered write buffer */
-                       if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
+                       if (bh1 && !buffer_pinned(bh1)) {
                                list_move(&bd1->bd_le.le_list, &started);
                                if (bd1 == bd2) {
                                        bd2 = NULL;
@@ -547,20 +552,21 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
                                                        bd_le.le_list);
                                }
                                total_dbuf--;
-                               if (bd1->bd_bh) {
-                                       get_bh(bd1->bd_bh);
-                                       if (buffer_dirty(bd1->bd_bh)) {
+                               if (bh1) {
+                                       if (buffer_dirty(bh1)) {
+                                               get_bh(bh1);
+
                                                gfs2_log_unlock(sdp);
-                                               wait_on_buffer(bd1->bd_bh);
-                                               ll_rw_block(WRITE, 1,
-                                                           &bd1->bd_bh);
+
+                                               ll_rw_block(SWRITE, 1, &bh1);
+                                               brelse(bh1);
+
                                                gfs2_log_lock(sdp);
                                        }
-                                       brelse(bd1->bd_bh);
                                        continue;
                                }
                                continue;
-                       } else if (bd1->bd_bh) { /* A journaled buffer */
+                       } else if (bh1) { /* A journaled buffer */
                                int magic;
                                gfs2_log_unlock(sdp);
                                if (!bh) {
@@ -582,16 +588,16 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
                                        ld->ld_data2 = cpu_to_be32(0);
                                        memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
                                }
-                               magic = gfs2_check_magic(bd1->bd_bh);
-                               *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
+                               magic = gfs2_check_magic(bh1);
+                               *ptr++ = cpu_to_be64(bh1->b_blocknr);
                                *ptr++ = cpu_to_be64((__u64)magic);
-                               clear_buffer_escaped(bd1->bd_bh);
+                               clear_buffer_escaped(bh1);
                                if (unlikely(magic != 0))
-                                       set_buffer_escaped(bd1->bd_bh);
+                                       set_buffer_escaped(bh1);
                                gfs2_log_lock(sdp);
                                if (n++ > num)
                                        break;
-                       } else if (!bd1->bd_bh) {
+                       } else if (!bh1) {
                                total_dbuf--;
                                sdp->sd_log_num_databuf--;
                                list_del_init(&bd1->bd_le.le_list);
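
The change above copies bd1->bd_bh into the local bh1 while the log lock is still held, because the bufdata can be modified by others once the lock is dropped. A small userspace sketch (not part of the patch) of that copy-under-the-lock pattern, with a pthread mutex in place of gfs2_log_lock(); all types and names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct buffer {
        int blocknr;
};

struct bufdata {
        struct buffer *bh;      /* may change while the lock is not held */
};

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void submit_ordered(struct bufdata *bd)
{
        struct buffer *bh;

        pthread_mutex_lock(&log_lock);
        bh = bd->bh;                    /* snapshot while still locked */
        pthread_mutex_unlock(&log_lock);

        if (bh)                         /* uses the snapshot, not bd->bh */
                printf("writing block %d\n", bh->blocknr);
}

int main(void)
{
        struct buffer b = { .blocknr = 42 };
        struct bufdata bd = { .bh = &b };

        submit_ordered(&bd);
        return 0;
}
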
index 5839c05ae6be29cfc7fe692c2c8f1028aaead2dd..965bc65c7c6432b9f47f3937d4dbd27f5b4c9446 100644 (file)
@@ -60,7 +60,7 @@ static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
 }
 
 static inline void lops_before_scan(struct gfs2_jdesc *jd,
-                                   struct gfs2_log_header *head,
+                                   struct gfs2_log_header_host *head,
                                    unsigned int pass)
 {
        int x;
index 9889c1eacec186efe383f9c9801563123bb0511d..7c1a9e22a526e0c0203d8d1426c2125fe81fcf70 100644 (file)
@@ -25,7 +25,7 @@
 #include "util.h"
 #include "glock.h"
 
-static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
        struct gfs2_inode *ip = foo;
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
@@ -37,7 +37,7 @@ static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long
        }
 }
 
-static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
        struct gfs2_glock *gl = foo;
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
index 3912d6a4b1e65a7debb78120d0817db1106d1f33..0e34d9918973126c710c227bf35197bac57c30cc 100644 (file)
@@ -127,17 +127,17 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
 
 /**
  * getbuf - Get a buffer with a given address space
- * @sdp: the filesystem
- * @aspace: the address space
+ * @gl: the glock
  * @blkno: the block number (filesystem scope)
  * @create: 1 if the buffer should be created
  *
  * Returns: the buffer
  */
 
-static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
-                                 u64 blkno, int create)
+static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
+       struct address_space *mapping = gl->gl_aspace->i_mapping;
+       struct gfs2_sbd *sdp = gl->gl_sbd;
        struct page *page;
        struct buffer_head *bh;
        unsigned int shift;
@@ -150,13 +150,13 @@ static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
 
        if (create) {
                for (;;) {
-                       page = grab_cache_page(aspace->i_mapping, index);
+                       page = grab_cache_page(mapping, index);
                        if (page)
                                break;
                        yield();
                }
        } else {
-               page = find_lock_page(aspace->i_mapping, index);
+               page = find_lock_page(mapping, index);
                if (!page)
                        return NULL;
        }
@@ -202,7 +202,7 @@ static void meta_prep_new(struct buffer_head *bh)
 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 {
        struct buffer_head *bh;
-       bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
+       bh = getbuf(gl, blkno, CREATE);
        meta_prep_new(bh);
        return bh;
 }
@@ -220,7 +220,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                   struct buffer_head **bhp)
 {
-       *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
+       *bhp = getbuf(gl, blkno, CREATE);
        if (!buffer_uptodate(*bhp))
                ll_rw_block(READ_META, 1, bhp);
        if (flags & DIO_WAIT) {
@@ -379,11 +379,10 @@ void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-       struct inode *aspace = ip->i_gl->gl_aspace;
        struct buffer_head *bh;
 
        while (blen) {
-               bh = getbuf(sdp, aspace, bstart, NO_CREATE);
+               bh = getbuf(ip->i_gl, bstart, NO_CREATE);
                if (bh) {
                        struct gfs2_bufdata *bd = bh->b_private;
 
@@ -472,6 +471,9 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
        struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
        int in_cache = 0;
 
+       BUG_ON(!gl);
+       BUG_ON(!sdp);
+
        spin_lock(&ip->i_spin);
        if (*bh_slot && (*bh_slot)->b_blocknr == num) {
                bh = *bh_slot;
@@ -481,7 +483,7 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
        spin_unlock(&ip->i_spin);
 
        if (!bh)
-               bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);
+               bh = getbuf(gl, num, CREATE);
 
        if (!bh)
                return -ENOBUFS;
@@ -532,7 +534,6 @@ err:
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
-       struct inode *aspace = gl->gl_aspace;
        struct buffer_head *first_bh, *bh;
        u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                          sdp->sd_sb.sb_bsize_shift;
@@ -544,7 +545,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (extlen > max_ra)
                extlen = max_ra;
 
-       first_bh = getbuf(sdp, aspace, dblock, CREATE);
+       first_bh = getbuf(gl, dblock, CREATE);
 
        if (buffer_uptodate(first_bh))
                goto out;
@@ -555,7 +556,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        extlen--;
 
        while (extlen) {
-               bh = getbuf(sdp, aspace, dblock, CREATE);
+               bh = getbuf(gl, dblock, CREATE);
 
                if (!buffer_uptodate(bh) && !buffer_locked(bh))
                        ll_rw_block(READA, 1, &bh);
@@ -571,20 +572,3 @@ out:
        return first_bh;
 }
 
-/**
- * gfs2_meta_syncfs - sync all the buffers in a filesystem
- * @sdp: the filesystem
- *
- */
-
-void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
-{
-       gfs2_log_flush(sdp, NULL);
-       for (;;) {
-               gfs2_ail1_start(sdp, DIO_ALL);
-               if (gfs2_ail1_empty(sdp, DIO_ALL))
-                       break;
-               msleep(10);
-       }
-}
-
index 3ec939e20dffeffcd66a207fdd58687a2a4947e5..e037425bc0427e6cff31c6bda1bae2daa16cf626 100644 (file)
@@ -67,7 +67,6 @@ static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
 }
 
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
-void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
 
 #define buffer_busy(bh) \
 ((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
index 1025960b0e6e8242d8f97bdd2f288576c0d2416f..f2495f1e21adc0fde6f4a51ff73ff1fa8376bb43 100644 (file)
@@ -15,6 +15,8 @@
 
 #include "gfs2.h"
 #include <linux/gfs2_ondisk.h>
+#include <linux/lm_interface.h>
+#include "incore.h"
 
 #define pv(struct, member, fmt) printk(KERN_INFO "  "#member" = "fmt"\n", \
                                       struct->member);
@@ -32,7 +34,7 @@
  * first arg: the cpu-order structure
  */
 
-void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
+void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf)
 {
        const struct gfs2_inum *str = buf;
 
@@ -40,7 +42,7 @@ void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
        no->no_addr = be64_to_cpu(str->no_addr);
 }
 
-void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
+void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf)
 {
        struct gfs2_inum *str = buf;
 
@@ -48,13 +50,13 @@ void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
        str->no_addr = cpu_to_be64(no->no_addr);
 }
 
-static void gfs2_inum_print(const struct gfs2_inum *no)
+static void gfs2_inum_print(const struct gfs2_inum_host *no)
 {
        printk(KERN_INFO "  no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino);
        printk(KERN_INFO "  no_addr = %llu\n", (unsigned long long)no->no_addr);
 }
 
-static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
+static void gfs2_meta_header_in(struct gfs2_meta_header_host *mh, const void *buf)
 {
        const struct gfs2_meta_header *str = buf;
 
@@ -63,23 +65,7 @@ static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
        mh->mh_format = be32_to_cpu(str->mh_format);
 }
 
-static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf)
-{
-       struct gfs2_meta_header *str = buf;
-
-       str->mh_magic = cpu_to_be32(mh->mh_magic);
-       str->mh_type = cpu_to_be32(mh->mh_type);
-       str->mh_format = cpu_to_be32(mh->mh_format);
-}
-
-static void gfs2_meta_header_print(const struct gfs2_meta_header *mh)
-{
-       pv(mh, mh_magic, "0x%.8X");
-       pv(mh, mh_type, "%u");
-       pv(mh, mh_format, "%u");
-}
-
-void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
+void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
 {
        const struct gfs2_sb *str = buf;
 
@@ -97,7 +83,7 @@ void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
        memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
 }
 
-void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
+void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf)
 {
        const struct gfs2_rindex *str = buf;
 
@@ -109,7 +95,7 @@ void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
 
 }
 
-void gfs2_rindex_print(const struct gfs2_rindex *ri)
+void gfs2_rindex_print(const struct gfs2_rindex_host *ri)
 {
        printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)ri->ri_addr);
        pv(ri, ri_length, "%u");
@@ -120,22 +106,20 @@ void gfs2_rindex_print(const struct gfs2_rindex *ri)
        pv(ri, ri_bitbytes, "%u");
 }
 
-void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf)
+void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
 {
        const struct gfs2_rgrp *str = buf;
 
-       gfs2_meta_header_in(&rg->rg_header, buf);
        rg->rg_flags = be32_to_cpu(str->rg_flags);
        rg->rg_free = be32_to_cpu(str->rg_free);
        rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
        rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
 }
 
-void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
+void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
 {
        struct gfs2_rgrp *str = buf;
 
-       gfs2_meta_header_out(&rg->rg_header, buf);
        str->rg_flags = cpu_to_be32(rg->rg_flags);
        str->rg_free = cpu_to_be32(rg->rg_free);
        str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
@@ -144,7 +128,7 @@ void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
        memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
 }
 
-void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
+void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
 {
        const struct gfs2_quota *str = buf;
 
@@ -153,96 +137,56 @@ void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
        qu->qu_value = be64_to_cpu(str->qu_value);
 }
 
-void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf)
-{
-       const struct gfs2_dinode *str = buf;
-
-       gfs2_meta_header_in(&di->di_header, buf);
-       gfs2_inum_in(&di->di_num, &str->di_num);
-
-       di->di_mode = be32_to_cpu(str->di_mode);
-       di->di_uid = be32_to_cpu(str->di_uid);
-       di->di_gid = be32_to_cpu(str->di_gid);
-       di->di_nlink = be32_to_cpu(str->di_nlink);
-       di->di_size = be64_to_cpu(str->di_size);
-       di->di_blocks = be64_to_cpu(str->di_blocks);
-       di->di_atime = be64_to_cpu(str->di_atime);
-       di->di_mtime = be64_to_cpu(str->di_mtime);
-       di->di_ctime = be64_to_cpu(str->di_ctime);
-       di->di_major = be32_to_cpu(str->di_major);
-       di->di_minor = be32_to_cpu(str->di_minor);
-
-       di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
-       di->di_goal_data = be64_to_cpu(str->di_goal_data);
-       di->di_generation = be64_to_cpu(str->di_generation);
-
-       di->di_flags = be32_to_cpu(str->di_flags);
-       di->di_payload_format = be32_to_cpu(str->di_payload_format);
-       di->di_height = be16_to_cpu(str->di_height);
-
-       di->di_depth = be16_to_cpu(str->di_depth);
-       di->di_entries = be32_to_cpu(str->di_entries);
-
-       di->di_eattr = be64_to_cpu(str->di_eattr);
-
-}
-
-void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf)
+void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
 {
+       const struct gfs2_dinode_host *di = &ip->i_di;
        struct gfs2_dinode *str = buf;
 
-       gfs2_meta_header_out(&di->di_header, buf);
-       gfs2_inum_out(&di->di_num, (char *)&str->di_num);
+       str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
+       str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
+       str->di_header.__pad0 = 0;
+       str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
+       str->di_header.__pad1 = 0;
 
-       str->di_mode = cpu_to_be32(di->di_mode);
-       str->di_uid = cpu_to_be32(di->di_uid);
-       str->di_gid = cpu_to_be32(di->di_gid);
-       str->di_nlink = cpu_to_be32(di->di_nlink);
+       gfs2_inum_out(&ip->i_num, &str->di_num);
+
+       str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
+       str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
+       str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
+       str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
        str->di_size = cpu_to_be64(di->di_size);
        str->di_blocks = cpu_to_be64(di->di_blocks);
-       str->di_atime = cpu_to_be64(di->di_atime);
-       str->di_mtime = cpu_to_be64(di->di_mtime);
-       str->di_ctime = cpu_to_be64(di->di_ctime);
-       str->di_major = cpu_to_be32(di->di_major);
-       str->di_minor = cpu_to_be32(di->di_minor);
+       str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
+       str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
+       str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
 
        str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
        str->di_goal_data = cpu_to_be64(di->di_goal_data);
        str->di_generation = cpu_to_be64(di->di_generation);
 
        str->di_flags = cpu_to_be32(di->di_flags);
-       str->di_payload_format = cpu_to_be32(di->di_payload_format);
        str->di_height = cpu_to_be16(di->di_height);
-
+       str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
+                                            !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
+                                            GFS2_FORMAT_DE : 0);
        str->di_depth = cpu_to_be16(di->di_depth);
        str->di_entries = cpu_to_be32(di->di_entries);
 
        str->di_eattr = cpu_to_be64(di->di_eattr);
-
 }
 
-void gfs2_dinode_print(const struct gfs2_dinode *di)
+void gfs2_dinode_print(const struct gfs2_inode *ip)
 {
-       gfs2_meta_header_print(&di->di_header);
-       gfs2_inum_print(&di->di_num);
+       const struct gfs2_dinode_host *di = &ip->i_di;
+
+       gfs2_inum_print(&ip->i_num);
 
-       pv(di, di_mode, "0%o");
-       pv(di, di_uid, "%u");
-       pv(di, di_gid, "%u");
-       pv(di, di_nlink, "%u");
        printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
        printk(KERN_INFO "  di_blocks = %llu\n", (unsigned long long)di->di_blocks);
-       printk(KERN_INFO "  di_atime = %lld\n", (long long)di->di_atime);
-       printk(KERN_INFO "  di_mtime = %lld\n", (long long)di->di_mtime);
-       printk(KERN_INFO "  di_ctime = %lld\n", (long long)di->di_ctime);
-       pv(di, di_major, "%u");
-       pv(di, di_minor, "%u");
-
        printk(KERN_INFO "  di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta);
        printk(KERN_INFO "  di_goal_data = %llu\n", (unsigned long long)di->di_goal_data);
 
        pv(di, di_flags, "0x%.8X");
-       pv(di, di_payload_format, "%u");
        pv(di, di_height, "%u");
 
        pv(di, di_depth, "%u");
@@ -251,7 +195,7 @@ void gfs2_dinode_print(const struct gfs2_dinode *di)
        printk(KERN_INFO "  di_eattr = %llu\n", (unsigned long long)di->di_eattr);
 }
 
-void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
+void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
 {
        const struct gfs2_log_header *str = buf;
 
@@ -263,7 +207,7 @@ void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
        lh->lh_hash = be32_to_cpu(str->lh_hash);
 }
 
-void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
+void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
 {
        const struct gfs2_inum_range *str = buf;
 
@@ -271,7 +215,7 @@ void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
        ir->ir_length = be64_to_cpu(str->ir_length);
 }
 
-void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
+void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
 {
        struct gfs2_inum_range *str = buf;
 
@@ -279,7 +223,7 @@ void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
        str->ir_length = cpu_to_be64(ir->ir_length);
 }
 
-void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
 {
        const struct gfs2_statfs_change *str = buf;
 
@@ -288,7 +232,7 @@ void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
        sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
 }
 
-void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
 {
        struct gfs2_statfs_change *str = buf;
 
@@ -297,7 +241,7 @@ void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
        str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
 }
 
-void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf)
+void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
 {
        const struct gfs2_quota_change *str = buf;
 
index 015640b3f123fcde76a28195c1859577c38ef9b7..d8d69a72a10dd819bfcdbe6a736009bda5a92e27 100644 (file)
@@ -156,19 +156,6 @@ out_ignore:
        return 0;
 }
 
-static int zero_readpage(struct page *page)
-{
-       void *kaddr;
-
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr, 0, PAGE_CACHE_SIZE);
-       kunmap_atomic(kaddr, KM_USER0);
-
-       SetPageUptodate(page);
-
-       return 0;
-}
-
 /**
  * stuffed_readpage - Fill in a Linux page with stuffed file data
  * @ip: the inode
@@ -183,9 +170,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        void *kaddr;
        int error;
 
-       /* Only the first page of a stuffed file might contain data */
-       if (unlikely(page->index))
-               return zero_readpage(page);
+       BUG_ON(page->index);
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
@@ -230,9 +215,9 @@ static int gfs2_readpage(struct file *file, struct page *page)
                                /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
                                goto skip_lock;
                }
-               gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
+               gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
                do_unlock = 1;
-               error = gfs2_glock_nq_m_atime(1, &gh);
+               error = gfs2_glock_nq_atime(&gh);
                if (unlikely(error))
                        goto out_unlock;
        }
@@ -254,6 +239,8 @@ skip_lock:
 out:
        return error;
 out_unlock:
+       if (error == GLR_TRYFAILED)
+               error = AOP_TRUNCATED_PAGE;
        unlock_page(page);
        if (do_unlock)
                gfs2_holder_uninit(&gh);
@@ -293,9 +280,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
                                goto skip_lock;
                }
                gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
-                                LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
+                                LM_FLAG_TRY_1CB|GL_ATIME, &gh);
                do_unlock = 1;
-               ret = gfs2_glock_nq_m_atime(1, &gh);
+               ret = gfs2_glock_nq_atime(&gh);
                if (ret == GLR_TRYFAILED)
                        goto out_noerror;
                if (unlikely(ret))
@@ -366,10 +353,13 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
        unsigned int write_len = to - from;
 
 
-       gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
-       error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
-       if (error)
+       gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
+       error = gfs2_glock_nq_atime(&ip->i_gh);
+       if (unlikely(error)) {
+               if (error == GLR_TRYFAILED)
+                       error = AOP_TRUNCATED_PAGE;
                goto out_uninit;
+       }
 
        gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);
 
@@ -386,7 +376,7 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
                if (error)
                        goto out_alloc_put;
 
-               error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+               error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;
 
@@ -482,8 +472,10 @@ static int gfs2_commit_write(struct file *file, struct page *page,
 
                SetPageUptodate(page);
 
-               if (inode->i_size < file_size)
+               if (inode->i_size < file_size) {
                        i_size_write(inode, file_size);
+                       mark_inode_dirty(inode);
+               }
        } else {
                if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
                    gfs2_is_jdata(ip))
@@ -498,11 +490,6 @@ static int gfs2_commit_write(struct file *file, struct page *page,
                di->di_size = cpu_to_be64(inode->i_size);
        }
 
-       di->di_mode = cpu_to_be32(inode->i_mode);
-       di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
-       di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
-       di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
-
        brelse(dibh);
        gfs2_trans_end(sdp);
        if (al->al_requested) {
@@ -624,7 +611,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
         * on this path. All we need change is atime.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
-       rv = gfs2_glock_nq_m_atime(1, &gh);
+       rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                goto out;
 
@@ -737,6 +724,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
                        if (!atomic_read(&aspace->i_writecount))
                                return 0;
 
+                       if (!(gfp_mask & __GFP_WAIT))
+                               return 0;
+
                        if (time_after_eq(jiffies, t)) {
                                stuck_releasepage(bh);
                                /* should we withdraw here? */
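
The gfs2_readpage() and gfs2_prepare_write() changes above acquire the glock with LM_FLAG_TRY_1CB and map a GLR_TRYFAILED result to AOP_TRUNCATED_PAGE, so the VFS drops the page lock and retries instead of blocking with it held. A loose userspace analogue of that try-lock-and-retry idea using pthread_mutex_trylock(); the names and return codes are illustrative only:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;

static int read_page_once(void)
{
        if (pthread_mutex_trylock(&glock) != 0)
                return -EAGAIN;         /* caller backs out and retries */
        printf("page filled\n");
        pthread_mutex_unlock(&glock);
        return 0;
}

int main(void)
{
        while (read_page_once() == -EAGAIN)
                ;                       /* VFS-style retry loop */
        return 0;
}
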
index 00041b1b802522254a8cb8689f4a67d245101808..d355899585d822b800d15f2d8826734bc2d02a6f 100644 (file)
@@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
        struct inode *inode = dentry->d_inode;
        struct gfs2_holder d_gh;
        struct gfs2_inode *ip;
-       struct gfs2_inum inum;
+       struct gfs2_inum_host inum;
        unsigned int type;
        int error;
 
@@ -76,7 +76,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
        if (!gfs2_inum_equal(&ip->i_num, &inum))
                goto invalid_gunlock;
 
-       if (IF2DT(ip->i_di.di_mode) != type) {
+       if (IF2DT(ip->i_inode.i_mode) != type) {
                gfs2_consist_inode(dip);
                goto fail_gunlock;
        }
index 86127d93bd35f6f846f7d5b46f53cfd4fa0ccb53..b4e7b8775315e34dd70ad60b935875d3ed274b1c 100644 (file)
 #include "util.h"
 
 static struct dentry *gfs2_decode_fh(struct super_block *sb,
-                                    __u32 *fh,
+                                    __u32 *p,
                                     int fh_len,
                                     int fh_type,
                                     int (*acceptable)(void *context,
                                                       struct dentry *dentry),
                                     void *context)
 {
+       __be32 *fh = (__force __be32 *)p;
        struct gfs2_fh_obj fh_obj;
-       struct gfs2_inum *this, parent;
+       struct gfs2_inum_host *this, parent;
 
        if (fh_type != fh_len)
                return NULL;
@@ -65,9 +66,10 @@ static struct dentry *gfs2_decode_fh(struct super_block *sb,
                                                    acceptable, context);
 }
 
-static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
+static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
                          int connectable)
 {
+       __be32 *fh = (__force __be32 *)p;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        struct gfs2_inode *ip = GFS2_I(inode);
@@ -76,14 +78,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
            (connectable && *len < GFS2_LARGE_FH_SIZE))
                return 255;
 
-       fh[0] = ip->i_num.no_formal_ino >> 32;
-       fh[0] = cpu_to_be32(fh[0]);
-       fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
-       fh[1] = cpu_to_be32(fh[1]);
-       fh[2] = ip->i_num.no_addr >> 32;
-       fh[2] = cpu_to_be32(fh[2]);
-       fh[3] = ip->i_num.no_addr & 0xFFFFFFFF;
-       fh[3] = cpu_to_be32(fh[3]);
+       fh[0] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
+       fh[1] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
+       fh[2] = cpu_to_be32(ip->i_num.no_addr >> 32);
+       fh[3] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
        *len = GFS2_SMALL_FH_SIZE;
 
        if (!connectable || inode == sb->s_root->d_inode)
@@ -95,14 +93,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
        igrab(inode);
        spin_unlock(&dentry->d_lock);
 
-       fh[4] = ip->i_num.no_formal_ino >> 32;
-       fh[4] = cpu_to_be32(fh[4]);
-       fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
-       fh[5] = cpu_to_be32(fh[5]);
-       fh[6] = ip->i_num.no_addr >> 32;
-       fh[6] = cpu_to_be32(fh[6]);
-       fh[7] = ip->i_num.no_addr & 0xFFFFFFFF;
-       fh[7] = cpu_to_be32(fh[7]);
+       fh[4] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
+       fh[5] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
+       fh[6] = cpu_to_be32(ip->i_num.no_addr >> 32);
+       fh[7] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
 
        fh[8]  = cpu_to_be32(inode->i_mode);
        fh[9]  = 0;     /* pad to double word */
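
The rewritten gfs2_encode_fh() above packs each 64-bit inode number into two big-endian 32-bit words in one assignment per word. A tiny userspace sketch of the same packing, with htonl() standing in for cpu_to_be32():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static void pack_u64_be32(uint64_t v, uint32_t out[2])
{
        out[0] = htonl((uint32_t)(v >> 32));           /* high word first */
        out[1] = htonl((uint32_t)(v & 0xFFFFFFFF));
}

int main(void)
{
        uint32_t fh[2];

        pack_u64_be32(0x0000000123456789ULL, fh);
        printf("%08x %08x\n", fh[0], fh[1]);
        return 0;
}
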
@@ -114,12 +108,12 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
 }
 
 struct get_name_filldir {
-       struct gfs2_inum inum;
+       struct gfs2_inum_host inum;
        char *name;
 };
 
 static int get_name_filldir(void *opaque, const char *name, unsigned int length,
-                           u64 offset, struct gfs2_inum *inum,
+                           u64 offset, struct gfs2_inum_host *inum,
                            unsigned int type)
 {
        struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque;
@@ -202,7 +196,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj;
-       struct gfs2_inum *inum = &fh_obj->this;
+       struct gfs2_inum_host *inum = &fh_obj->this;
        struct gfs2_holder i_gh, ri_gh, rgd_gh;
        struct gfs2_rgrpd *rgd;
        struct inode *inode;
index 09aca5046fb177396ef020e01240b994bc76b9c0..f925a955b3b86124a1c43634a2659ed4fa0b1c11 100644 (file)
@@ -15,7 +15,7 @@
 
 extern struct export_operations gfs2_export_ops;
 struct gfs2_fh_obj {
-       struct gfs2_inum this;
+       struct gfs2_inum_host this;
        __u32            imode;
 };
 
index 3064f133bf3c23380bbb6ee2bc5e824fdfd6dfa9..b3f1e0349ae02f4298421afd99a4caee12026f1e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/ext2_fs.h>
 #include <linux/crc32.h>
 #include <linux/lm_interface.h>
+#include <linux/writeback.h>
 #include <asm/uaccess.h>
 
 #include "gfs2.h"
@@ -71,7 +72,7 @@ static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
                size = count;
 
        kaddr = kmap(page);
-       memcpy(desc->arg.buf, kaddr + offset, size);
+       memcpy(desc->arg.data, kaddr + offset, size);
        kunmap(page);
 
        desc->count = count - size;
@@ -86,7 +87,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
        struct inode *inode = &ip->i_inode;
        read_descriptor_t desc;
        desc.written = 0;
-       desc.arg.buf = buf;
+       desc.arg.data = buf;
        desc.count = size;
        desc.error = 0;
        do_generic_mapping_read(inode->i_mapping, ra_state,
@@ -139,7 +140,7 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
  */
 
 static int filldir_func(void *opaque, const char *name, unsigned int length,
-                       u64 offset, struct gfs2_inum *inum,
+                       u64 offset, struct gfs2_inum_host *inum,
                        unsigned int type)
 {
        struct filldir_reg *fdr = (struct filldir_reg *)opaque;
@@ -253,7 +254,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
        u32 fsflags;
 
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
-       error = gfs2_glock_nq_m_atime(1, &gh);
+       error = gfs2_glock_nq_atime(&gh);
        if (error)
                return error;
 
@@ -266,6 +267,24 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
        return error;
 }
 
+void gfs2_set_inode_flags(struct inode *inode)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_dinode_host *di = &ip->i_di;
+       unsigned int flags = inode->i_flags;
+
+       flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+       if (di->di_flags & GFS2_DIF_IMMUTABLE)
+               flags |= S_IMMUTABLE;
+       if (di->di_flags & GFS2_DIF_APPENDONLY)
+               flags |= S_APPEND;
+       if (di->di_flags & GFS2_DIF_NOATIME)
+               flags |= S_NOATIME;
+       if (di->di_flags & GFS2_DIF_SYNC)
+               flags |= S_SYNC;
+       inode->i_flags = flags;
+}
+
 /* Flags that can be set by user space */
 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|                   \
                             GFS2_DIF_DIRECTIO|                 \
@@ -336,8 +355,9 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
                goto out_trans_end;
        gfs2_trans_add_bh(ip->i_gl, bh, 1);
        ip->i_di.di_flags = new_flags;
-       gfs2_dinode_out(&ip->i_di, bh->b_data);
+       gfs2_dinode_out(ip, bh->b_data);
        brelse(bh);
+       gfs2_set_inode_flags(inode);
 out_trans_end:
        gfs2_trans_end(sdp);
 out:
@@ -425,7 +445,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
        gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
        file->private_data = fp;
 
-       if (S_ISREG(ip->i_di.di_mode)) {
+       if (S_ISREG(ip->i_inode.i_mode)) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
                                           &i_gh);
                if (error)
@@ -484,16 +504,40 @@ static int gfs2_close(struct inode *inode, struct file *file)
  * @file: the file that points to the dentry (we ignore this)
  * @dentry: the dentry that points to the inode to sync
  *
+ * The VFS will flush "normal" data for us. We only need to worry
+ * about metadata here. For journaled data, we just do a log flush
+ * as we can't avoid it. Otherwise we can just bail out if datasync
+ * is set. For stuffed inodes we must flush the log in order to
+ * ensure that all data is on disk.
+ *
+ * The call to write_inode_now() is there to write back metadata and
+ * the inode itself. It also tries to write the data, but that's
+ * (hopefully) a no-op due to the VFS having already called filemap_fdatawrite()
+ * for us.
+ *
  * Returns: errno
  */
 
 static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
 {
-       struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+       struct inode *inode = dentry->d_inode;
+       int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
+       int ret = 0;
 
-       gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
+       if (gfs2_is_jdata(GFS2_I(inode))) {
+               gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
+               return 0;
+       }
 
-       return 0;
+       if (sync_state != 0) {
+               if (!datasync)
+                       ret = write_inode_now(inode, 0);
+
+               if (gfs2_is_stuffed(GFS2_I(inode)))
+                       gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
+       }
+
+       return ret;
 }
 
 /**
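
The new gfs2_fsync() above distinguishes fsync() from fdatasync(): only the former writes the inode metadata back via write_inode_now(), while journaled and stuffed inodes additionally get a log flush. For reference, a plain POSIX userspace program that exercises both calls; it makes no GFS2-specific assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("testfile", O_WRONLY | O_CREAT, 0644);

        if (fd < 0)
                return 1;
        if (write(fd, "data\n", 5) != 5)
                return 1;
        if (fdatasync(fd))              /* data (and size) only */
                perror("fdatasync");
        if (fsync(fd))                  /* data plus all metadata */
                perror("fsync");
        close(fd);
        return 0;
}
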
@@ -515,7 +559,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
-       if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+       if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
                return -ENOLCK;
 
        if (sdp->sd_args.ar_localflocks) {
@@ -617,7 +661,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
-       if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+       if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
                return -ENOLCK;
 
        if (sdp->sd_args.ar_localflocks)
index ce319f89ec8e9ae95c64469a678a713465b28c76..7e5d8ec9c8463c1bbcc354d13379d57c8844324c 100644 (file)
@@ -17,7 +17,7 @@ extern struct file gfs2_internal_file_sentinel;
 extern int gfs2_internal_read(struct gfs2_inode *ip,
                              struct file_ra_state *ra_state,
                              char *buf, loff_t *pos, unsigned size);
-
+extern void gfs2_set_inode_flags(struct inode *inode);
 extern const struct file_operations gfs2_file_fops;
 extern const struct file_operations gfs2_dir_fops;
 
index 882873a6bd6909cf985c78ed859bbf54fb66c078..d14e139d267478395cf9b91ca31114c02fe4ba53 100644 (file)
@@ -237,7 +237,7 @@ fail:
 }
 
 static struct inode *gfs2_lookup_root(struct super_block *sb,
-                                     struct gfs2_inum *inum)
+                                     struct gfs2_inum_host *inum)
 {
        return gfs2_inode_lookup(sb, inum, DT_DIR);
 }
@@ -246,7 +246,7 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
 {
        struct super_block *sb = sdp->sd_vfs;
        struct gfs2_holder sb_gh;
-       struct gfs2_inum *inum;
+       struct gfs2_inum_host *inum;
        struct inode *inode;
        int error = 0;
 
index ef6e5ed70e94fea379c5dd5005d90ee71ae64331..636dda4c7d38d688c76923c6289e130e8d4754ac 100644 (file)
@@ -59,7 +59,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
        gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
        for (;;) {
-               inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode);
+               inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
                if (!IS_ERR(inode)) {
                        gfs2_trans_end(sdp);
                        if (dip->i_alloc.al_rgd)
@@ -144,7 +144,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
        int alloc_required;
        int error;
 
-       if (S_ISDIR(ip->i_di.di_mode))
+       if (S_ISDIR(inode->i_mode))
                return -EPERM;
 
        gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
@@ -169,7 +169,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        error = -EINVAL;
-       if (!dip->i_di.di_nlink)
+       if (!dip->i_inode.i_nlink)
                goto out_gunlock;
        error = -EFBIG;
        if (dip->i_di.di_entries == (u32)-1)
@@ -178,10 +178,10 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
                goto out_gunlock;
        error = -EINVAL;
-       if (!ip->i_di.di_nlink)
+       if (!ip->i_inode.i_nlink)
                goto out_gunlock;
        error = -EMLINK;
-       if (ip->i_di.di_nlink == (u32)-1)
+       if (ip->i_inode.i_nlink == (u32)-1)
                goto out_gunlock;
 
        alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
@@ -196,8 +196,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
                if (error)
                        goto out_alloc;
 
-               error = gfs2_quota_check(dip, dip->i_di.di_uid,
-                                        dip->i_di.di_gid);
+               error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
                if (error)
                        goto out_gunlock_q;
 
@@ -220,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num,
-                            IF2DT(ip->i_di.di_mode));
+                            IF2DT(inode->i_mode));
        if (error)
                goto out_end_trans;
 
@@ -326,7 +325,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
 
        gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
-       inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO);
+       inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0);
        if (IS_ERR(inode)) {
                gfs2_holder_uninit(ghs);
                return PTR_ERR(inode);
@@ -339,7 +338,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
        error = gfs2_meta_inode_buffer(ip, &dibh);
 
        if (!gfs2_assert_withdraw(sdp, !error)) {
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
                       size);
                brelse(dibh);
@@ -379,7 +378,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
-       inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode);
+       inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
        if (IS_ERR(inode)) {
                gfs2_holder_uninit(ghs);
                return PTR_ERR(inode);
@@ -387,10 +386,9 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 
        ip = ghs[1].gh_gl->gl_object;
 
-       ip->i_di.di_nlink = 2;
+       ip->i_inode.i_nlink = 2;
        ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
        ip->i_di.di_flags |= GFS2_DIF_JDATA;
-       ip->i_di.di_payload_format = GFS2_FORMAT_DE;
        ip->i_di.di_entries = 2;
 
        error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -414,7 +412,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                gfs2_inum_out(&dip->i_num, &dent->de_inum);
                dent->de_type = cpu_to_be16(DT_DIR);
 
-               gfs2_dinode_out(&ip->i_di, di);
+               gfs2_dinode_out(ip, di);
 
                brelse(dibh);
        }
@@ -467,7 +465,7 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
 
        if (ip->i_di.di_entries < 2) {
                if (gfs2_consist_inode(ip))
-                       gfs2_dinode_print(&ip->i_di);
+                       gfs2_dinode_print(ip);
                error = -EIO;
                goto out_gunlock;
        }
@@ -504,47 +502,19 @@ out:
 static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
                      dev_t dev)
 {
-       struct gfs2_inode *dip = GFS2_I(dir), *ip;
+       struct gfs2_inode *dip = GFS2_I(dir);
        struct gfs2_sbd *sdp = GFS2_SB(dir);
        struct gfs2_holder ghs[2];
        struct inode *inode;
-       struct buffer_head *dibh;
-       u32 major = 0, minor = 0;
-       int error;
-
-       switch (mode & S_IFMT) {
-       case S_IFBLK:
-       case S_IFCHR:
-               major = MAJOR(dev);
-               minor = MINOR(dev);
-               break;
-       case S_IFIFO:
-       case S_IFSOCK:
-               break;
-       default:
-               return -EOPNOTSUPP;
-       };
 
        gfs2_holder_init(dip->i_gl, 0, 0, ghs);
 
-       inode = gfs2_createi(ghs, &dentry->d_name, mode);
+       inode = gfs2_createi(ghs, &dentry->d_name, mode, dev);
        if (IS_ERR(inode)) {
                gfs2_holder_uninit(ghs);
                return PTR_ERR(inode);
        }
 
-       ip = ghs[1].gh_gl->gl_object;
-
-       ip->i_di.di_major = major;
-       ip->i_di.di_minor = minor;
-
-       error = gfs2_meta_inode_buffer(ip, &dibh);
-
-       if (!gfs2_assert_withdraw(sdp, !error)) {
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
-               brelse(dibh);
-       }
-
        gfs2_trans_end(sdp);
        if (dip->i_alloc.al_rgd)
                gfs2_inplace_release(dip);
@@ -592,11 +562,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
 
        /* Make sure we aren't trying to move a directory into its subdir */
 
-       if (S_ISDIR(ip->i_di.di_mode) && odip != ndip) {
+       if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) {
                dir_rename = 1;
 
-               error = gfs2_glock_nq_init(sdp->sd_rename_gl,
-                                          LM_ST_EXCLUSIVE, 0,
+               error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0,
                                           &r_gh);
                if (error)
                        goto out;
@@ -637,10 +606,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                if (error)
                        goto out_gunlock;
 
-               if (S_ISDIR(nip->i_di.di_mode)) {
+               if (S_ISDIR(nip->i_inode.i_mode)) {
                        if (nip->i_di.di_entries < 2) {
                                if (gfs2_consist_inode(nip))
-                                       gfs2_dinode_print(&nip->i_di);
+                                       gfs2_dinode_print(nip);
                                error = -EIO;
                                goto out_gunlock;
                        }
@@ -666,7 +635,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                };
 
                if (odip != ndip) {
-                       if (!ndip->i_di.di_nlink) {
+                       if (!ndip->i_inode.i_nlink) {
                                error = -EINVAL;
                                goto out_gunlock;
                        }
@@ -674,8 +643,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                                error = -EFBIG;
                                goto out_gunlock;
                        }
-                       if (S_ISDIR(ip->i_di.di_mode) &&
-                           ndip->i_di.di_nlink == (u32)-1) {
+                       if (S_ISDIR(ip->i_inode.i_mode) &&
+                           ndip->i_inode.i_nlink == (u32)-1) {
                                error = -EMLINK;
                                goto out_gunlock;
                        }
@@ -702,8 +671,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                if (error)
                        goto out_alloc;
 
-               error = gfs2_quota_check(ndip, ndip->i_di.di_uid,
-                                        ndip->i_di.di_gid);
+               error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid);
                if (error)
                        goto out_gunlock_q;
 
@@ -729,7 +697,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
        /* Remove the target file, if it exists */
 
        if (nip) {
-               if (S_ISDIR(nip->i_di.di_mode))
+               if (S_ISDIR(nip->i_inode.i_mode))
                        error = gfs2_rmdiri(ndip, &ndentry->d_name, nip);
                else {
                        error = gfs2_dir_del(ndip, &ndentry->d_name);
@@ -760,9 +728,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                error = gfs2_meta_inode_buffer(ip, &dibh);
                if (error)
                        goto out_end_trans;
-               ip->i_di.di_ctime = get_seconds();
+               ip->i_inode.i_ctime.tv_sec = get_seconds();
                gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-               gfs2_dinode_out(&ip->i_di, dibh->b_data);
+               gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }
 
@@ -771,7 +739,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                goto out_end_trans;
 
        error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num,
-                            IF2DT(ip->i_di.di_mode));
+                            IF2DT(ip->i_inode.i_mode));
        if (error)
                goto out_end_trans;
 
@@ -867,6 +835,10 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
  * @mask:
  * @nd: passed from Linux VFS, ignored by us
  *
+ * This may be called from the VFS directly, or from within GFS2 with the
+ * inode locked, so we look to see if the glock is already locked and only
+ * lock the glock if it has not already been done.
+ *
  * Returns: errno
  */
 
@@ -875,15 +847,18 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder i_gh;
        int error;
+       int unlock = 0;
 
-       if (ip->i_vn == ip->i_gl->gl_vn)
-               return generic_permission(inode, mask, gfs2_check_acl);
+       if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+               error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+               if (error)
+                       return error;
+               unlock = 1;
+       }
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
-       if (!error) {
-               error = generic_permission(inode, mask, gfs2_check_acl_locked);
+       error = generic_permission(inode, mask, gfs2_check_acl);
+       if (unlock)
                gfs2_glock_dq_uninit(&i_gh);
-       }
 
        return error;
 }
@@ -914,8 +889,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
        u32 ouid, ogid, nuid, ngid;
        int error;
 
-       ouid = ip->i_di.di_uid;
-       ogid = ip->i_di.di_gid;
+       ouid = inode->i_uid;
+       ogid = inode->i_gid;
        nuid = attr->ia_uid;
        ngid = attr->ia_gid;
 
@@ -946,10 +921,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
 
        error = inode_setattr(inode, attr);
        gfs2_assert_warn(sdp, !error);
-       gfs2_inode_attr_out(ip);
 
        gfs2_trans_add_bh(ip->i_gl, dibh, 1);
-       gfs2_dinode_out(&ip->i_di, dibh->b_data);
+       gfs2_dinode_out(ip, dibh->b_data);
        brelse(dibh);
 
        if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
@@ -1018,6 +992,12 @@ out:
  * @dentry: The dentry to stat
  * @stat: The inode's stats
  *
+ * This may be called from the VFS directly, or from within GFS2 with the
+ * inode locked, so we look to see if the glock is already locked and only
+ * lock the glock if it has not already been done. Note that it is the NFS
+ * readdirplus operation which causes this to be called (from filldir)
+ * with the glock already held.
+ *
  * Returns: errno
  */
 
@@ -1028,14 +1008,20 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
+       int unlock = 0;
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
-       if (!error) {
-               generic_fillattr(inode, stat);
-               gfs2_glock_dq_uninit(&gh);
+       if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
+               error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
+               if (error)
+                       return error;
+               unlock = 1;
        }
 
-       return error;
+       generic_fillattr(inode, stat);
+       if (unlock)
+               gfs2_glock_dq_uninit(&gh);
+
+       return 0;
 }
 
 static int gfs2_setxattr(struct dentry *dentry, const char *name,
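
Both gfs2_permission() and gfs2_getattr() above now check gfs2_glock_is_locked_by_me() and take the glock only when the caller does not already hold it, remembering whether to drop it on the way out. A compact userspace sketch of that conditional-locking pattern; the ownership tracking here is an illustrative stand-in, not the real glock code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t glock_owner;
static int glock_held;

static void glock_acquire(void)
{
        pthread_mutex_lock(&glock);
        glock_owner = pthread_self();
        glock_held = 1;
}

static void glock_release(void)
{
        glock_held = 0;
        pthread_mutex_unlock(&glock);
}

static int locked_by_me(void)
{
        return glock_held && pthread_equal(glock_owner, pthread_self());
}

static void do_getattr(void)
{
        int unlock = 0;

        if (!locked_by_me()) {
                glock_acquire();
                unlock = 1;
        }
        printf("filling attributes\n");
        if (unlock)
                glock_release();
}

int main(void)
{
        do_getattr();           /* takes and drops the lock itself */

        glock_acquire();        /* caller already holds the lock */
        do_getattr();           /* must not try to lock again */
        glock_release();
        return 0;
}
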
index b47d9598c047e23324c5860cc9798e41d6ad72cb..7685b46f934b4ad07c419a4b11b4eef60aa0c167 100644 (file)
@@ -157,7 +157,8 @@ static void gfs2_write_super(struct super_block *sb)
 static int gfs2_sync_fs(struct super_block *sb, int wait)
 {
        sb->s_dirt = 0;
-       gfs2_log_flush(sb->s_fs_info, NULL);
+       if (wait)
+               gfs2_log_flush(sb->s_fs_info, NULL);
        return 0;
 }
 
@@ -215,7 +216,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_inode->i_sb;
        struct gfs2_sbd *sdp = sb->s_fs_info;
-       struct gfs2_statfs_change sc;
+       struct gfs2_statfs_change_host sc;
        int error;
 
        if (gfs2_tune_get(sdp, gt_statfs_slow))
@@ -293,8 +294,6 @@ static void gfs2_clear_inode(struct inode *inode)
         */
        if (inode->i_private) {
                struct gfs2_inode *ip = GFS2_I(inode);
-               gfs2_glock_inode_squish(inode);
-               gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED);
                ip->i_gl->gl_object = NULL;
                gfs2_glock_schedule_for_reclaim(ip->i_gl);
                gfs2_glock_put(ip->i_gl);
@@ -395,7 +394,7 @@ static void gfs2_delete_inode(struct inode *inode)
        if (!inode->i_private)
                goto out;
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh);
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &gh);
        if (unlikely(error)) {
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
@@ -407,7 +406,7 @@ static void gfs2_delete_inode(struct inode *inode)
        if (error)
                goto out_uninit;
 
-       if (S_ISDIR(ip->i_di.di_mode) &&
+       if (S_ISDIR(inode->i_mode) &&
            (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
                if (error)
index 5453d2947ab3a40d7743925834349575f733536a..45a5f11fc39a88b0a53953e590cae739c4ad920b 100644 (file)
@@ -76,7 +76,7 @@ static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
        if (error)
                goto out;
 
-       error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
+       error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
        if (error)
                goto out_gunlock_q;
 
index a3deae7416c92d9ed8d78beae04dbea92165a73e..d0db881b55d2cf34231335d1c2848f1fd131af30 100644 (file)
@@ -452,19 +452,19 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;
 
-       error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);
+       error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;
 
-       error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);
+       error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;
 
-       if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
+       if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
                if (error)
                        goto out;
@@ -472,7 +472,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
                qd++;
        }
 
-       if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
+       if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
                if (error)
                        goto out;
@@ -539,8 +539,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
                qc->qc_id = cpu_to_be32(qd->qd_id);
        }
 
-       x = qc->qc_change;
-       x = be64_to_cpu(x) + change;
+       x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);
 
        spin_lock(&sdp->sd_quota_spin);
@@ -743,7 +742,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
-       struct gfs2_quota q;
+       struct gfs2_quota_host q;
        char buf[sizeof(struct gfs2_quota)];
        struct file_ra_state ra_state;
        int error;
@@ -1103,7 +1102,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
 
                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
-                       struct gfs2_quota_change qc;
+                       struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;
 
                        gfs2_quota_change_in(&qc, bh->b_data +
index 62cd223819b7aa4536335ef460ed9382ac90eb12..d0c806b85c864387cffec4a7a82a9dd8d5deb329 100644 (file)
@@ -132,10 +132,11 @@ void gfs2_revoke_clean(struct gfs2_sbd *sdp)
  */
 
 static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
-                         struct gfs2_log_header *head)
+                         struct gfs2_log_header_host *head)
 {
        struct buffer_head *bh;
-       struct gfs2_log_header lh;
+       struct gfs2_log_header_host lh;
+       const u32 nothing = 0;
        u32 hash;
        int error;
 
@@ -143,11 +144,11 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
        if (error)
                return error;
 
-       memcpy(&lh, bh->b_data, sizeof(struct gfs2_log_header));
-       lh.lh_hash = 0;
-       hash = gfs2_disk_hash((char *)&lh, sizeof(struct gfs2_log_header));
+       hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) -
+                                            sizeof(u32));
+       hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing));
+       hash ^= (u32)~0;
        gfs2_log_header_in(&lh, bh->b_data);
-
        brelse(bh);
 
        if (lh.lh_header.mh_magic != GFS2_MAGIC ||
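
[Editorial note, not part of the patch: the rewritten check above computes the header hash by CRC-ing the on-disk log header up to the stored lh_hash field and then feeding four zero bytes in its place, which avoids the temporary copy the old code needed. Assuming gfs2_disk_hash() is the usual ~0-seeded little-endian CRC32 with a final inversion, and that lh_hash is the last 32-bit field of the on-disk header, this is equivalent to the older form sketched below.]

static u32 old_style_log_header_hash(const struct gfs2_log_header *disk_lh)
{
	struct gfs2_log_header tmp = *disk_lh;

	/* Zero the stored hash, then CRC the whole header. */
	tmp.lh_hash = 0;
	return crc32_le((u32)~0, (unsigned char const *)&tmp, sizeof(tmp)) ^ (u32)~0;
}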
@@ -174,7 +175,7 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
  */
 
 static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
-                       struct gfs2_log_header *head)
+                       struct gfs2_log_header_host *head)
 {
        unsigned int orig_blk = *blk;
        int error;
@@ -205,10 +206,10 @@ static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
  * Returns: errno
  */
 
-static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
+static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
 {
        unsigned int blk = head->lh_blkno;
-       struct gfs2_log_header lh;
+       struct gfs2_log_header_host lh;
        int error;
 
        for (;;) {
@@ -245,9 +246,9 @@ static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
  * Returns: errno
  */
 
-int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
+int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
 {
-       struct gfs2_log_header lh_1, lh_m;
+       struct gfs2_log_header_host lh_1, lh_m;
        u32 blk_1, blk_2, blk_m;
        int error;
 
@@ -320,7 +321,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
                length = be32_to_cpu(ld->ld_length);
 
                if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) {
-                       struct gfs2_log_header lh;
+                       struct gfs2_log_header_host lh;
                        error = get_log_header(jd, start, &lh);
                        if (!error) {
                                gfs2_replay_incr_blk(sdp, &start);
@@ -363,7 +364,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
  * Returns: errno
  */
 
-static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
+static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
 {
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
@@ -425,7 +426,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
 {
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
-       struct gfs2_log_header head;
+       struct gfs2_log_header_host head;
        struct gfs2_holder j_gh, ji_gh, t_gh;
        unsigned long t;
        int ro = 0;
index 961feedf4d8bb026949d8bac19542c2c651281a2..f7235e61c723bfe7ac106df41242785ff42de4ee 100644 (file)
@@ -26,7 +26,7 @@ int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
 void gfs2_revoke_clean(struct gfs2_sbd *sdp);
 
 int gfs2_find_jhead(struct gfs2_jdesc *jd,
-                   struct gfs2_log_header *head);
+                   struct gfs2_log_header_host *head);
 int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
 void gfs2_check_journals(struct gfs2_sbd *sdp);
 
index b261385c006556d7633ed24cb8a34773ca02cfac..ff0846528d5475cb9cc6ec5021c8aad42a96128c 100644 (file)
@@ -253,7 +253,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
 
 }
 
-static inline int rgrp_contains_block(struct gfs2_rindex *ri, u64 block)
+static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block)
 {
        u64 first = ri->ri_data0;
        u64 last = first + ri->ri_data;
@@ -1217,7 +1217,7 @@ u64 gfs2_alloc_data(struct gfs2_inode *ip)
        al->al_alloced++;
 
        gfs2_statfs_change(sdp, 0, -1, 0);
-       gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
+       gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
 
        spin_lock(&sdp->sd_rindex_spin);
        rgd->rd_free_clone--;
@@ -1261,7 +1261,7 @@ u64 gfs2_alloc_meta(struct gfs2_inode *ip)
        al->al_alloced++;
 
        gfs2_statfs_change(sdp, 0, -1, 0);
-       gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
+       gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
        gfs2_trans_add_unrevoke(sdp, block);
 
        spin_lock(&sdp->sd_rindex_spin);
@@ -1337,8 +1337,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
        gfs2_trans_add_rg(rgd);
 
        gfs2_statfs_change(sdp, 0, +blen, 0);
-       gfs2_quota_change(ip, -(s64)blen,
-                        ip->i_di.di_uid, ip->i_di.di_gid);
+       gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
 }
 
 /**
@@ -1366,7 +1365,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
        gfs2_trans_add_rg(rgd);
 
        gfs2_statfs_change(sdp, 0, +blen, 0);
-       gfs2_quota_change(ip, -(s64)blen, ip->i_di.di_uid, ip->i_di.di_gid);
+       gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
        gfs2_meta_wipe(ip, bstart, blen);
 }
 
@@ -1411,7 +1410,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
 {
        gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
-       gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
+       gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
        gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
 }
 
index 6a78b1b32e25597fac6a2ef0e0f13d7a2c3be78e..43a24f2e5905f9a26be19bcc032b4761a777baef 100644 (file)
@@ -97,7 +97,7 @@ void gfs2_tune_init(struct gfs2_tune *gt)
  * changed.
  */
 
-int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent)
+int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
 {
        unsigned int x;
 
@@ -180,6 +180,24 @@ static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
        return 0;
 }
 
+/**
+ * gfs2_read_super - Read the gfs2 super block from disk
+ * @sb: The VFS super block
+ * @sector: The location of the super block
+ *
+ * This uses the bio functions to read the super block from disk
+ * because we want to be 100% sure that we never read cached data.
+ * A super block is read twice only during each GFS2 mount and is
+ * never written to by the filesystem. The first time it is read no
+ * locks are held, and the only details which are looked at are those
+ * relating to the locking protocol. Once locking is up and working,
+ * the sb is read again under the lock to establish the location of
+ * the master directory (contains pointers to journals etc) and the
+ * root directory.
+ *
+ * Returns: A page containing the sb or NULL
+ */
+
 struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
 {
        struct page *page;
@@ -199,7 +217,7 @@ struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
                return NULL;
        }
 
-       bio->bi_sector = sector;
+       bio->bi_sector = sector * (sb->s_blocksize >> 9);
        bio->bi_bdev = sb->s_bdev;
        bio_add_page(bio, page, PAGE_SIZE, 0);
 
@@ -508,7 +526,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_holder t_gh;
-       struct gfs2_log_header head;
+       struct gfs2_log_header_host head;
        int error;
 
        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
@@ -517,7 +535,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
                return error;
 
        gfs2_meta_cache_flush(ip);
-       j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
+       j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
        error = gfs2_find_jhead(sdp->sd_jdesc, &head);
        if (error)
@@ -587,9 +605,9 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 int gfs2_statfs_init(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
-       struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
+       struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
-       struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+       struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        struct gfs2_holder gh;
        int error;
@@ -634,7 +652,7 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
                        s64 dinodes)
 {
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
-       struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+       struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *l_bh;
        int error;
 
@@ -660,8 +678,8 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
-       struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
-       struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+       struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+       struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_holder gh;
        struct buffer_head *m_bh, *l_bh;
        int error;
@@ -727,10 +745,10 @@ out:
  * Returns: errno
  */
 
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
+int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
 {
-       struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
-       struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
+       struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
+       struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
 
        spin_lock(&sdp->sd_statfs_spin);
 
@@ -760,7 +778,7 @@ int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
  */
 
 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
-                           struct gfs2_statfs_change *sc)
+                           struct gfs2_statfs_change_host *sc)
 {
        gfs2_rgrp_verify(rgd);
        sc->sc_total += rgd->rd_ri.ri_data;
@@ -782,7 +800,7 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
  * Returns: errno
  */
 
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
+int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
 {
        struct gfs2_holder ri_gh;
        struct gfs2_rgrpd *rgd_next;
@@ -792,7 +810,7 @@ int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
        int done;
        int error = 0, err;
 
-       memset(sc, 0, sizeof(struct gfs2_statfs_change));
+       memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
        gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!gha)
                return -ENOMEM;
@@ -873,7 +891,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
        struct gfs2_jdesc *jd;
        struct lfcc *lfcc;
        LIST_HEAD(list);
-       struct gfs2_log_header lh;
+       struct gfs2_log_header_host lh;
        int error;
 
        error = gfs2_jindex_hold(sdp, &ji_gh);
index 5bb443ae0f5902c5ab51437d6b04294aa3d2e1b1..e590b2df11dc1c88343743132a64ece6ae83ddd9 100644 (file)
@@ -14,7 +14,7 @@
 
 void gfs2_tune_init(struct gfs2_tune *gt);
 
-int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent);
+int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
 int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
 struct page *gfs2_read_super(struct super_block *sb, sector_t sector);
 
@@ -45,8 +45,8 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp);
 void gfs2_statfs_change(struct gfs2_sbd *sdp,
                        s64 total, s64 free, s64 dinodes);
 int gfs2_statfs_sync(struct gfs2_sbd *sdp);
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
+int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
+int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
 
 int gfs2_freeze_fs(struct gfs2_sbd *sdp);
 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
index 0e0ec988f731c5ffd19306fcf34679c05ffa03d2..983eaf1e06becb3e0c6516bb04ffa397bc121ab9 100644 (file)
@@ -426,9 +426,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
 }                                                                             \
 TUNE_ATTR_2(name, name##_store)
 
-TUNE_ATTR(ilimit, 0);
-TUNE_ATTR(ilimit_tries, 0);
-TUNE_ATTR(ilimit_min, 0);
 TUNE_ATTR(demote_secs, 0);
 TUNE_ATTR(incore_log_blocks, 0);
 TUNE_ATTR(log_flush_secs, 0);
@@ -447,7 +444,6 @@ TUNE_ATTR(quota_simul_sync, 1);
 TUNE_ATTR(quota_cache_secs, 1);
 TUNE_ATTR(max_atomic_write, 1);
 TUNE_ATTR(stall_secs, 1);
-TUNE_ATTR(entries_per_readdir, 1);
 TUNE_ATTR(greedy_default, 1);
 TUNE_ATTR(greedy_quantum, 1);
 TUNE_ATTR(greedy_max, 1);
@@ -459,9 +455,6 @@ TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
 static struct attribute *tune_attrs[] = {
-       &tune_attr_ilimit.attr,
-       &tune_attr_ilimit_tries.attr,
-       &tune_attr_ilimit_min.attr,
        &tune_attr_demote_secs.attr,
        &tune_attr_incore_log_blocks.attr,
        &tune_attr_log_flush_secs.attr,
@@ -478,7 +471,6 @@ static struct attribute *tune_attrs[] = {
        &tune_attr_quota_cache_secs.attr,
        &tune_attr_max_atomic_write.attr,
        &tune_attr_stall_secs.attr,
-       &tune_attr_entries_per_readdir.attr,
        &tune_attr_greedy_default.attr,
        &tune_attr_greedy_quantum.attr,
        &tune_attr_greedy_max.attr,
index 196c604faadcfe16132fba320f070bbd88138bc8..e5707a9f78c2091901813acee8d4db23a1e06029 100644 (file)
@@ -23,9 +23,9 @@
 #include "lm.h"
 #include "util.h"
 
-kmem_cache_t *gfs2_glock_cachep __read_mostly;
-kmem_cache_t *gfs2_inode_cachep __read_mostly;
-kmem_cache_t *gfs2_bufdata_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_inode_cachep __read_mostly;
+struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
 
 void gfs2_assert_i(struct gfs2_sbd *sdp)
 {
index 76a50899fe9e2252074a3aa844723d545802581c..28938a46cf4785c63544ff4bea34aa5ef6052b32 100644 (file)
@@ -83,8 +83,7 @@ static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp,
                                    char *file, unsigned int line)
 {
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
-       u32 magic = mh->mh_magic;
-       magic = be32_to_cpu(magic);
+       u32 magic = be32_to_cpu(mh->mh_magic);
        if (unlikely(magic != GFS2_MAGIC))
                return gfs2_meta_check_ii(sdp, bh, "magic number", function,
                                          file, line);
@@ -107,9 +106,8 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
                                        char *file, unsigned int line)
 {
        struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
-       u32 magic = mh->mh_magic;
+       u32 magic = be32_to_cpu(mh->mh_magic);
        u16 t = be32_to_cpu(mh->mh_type);
-       magic = be32_to_cpu(magic);
        if (unlikely(magic != GFS2_MAGIC))
                return gfs2_meta_check_ii(sdp, bh, "magic number", function,
                                          file, line);
@@ -146,9 +144,9 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
 gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
 
 
-extern kmem_cache_t *gfs2_glock_cachep;
-extern kmem_cache_t *gfs2_inode_cachep;
-extern kmem_cache_t *gfs2_bufdata_cachep;
+extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_inode_cachep;
+extern struct kmem_cache *gfs2_bufdata_cachep;
 
 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
                                           unsigned int *p)
index 85b17b3fa4a0f768e8f6155800b582243a74f965..a3698796600439413d4d0536cd9edf8f2184201a 100644 (file)
@@ -24,7 +24,7 @@
 #include "hfs_fs.h"
 #include "btree.h"
 
-static kmem_cache_t *hfs_inode_cachep;
+static struct kmem_cache *hfs_inode_cachep;
 
 MODULE_LICENSE("GPL");
 
@@ -145,7 +145,7 @@ static struct inode *hfs_alloc_inode(struct super_block *sb)
 {
        struct hfs_inode_info *i;
 
-       i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL);
+       i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
        return i ? &i->vfs_inode : NULL;
 }
 
@@ -430,7 +430,7 @@ static struct file_system_type hfs_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
        struct hfs_inode_info *i = p;
 
index 194eede52fa46005949042c37d8e57e3ef8fe7db..0f513c6bf8434adfc311c57e6829e9b3c23663e6 100644 (file)
@@ -434,13 +434,13 @@ MODULE_AUTHOR("Brad Boyer");
 MODULE_DESCRIPTION("Extended Macintosh Filesystem");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t *hfsplus_inode_cachep;
+static struct kmem_cache *hfsplus_inode_cachep;
 
 static struct inode *hfsplus_alloc_inode(struct super_block *sb)
 {
        struct hfsplus_inode_info *i;
 
-       i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL);
+       i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
        return i ? &i->vfs_inode : NULL;
 }
 
@@ -467,7 +467,7 @@ static struct file_system_type hfsplus_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
        struct hfsplus_inode_info *i = p;
 
index ecc9180645ae1a12b70beab00bc5561f602c4a65..594f9c428fc20227ad8dcfcaffecaeeb7de143e8 100644 (file)
@@ -84,7 +84,8 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
                if (!fno->dirflag) {
                        e = 1;
-                       hpfs_error(inode->i_sb, "not a directory, fnode %08x",inode->i_ino);
+                       hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
+                                       (unsigned long)inode->i_ino);
                }
                if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) {
                        e = 1;
@@ -144,8 +145,11 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                }
                if (de->first || de->last) {
                        if (hpfs_sb(inode->i_sb)->sb_chk) {
-                               if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08x", old_pos);
-                               if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08x", old_pos);
+                               if (de->first && !de->last && (de->namelen != 2
+                                   || de ->name[0] != 1 || de->name[1] != 1))
+                                       hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", old_pos);
+                               if (de->last && (de->namelen != 1 || de ->name[0] != 255))
+                                       hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos);
                        }
                        hpfs_brelse4(&qbh);
                        goto again;
index 229ff2fb1809f7b6276c66655b47679f013225c6..fe83c2b7d2d8bb8899ca47e0f90ce5741ff3bbc4 100644 (file)
@@ -533,10 +533,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                        struct buffer_head *bh;
                        struct dnode *d1;
                        struct quad_buffer_head qbh1;
-                       if (hpfs_sb(i->i_sb)->sb_chk) if (up != i->i_ino) {
-                               hpfs_error(i->i_sb, "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08x", dno, up, i->i_ino);
+                       if (hpfs_sb(i->i_sb)->sb_chk)
+                           if (up != i->i_ino) {
+                               hpfs_error(i->i_sb,
+                                       "bad pointer to fnode, dnode %08x, pointing to %08x, should be %08lx",
+                                       dno, up, (unsigned long)i->i_ino);
                                return;
-                       }
+                           }
                        if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
                                d1->up = up;
                                d1->root_dnode = 1;
@@ -851,7 +854,9 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
        /* Going to the next dirent */
        if ((d = de_next_de(de)) < dnode_end_de(dnode)) {
                if (!(++*posp & 077)) {
-                       hpfs_error(inode->i_sb, "map_pos_dirent: pos crossed dnode boundary; pos = %08x", *posp);
+                       hpfs_error(inode->i_sb,
+                               "map_pos_dirent: pos crossed dnode boundary; pos = %08llx",
+                               (unsigned long long)*posp);
                        goto bail;
                }
                /* We're going down the tree */
index 66339dc030e4003849facc162d9822fa0c90e9e0..547a8384571fcc9d1b95282933879125db098efa 100644 (file)
@@ -243,8 +243,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, char *key, char *data
                fnode->ea_offs = 0xc4;
        }
        if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) {
-               hpfs_error(s, "fnode %08x: ea_offs == %03x, ea_size_s == %03x",
-                       inode->i_ino, fnode->ea_offs, fnode->ea_size_s);
+               hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
+                       (unsigned long)inode->i_ino,
+                       fnode->ea_offs, fnode->ea_size_s);
                return;
        }
        if ((fnode->ea_size_s || !fnode->ea_size_l) &&
index 32ab51e42b967e374726e9d5681c2970f55520e3..1c07aa82d32787ef3e7751c1f04c11ce0e8e8483 100644 (file)
@@ -317,7 +317,8 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
 
 /* super.c */
 
-void hpfs_error(struct super_block *, char *, ...);
+void hpfs_error(struct super_block *, const char *, ...)
+       __attribute__((format (printf, 2, 3)));
 int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
 unsigned hpfs_count_one_bitmap(struct super_block *, secno);
 
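
[Editorial note, not part of the patch: the printf-style format attribute added to the hpfs_error() prototype above is what lets gcc check format strings against their arguments, which is why the callers in the surrounding hunks switch from %08x to %08lx (or %08llx) with explicit casts for ino_t and loff_t values. A minimal sketch of the effect, with a hypothetical caller:]

void hpfs_error(struct super_block *s, const char *fmt, ...)
	__attribute__((format (printf, 2, 3)));

static void report_bad_fnode(struct super_block *s, unsigned long ino)
{
	hpfs_error(s, "bad fnode %08lx", ino);	/* correct */
	/*
	 * hpfs_error(s, "bad fnode %08x", ino) would now draw a -Wformat
	 * warning, because ino is unsigned long rather than unsigned int.
	 */
}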
index 7faef8544f320bd67933fe560992800235c36728..85d3e1d9ac000072df33c32b52da5e16394cc676 100644 (file)
@@ -251,7 +251,10 @@ void hpfs_write_inode_nolock(struct inode *i)
                        de->file_size = 0;
                        hpfs_mark_4buffers_dirty(&qbh);
                        hpfs_brelse4(&qbh);
-               } else hpfs_error(i->i_sb, "directory %08x doesn't have '.' entry", i->i_ino);
+               } else
+                       hpfs_error(i->i_sb,
+                               "directory %08lx doesn't have '.' entry",
+                               (unsigned long)i->i_ino);
        }
        mark_buffer_dirty(bh);
        brelse(bh);
index 0fecdac22e4e9602c5721d6d162105a9bdc7fee0..c4724589b2eba1854f9d5afc7a274c1a9beed066 100644 (file)
@@ -126,32 +126,40 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
                        struct extended_attribute *ea;
                        struct extended_attribute *ea_end;
                        if (fnode->magic != FNODE_MAGIC) {
-                               hpfs_error(s, "bad magic on fnode %08x", ino);
+                               hpfs_error(s, "bad magic on fnode %08lx",
+                                       (unsigned long)ino);
                                goto bail;
                        }
                        if (!fnode->dirflag) {
                                if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
                                    (fnode->btree.internal ? 12 : 8)) {
-                                       hpfs_error(s, "bad number of nodes in fnode %08x", ino);
+                                       hpfs_error(s,
+                                          "bad number of nodes in fnode %08lx",
+                                           (unsigned long)ino);
                                        goto bail;
                                }
                                if (fnode->btree.first_free !=
                                    8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
-                                       hpfs_error(s, "bad first_free pointer in fnode %08x", ino);
+                                       hpfs_error(s,
+                                           "bad first_free pointer in fnode %08lx",
+                                           (unsigned long)ino);
                                        goto bail;
                                }
                        }
                        if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 ||
                           (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) {
-                               hpfs_error(s, "bad EA info in fnode %08x: ea_offs == %04x ea_size_s == %04x",
-                                       ino, fnode->ea_offs, fnode->ea_size_s);
+                               hpfs_error(s,
+                                       "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
+                                       (unsigned long)ino,
+                                       fnode->ea_offs, fnode->ea_size_s);
                                goto bail;
                        }
                        ea = fnode_ea(fnode);
                        ea_end = fnode_end_ea(fnode);
                        while (ea != ea_end) {
                                if (ea > ea_end) {
-                                       hpfs_error(s, "bad EA in fnode %08x", ino);
+                                       hpfs_error(s, "bad EA in fnode %08lx",
+                                               (unsigned long)ino);
                                        goto bail;
                                }
                                ea = next_ea(ea);
index 450b5e0b4785c59431b002e5aa1db2b33aed70b8..d4abc1a1d56648b82ab3c8f8d8c8043ab03c95d9 100644 (file)
@@ -46,21 +46,17 @@ static void unmark_dirty(struct super_block *s)
 }
 
 /* Filesystem error... */
+static char err_buf[1024];
 
-#define ERR_BUF_SIZE 1024
-
-void hpfs_error(struct super_block *s, char *m,...)
+void hpfs_error(struct super_block *s, const char *fmt, ...)
 {
-       char *buf;
-       va_list l;
-       va_start(l, m);
-       if (!(buf = kmalloc(ERR_BUF_SIZE, GFP_KERNEL)))
-               printk("HPFS: No memory for error message '%s'\n",m);
-       else if (vsprintf(buf, m, l) >= ERR_BUF_SIZE)
-               printk("HPFS: Grrrr... Kernel memory corrupted ... going on, but it'll crash very soon :-(\n");
-       printk("HPFS: filesystem error: ");
-       if (buf) printk("%s", buf);
-       else printk("%s\n",m);
+       va_list args;
+
+       va_start(args, fmt);
+       vsnprintf(err_buf, sizeof(err_buf), fmt, args);
+       va_end(args);
+
+       printk("HPFS: filesystem error: %s", err_buf);
        if (!hpfs_sb(s)->sb_was_error) {
                if (hpfs_sb(s)->sb_err == 2) {
                        printk("; crashing the system because you wanted it\n");
@@ -76,7 +72,6 @@ void hpfs_error(struct super_block *s, char *m,...)
                } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
                else printk("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
        } else printk("\n");
-       kfree(buf);
        hpfs_sb(s)->sb_was_error = 1;
 }
 
@@ -160,12 +155,12 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
-static kmem_cache_t * hpfs_inode_cachep;
+static struct kmem_cache * hpfs_inode_cachep;
 
 static struct inode *hpfs_alloc_inode(struct super_block *sb)
 {
        struct hpfs_inode_info *ei;
-       ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, SLAB_NOFS);
+       ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
        ei->vfs_inode.i_version = 1;
@@ -177,7 +172,7 @@ static void hpfs_destroy_inode(struct inode *inode)
        kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
 
index 7f4756963d05e067464dcb9570db9ebba54b1170..0706f5aac6a24b2df7daf8a7c83b633f9fde6fae 100644 (file)
@@ -513,7 +513,7 @@ static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
 }
 
 
-static kmem_cache_t *hugetlbfs_inode_cachep;
+static struct kmem_cache *hugetlbfs_inode_cachep;
 
 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 {
@@ -522,7 +522,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 
        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
-       p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL);
+       p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
@@ -545,7 +545,7 @@ static const struct address_space_operations hugetlbfs_aops = {
 };
 
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
index 26cdb115ce67d5361222d39cf5e94039db654eec..9ecccab7326db2e032d52a0c4e3865a30c6354ee 100644 (file)
@@ -97,7 +97,7 @@ static DEFINE_MUTEX(iprune_mutex);
  */
 struct inodes_stat_t inodes_stat;
 
-static kmem_cache_t * inode_cachep __read_mostly;
+static struct kmem_cache * inode_cachep __read_mostly;
 
 static struct inode *alloc_inode(struct super_block *sb)
 {
@@ -109,7 +109,7 @@ static struct inode *alloc_inode(struct super_block *sb)
        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
-               inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
+               inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
 
        if (inode) {
                struct address_space * const mapping = &inode->i_data;
@@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode)
 
 EXPORT_SYMBOL(inode_init_once);
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct inode * inode = (struct inode *) foo;
 
@@ -1242,9 +1242,6 @@ EXPORT_SYMBOL(inode_needs_sync);
  */
 #ifdef CONFIG_QUOTA
 
-/* Function back in dquot.c */
-int remove_inode_dquot_ref(struct inode *, int, struct list_head *);
-
 void remove_dquot_ref(struct super_block *sb, int type,
                        struct list_head *tofree_head)
 {
index 017cb0f134d6aa600f5c37296b6c786bacb6cf7c..e1956e6f116c8a4c9d6de121a22ec8f463f69373 100644 (file)
@@ -34,8 +34,8 @@
 
 #include <asm/ioctls.h>
 
-static kmem_cache_t *watch_cachep __read_mostly;
-static kmem_cache_t *event_cachep __read_mostly;
+static struct kmem_cache *watch_cachep __read_mostly;
+static struct kmem_cache *event_cachep __read_mostly;
 
 static struct vfsmount *inotify_mnt __read_mostly;
 
index c34b862cdbf204cf5e7124408659f89108289a82..ea55b6c469ecdc8f0cb97a050f5b1d9ad0d04038 100644 (file)
@@ -57,12 +57,12 @@ static void isofs_put_super(struct super_block *sb)
 static void isofs_read_inode(struct inode *);
 static int isofs_statfs (struct dentry *, struct kstatfs *);
 
-static kmem_cache_t *isofs_inode_cachep;
+static struct kmem_cache *isofs_inode_cachep;
 
 static struct inode *isofs_alloc_inode(struct super_block *sb)
 {
        struct iso_inode_info *ei;
-       ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL);
+       ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -73,7 +73,7 @@ static void isofs_destroy_inode(struct inode *inode)
        kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct iso_inode_info *ei = foo;
 
index b85c686b60dbc7f58c32e04740dcb7052056544e..10fff94439387ab98b55dabb8d6f1af1e7a5384a 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
@@ -1630,7 +1630,7 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 #define JBD_MAX_SLABS 5
 #define JBD_SLAB_INDEX(size)  (size >> 11)
 
-static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
 static const char *jbd_slab_names[JBD_MAX_SLABS] = {
        "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
 };
@@ -1693,7 +1693,7 @@ void jbd_slab_free(void *ptr,  size_t size)
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *journal_head_cache;
+static struct kmem_cache *journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -1996,7 +1996,7 @@ static void __exit remove_jbd_proc_entry(void)
 
 #endif
 
-kmem_cache_t *jbd_handle_cache;
+struct kmem_cache *jbd_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
index c532429d8d9b365e9b540255e4f684071a9e0894..d204ab394f36bb0001c36d22b88fa0a1217c21f1 100644 (file)
@@ -70,8 +70,8 @@
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *revoke_record_cache;
-static kmem_cache_t *revoke_table_cache;
+static struct kmem_cache *revoke_record_cache;
+static struct kmem_cache *revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
index 4f82bcd63e488a6a1b0ff1b8bb20bb7e2a968b3c..d38e0d575e48d3d0554384a52277be8f4469b1c0 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 
+static void __journal_temp_unlink_buffer(struct journal_head *jh);
+
 /*
  * get_transaction: obtain a new transaction_t object.
  *
@@ -1499,7 +1501,7 @@ __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
  *
  * Called under j_list_lock.  The journal may not be locked.
  */
-void __journal_temp_unlink_buffer(struct journal_head *jh)
+static void __journal_temp_unlink_buffer(struct journal_head *jh)
 {
        struct journal_head **list = NULL;
        transaction_t *transaction;
index 70b2ae1ef2810e1a8fea2a557bfa5f5e7a395190..6bd8005e3d34434d95fac5037d79d4042af34678 100644 (file)
@@ -248,8 +248,12 @@ write_out_data:
                                bufs = 0;
                                goto write_out_data;
                        }
-               }
-               else {
+               } else if (!locked && buffer_locked(bh)) {
+                       __jbd2_journal_file_buffer(jh, commit_transaction,
+                                               BJ_Locked);
+                       jbd_unlock_bh_state(bh);
+                       put_bh(bh);
+               } else {
                        BUFFER_TRACE(bh, "writeout complete: unfile");
                        __jbd2_journal_unfile_buffer(jh);
                        jbd_unlock_bh_state(bh);
index c60f378b0f7670dec45afd81cd576cb030a0a324..44fc32bfd7f1d2d847bb3b84c423b00a9d07f8b8 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
@@ -1641,7 +1641,7 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 #define JBD_MAX_SLABS 5
 #define JBD_SLAB_INDEX(size)  (size >> 11)
 
-static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
 static const char *jbd_slab_names[JBD_MAX_SLABS] = {
        "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
 };
@@ -1704,7 +1704,7 @@ void jbd2_slab_free(void *ptr,  size_t size)
 /*
  * Journal_head storage management
  */
-static kmem_cache_t *jbd2_journal_head_cache;
+static struct kmem_cache *jbd2_journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -2007,7 +2007,7 @@ static void __exit jbd2_remove_jbd_proc_entry(void)
 
 #endif
 
-kmem_cache_t *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
index 380d19917f3732ad198edd47947f6fcfad662d5f..f506646ad0ffaa17369e68c4120e18b1aff7003f 100644 (file)
@@ -70,8 +70,8 @@
 #include <linux/init.h>
 #endif
 
-static kmem_cache_t *jbd2_revoke_record_cache;
-static kmem_cache_t *jbd2_revoke_table_cache;
+static struct kmem_cache *jbd2_revoke_record_cache;
+static struct kmem_cache *jbd2_revoke_table_cache;
 
 /* Each revoke record represents one single revoked block.  During
    journal replay, this involves recording the transaction ID of the
index c051a94c8a979106a326cd3e3fb931095dc8305f..3a8700153cb087bdade5965990201eeddee9da5c 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 
+static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+
 /*
  * jbd2_get_transaction: obtain a new transaction_t object.
  *
index 3f7899ea4cba9389742e6cb85d441ce9ff8c2535..9f15bce92022d939fdd829cd38face2ae1935365 100644 (file)
@@ -61,8 +61,8 @@ static const struct file_operations jffs_dir_operations;
 static struct inode_operations jffs_dir_inode_operations;
 static const struct address_space_operations jffs_address_operations;
 
-kmem_cache_t     *node_cache = NULL;
-kmem_cache_t     *fm_cache = NULL;
+struct kmem_cache     *node_cache = NULL;
+struct kmem_cache     *fm_cache = NULL;
 
 /* Called by the VFS at mount time to initialize the whole file system.  */
 static int jffs_fill_super(struct super_block *sb, void *data, int silent)
index 4a543e1149700b08141d6769814848ea9295e2c9..d0e783f199eafd0a76e42b6f97cc01385d89cbea 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/smp_lock.h>
 #include <linux/time.h>
 #include <linux/ctype.h>
+#include <linux/freezer.h>
 
 #include "intrep.h"
 #include "jffs_fm.h"
@@ -591,7 +592,7 @@ jffs_add_virtual_root(struct jffs_control *c)
        D2(printk("jffs_add_virtual_root(): "
                  "Creating a virtual root directory.\n"));
 
-       if (!(root = kmalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
+       if (!(root = kzalloc(sizeof(struct jffs_file), GFP_KERNEL))) {
                return -ENOMEM;
        }
        no_jffs_file++;
@@ -603,7 +604,6 @@ jffs_add_virtual_root(struct jffs_control *c)
        DJM(no_jffs_node++);
        memset(node, 0, sizeof(struct jffs_node));
        node->ino = JFFS_MIN_INO;
-       memset(root, 0, sizeof(struct jffs_file));
        root->ino = JFFS_MIN_INO;
        root->mode = S_IFDIR | S_IRWXU | S_IRGRP
                     | S_IXGRP | S_IROTH | S_IXOTH;
index 29b68d939bd9e9f6f9c8bd0572d1b157f2476dcd..077258b2103e5bac0d386a46102f4741292e5b2d 100644 (file)
@@ -29,8 +29,8 @@ static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset);
 static struct jffs_fm *jffs_alloc_fm(void);
 static void jffs_free_fm(struct jffs_fm *n);
 
-extern kmem_cache_t     *fm_cache;
-extern kmem_cache_t     *node_cache;
+extern struct kmem_cache     *fm_cache;
+extern struct kmem_cache     *node_cache;
 
 #if CONFIG_JFFS_FS_VERBOSE > 0
 void
index ff2a872e80e78677de43baaf8acc758299106ec0..6eb3daebd56354ca4973093c9d5966c7e148ae0a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/completion.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 #include "nodelist.h"
 
 
index 33f291005012bedf622daaaaa4ebf5f77655d2a1..83f9881ec4cc970a2cd61e267b5fd46a78dd9deb 100644 (file)
 
 /* These are initialised to NULL in the kernel startup code.
    If you're porting to other operating systems, beware */
-static kmem_cache_t *full_dnode_slab;
-static kmem_cache_t *raw_dirent_slab;
-static kmem_cache_t *raw_inode_slab;
-static kmem_cache_t *tmp_dnode_info_slab;
-static kmem_cache_t *raw_node_ref_slab;
-static kmem_cache_t *node_frag_slab;
-static kmem_cache_t *inode_cache_slab;
+static struct kmem_cache *full_dnode_slab;
+static struct kmem_cache *raw_dirent_slab;
+static struct kmem_cache *raw_inode_slab;
+static struct kmem_cache *tmp_dnode_info_slab;
+static struct kmem_cache *raw_node_ref_slab;
+static struct kmem_cache *node_frag_slab;
+static struct kmem_cache *inode_cache_slab;
 #ifdef CONFIG_JFFS2_FS_XATTR
-static kmem_cache_t *xattr_datum_cache;
-static kmem_cache_t *xattr_ref_cache;
+static struct kmem_cache *xattr_datum_cache;
+static struct kmem_cache *xattr_ref_cache;
 #endif
 
 int __init jffs2_create_slab_caches(void)
index bc4b8106a49010bf8144bfbe3a0e6532860eacb4..7deb7825402156fcd5f04d47b518452df2945a7f 100644 (file)
 
 static void jffs2_put_super(struct super_block *);
 
-static kmem_cache_t *jffs2_inode_cachep;
+static struct kmem_cache *jffs2_inode_cachep;
 
 static struct inode *jffs2_alloc_inode(struct super_block *sb)
 {
        struct jffs2_inode_info *ei;
-       ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+       ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -44,7 +44,7 @@ static void jffs2_destroy_inode(struct inode *inode)
        kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
 }
 
-static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
 
index b89c9aba046635b90ec977a9415ddb8ef79a2f8f..5065baa530b601d2d4ee28a5247ff7bc23e09b60 100644 (file)
@@ -67,7 +67,7 @@
 #include <linux/kthread.h>
 #include <linux/buffer_head.h>         /* for sync_blockdev() */
 #include <linux/bio.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/mutex.h>
 #include "jfs_incore.h"
index 0cccd1c39d750e199185c4b1d7f7004f52246142..b1a1c7296014de6dc0da37af7b4cfade89639eec 100644 (file)
@@ -74,7 +74,7 @@ static inline void lock_metapage(struct metapage *mp)
 }
 
 #define METAPOOL_MIN_PAGES 32
-static kmem_cache_t *metapage_cache;
+static struct kmem_cache *metapage_cache;
 static mempool_t *metapage_mempool;
 
 #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
@@ -180,7 +180,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
 
 #endif
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
        struct metapage *mp = (struct metapage *)foo;
 
index 81f6f04af192ae45c64d3022591522b527342a38..d558e51b0df8ffe4217b091f68a26bc7389dfcc2 100644 (file)
@@ -46,7 +46,7 @@
 #include <linux/vmalloc.h>
 #include <linux/smp_lock.h>
 #include <linux/completion.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kthread.h>
index 9c1c6e0e633d365dca62a9726037c5cfefcbaa2c..846ac8f34513e64f49eefc11d446b8b8bfbad37a 100644 (file)
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
 MODULE_LICENSE("GPL");
 
-static kmem_cache_t * jfs_inode_cachep;
+static struct kmem_cache * jfs_inode_cachep;
 
 static struct super_operations jfs_super_operations;
 static struct export_operations jfs_export_operations;
@@ -93,7 +93,7 @@ void jfs_error(struct super_block *sb, const char * function, ...)
        va_list args;
 
        va_start(args, function);
-       vsprintf(error_buf, function, args);
+       vsnprintf(error_buf, sizeof(error_buf), function, args);
        va_end(args);
 
        printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);
@@ -748,7 +748,7 @@ static struct file_system_type jfs_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;
 
index 3d84f600b633693f37078a28ee9fb5e31290b378..497c3cd59d527c64055c21579930c7ec43fcfcd5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/utsname.h>
 #include <linux/smp_lock.h>
+#include <linux/freezer.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/lockd/lockd.h>
@@ -729,7 +730,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
                goto retry_cancel;
        }
 
-       dprintk("lockd: cancel status %d (task %d)\n",
+       dprintk("lockd: cancel status %u (task %u)\n",
                        req->a_res.status, task->tk_pid);
 
        switch (req->a_res.status) {
index fb24a97303458f1b282f81111895c105e2c0399f..3d4610c2a2665f9ce7236988f6bd40c52d3623bd 100644 (file)
@@ -36,34 +36,14 @@ static DEFINE_MUTEX(nlm_host_mutex);
 static void                    nlm_gc_hosts(void);
 static struct nsm_handle *     __nsm_find(const struct sockaddr_in *,
                                        const char *, int, int);
-
-/*
- * Find an NLM server handle in the cache. If there is none, create it.
- */
-struct nlm_host *
-nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
-                       const char *hostname, int hostname_len)
-{
-       return nlm_lookup_host(0, sin, proto, version,
-                              hostname, hostname_len);
-}
-
-/*
- * Find an NLM client handle in the cache. If there is none, create it.
- */
-struct nlm_host *
-nlmsvc_lookup_host(struct svc_rqst *rqstp,
-                       const char *hostname, int hostname_len)
-{
-       return nlm_lookup_host(1, &rqstp->rq_addr,
-                              rqstp->rq_prot, rqstp->rq_vers,
-                              hostname, hostname_len);
-}
+static struct nsm_handle *     nsm_find(const struct sockaddr_in *sin,
+                                        const char *hostname,
+                                        int hostname_len);
 
 /*
  * Common host lookup routine for server & client
  */
-struct nlm_host *
+static struct nlm_host *
 nlm_lookup_host(int server, const struct sockaddr_in *sin,
                                        int proto, int version,
                                        const char *hostname,
@@ -194,6 +174,29 @@ nlm_destroy_host(struct nlm_host *host)
        kfree(host);
 }
 
+/*
+ * Find an NLM server handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
+                       const char *hostname, int hostname_len)
+{
+       return nlm_lookup_host(0, sin, proto, version,
+                              hostname, hostname_len);
+}
+
+/*
+ * Find an NLM client handle in the cache. If there is none, create it.
+ */
+struct nlm_host *
+nlmsvc_lookup_host(struct svc_rqst *rqstp,
+                       const char *hostname, int hostname_len)
+{
+       return nlm_lookup_host(1, &rqstp->rq_addr,
+                              rqstp->rq_prot, rqstp->rq_vers,
+                              hostname, hostname_len);
+}
+
 /*
  * Create the NLM RPC client for an NLM peer
  */
@@ -495,7 +498,7 @@ out:
        return nsm;
 }
 
-struct nsm_handle *
+static struct nsm_handle *
 nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
 {
        return __nsm_find(sin, hostname, hostname_len, 1);
index 0ce5c81ff5078076e3fd43f659fa0de561370917..f67146a8199a0a98149407080dd83582357dc52f 100644 (file)
@@ -234,7 +234,7 @@ nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
  */
 static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
 {
-       dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
+       dprintk("lockd: %5u callback returned %d\n", task->tk_pid,
                        -task->tk_status);
 }
 
index 32e99a6e8dcad6c20964d63d3dcb5d460423b495..3707c3a23e9330173b1d50fdbb1548691064f688 100644 (file)
@@ -263,7 +263,7 @@ nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
  */
 static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
 {
-       dprintk("lockd: %4d callback returned %d\n", task->tk_pid,
+       dprintk("lockd: %5u callback returned %d\n", task->tk_pid,
                        -task->tk_status);
 }
 
index e0b6a80649a01ff79c26206c039ece3d4faebd0c..1cb0c57fedbd42bbce5c0d3fefabbf2b395eeb67 100644 (file)
@@ -142,12 +142,12 @@ int lease_break_time = 45;
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
 
-static kmem_cache_t *filelock_cache __read_mostly;
+static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
 {
-       return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+       return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
 
 static void locks_release_private(struct file_lock *fl)
@@ -199,7 +199,7 @@ EXPORT_SYMBOL(locks_init_lock);
  * Initialises the fields of the file lock which are invariant for
  * free file_locks.
  */
-static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
 {
        struct file_lock *lock = (struct file_lock *) foo;
 
index 0ff71256e65b4fb0d3a9fc4352c0a8c9b4b5e365..deeb9dc062d9dfdc74676734163e8c273f6aa274 100644 (file)
@@ -85,7 +85,7 @@ struct mb_cache {
 #ifndef MB_CACHE_INDEXES_COUNT
        int                             c_indexes_count;
 #endif
-       kmem_cache_t                    *c_entry_cache;
+       struct kmem_cache                       *c_entry_cache;
        struct list_head                *c_block_hash;
        struct list_head                *c_indexes_hash[0];
 };
index 1e36bae4d0eb1a4d8a8b17140c3216fad2d5c81b..629e09b38c5cf67b23a4fe1eee0190616f44ccb6 100644 (file)
@@ -51,12 +51,12 @@ static void minix_put_super(struct super_block *sb)
        return;
 }
 
-static kmem_cache_t * minix_inode_cachep;
+static struct kmem_cache * minix_inode_cachep;
 
 static struct inode *minix_alloc_inode(struct super_block *sb)
 {
        struct minix_inode_info *ei;
-       ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL);
+       ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -67,7 +67,7 @@ static void minix_destroy_inode(struct inode *inode)
        kmem_cache_free(minix_inode_cachep, minix_i(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct minix_inode_info *ei = (struct minix_inode_info *) foo;
 
index 28d49b301d5516bbdfb5bf7651f3c51b8f370f31..db1bca26d88cf07d4758b7ccf02d8334b91e1208 100644 (file)
@@ -249,9 +249,11 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
 
        /*
         * MAY_EXEC on regular files requires special handling: We override
-        * filesystem execute permissions if the mode bits aren't set.
+        * filesystem execute permissions if the mode bits aren't set or
+        * the fs is mounted with the "noexec" flag.
         */
-       if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
+       if ((mask & MAY_EXEC) && S_ISREG(mode) && (!(mode & S_IXUGO) ||
+                       (nd && nd->mnt && (nd->mnt->mnt_flags & MNT_NOEXEC))))
                return -EACCES;
 
        /* Ordinary permission routines do not understand MAY_APPEND. */
@@ -1996,8 +1998,7 @@ asmlinkage long sys_mkdir(const char __user *pathname, int mode)
 void dentry_unhash(struct dentry *dentry)
 {
        dget(dentry);
-       if (atomic_read(&dentry->d_count))
-               shrink_dcache_parent(dentry);
+       shrink_dcache_parent(dentry);
        spin_lock(&dcache_lock);
        spin_lock(&dentry->d_lock);
        if (atomic_read(&dentry->d_count) == 2)
index 55442a6cf22133d2c9687636b3e3ca788eacac53..b00ac84ebbdd4545d494f4a98046296f9e2add10 100644 (file)
@@ -36,7 +36,7 @@ static int event;
 
 static struct list_head *mount_hashtable __read_mostly;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache __read_mostly;
+static struct kmem_cache *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 
 /* /sys/fs */
index 42e3bef270c9d0c029475800fec5023229dfe512..fae53243bb92148e1fdf0affa94250217d514581 100644 (file)
@@ -40,12 +40,12 @@ static void ncp_delete_inode(struct inode *);
 static void ncp_put_super(struct super_block *);
 static int  ncp_statfs(struct dentry *, struct kstatfs *);
 
-static kmem_cache_t * ncp_inode_cachep;
+static struct kmem_cache * ncp_inode_cachep;
 
 static struct inode *ncp_alloc_inode(struct super_block *sb)
 {
        struct ncp_inode_info *ei;
-       ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL);
+       ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -56,7 +56,7 @@ static void ncp_destroy_inode(struct inode *inode)
        kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;
 
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
                server->rcv.ptr = (unsigned char*)&server->rcv.buf;
                server->rcv.len = 10;
                server->rcv.state = 0;
-               INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server);
-               INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server);
+               INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
+               INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
                sock->sk->sk_write_space = ncp_tcp_write_space;
        } else {
-               INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server);
-               INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server);
+               INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
+               INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
                server->timeout_tm.data = (unsigned long)server;
                server->timeout_tm.function = ncpdgram_timeout_call;
        }
index 11c2b252ebedd9f63f5da6b5f8d2003bd1baccfa..e496d8b65e92d50141139cec9519353e0a953793 100644 (file)
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void *
        }
 }
 
-void ncpdgram_rcv_proc(void *s)
+void ncpdgram_rcv_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, rcv.tq);
        struct socket* sock;
        
        sock = server->ncp_sock;
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server)
        }
 }
 
-void ncpdgram_timeout_proc(void *s)
+void ncpdgram_timeout_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, timeout_tq);
        mutex_lock(&server->rcv.creq_mutex);
        __ncpdgram_timeout_proc(server);
        mutex_unlock(&server->rcv.creq_mutex);
@@ -652,18 +654,20 @@ skipdata:;
        }
 }
 
-void ncp_tcp_rcv_proc(void *s)
+void ncp_tcp_rcv_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, rcv.tq);
 
        mutex_lock(&server->rcv.creq_mutex);
        __ncptcp_rcv_proc(server);
        mutex_unlock(&server->rcv.creq_mutex);
 }
 
-void ncp_tcp_tx_proc(void *s)
+void ncp_tcp_tx_proc(struct work_struct *work)
 {
-       struct ncp_server *server = s;
+       struct ncp_server *server =
+               container_of(work, struct ncp_server, tx.tq);
        
        mutex_lock(&server->rcv.creq_mutex);
        __ncptcp_try_send(server);
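The ncpfs handlers above illustrate the workqueue conversion that recurs throughout this merge: callbacks now take a struct work_struct pointer and recover their owning object with container_of() instead of receiving a void * context. A minimal userspace sketch of that pattern, with stand-in types (ncp_server_like, rcv_tq and the local container_of macro are illustrative, not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct ncp_server_like {
	int ncp_sock;
	struct work_struct rcv_tq;	/* embedded work item */
};

/* New-style handler: receives the work item, derives its owner. */
static void rcv_proc(struct work_struct *work)
{
	struct ncp_server_like *server =
		container_of(work, struct ncp_server_like, rcv_tq);
	printf("handling socket %d\n", server->ncp_sock);
}

int main(void)
{
	struct ncp_server_like server = { .ncp_sock = 42 };
	rcv_proc(&server.rcv_tq);	/* the workqueue core passes &work at run time */
	return 0;
}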
index 5fea638743e4107aadcd8cbd18cd46d78e331cf0..23ab145daa2d2289cd6721457b3f555a9d16cbaf 100644 (file)
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname,
        INIT_LIST_HEAD(&clp->cl_state_owners);
        INIT_LIST_HEAD(&clp->cl_unused);
        spin_lock_init(&clp->cl_lock);
-       INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
+       INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
        clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
index bdfabf854a519af44ae5f07e83ee64e3e0089c6a..f9d678f4ae06cff66aa86e8ca58425a2ba779a5c 100644 (file)
@@ -58,7 +58,7 @@
 
 #define NFSDBG_FACILITY                NFSDBG_VFS
 
-static kmem_cache_t *nfs_direct_cachep;
+static struct kmem_cache *nfs_direct_cachep;
 
 /*
  * This represents a set of asynchronous requests that we're waiting on
@@ -143,7 +143,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
        struct nfs_direct_req *dreq;
 
-       dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
+       dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
        if (!dreq)
                return NULL;
 
@@ -307,9 +307,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 
                data->task.tk_cookie = (unsigned long) inode;
 
-               lock_kernel();
                rpc_execute(&data->task);
-               unlock_kernel();
 
                dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
@@ -475,9 +473,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 
        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 
-       lock_kernel();
        rpc_execute(&data->task);
-       unlock_kernel();
 }
 
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
@@ -641,9 +637,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;
 
-               lock_kernel();
                rpc_execute(&data->task);
-               unlock_kernel();
 
                dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
index cc93865cea932a4e9c78ea95e1c0165e575630d1..8e28bffc35a0618f2b10c7807d6babc46c63a466 100644 (file)
@@ -307,28 +307,28 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
 
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
 {
-       struct inode *inode = page->mapping->host;
-
+       if (offset != 0)
+               return;
        /* Cancel any unstarted writes on this page */
-       if (offset == 0)
-               nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
+       nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
-       if (gfp & __GFP_FS)
-               return !nfs_wb_page(page->mapping->host, page);
-       else
-               /*
-                * Avoid deadlock on nfs_wait_on_request().
-                */
+       /*
+        * Avoid deadlock on nfs_wait_on_request().
+        */
+       if (!(gfp & __GFP_FS))
                return 0;
+       /* Hack... Force nfs_wb_page() to write out the page */
+       SetPageDirty(page);
+       return !nfs_wb_page(page->mapping->host, page);
 }
 
 const struct address_space_operations nfs_file_aops = {
        .readpage = nfs_readpage,
        .readpages = nfs_readpages,
-       .set_page_dirty = __set_page_dirty_nobuffers,
+       .set_page_dirty = nfs_set_page_dirty,
        .writepage = nfs_writepage,
        .writepages = nfs_writepages,
        .prepare_write = nfs_prepare_write,
@@ -375,6 +375,12 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 
        nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
        result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       /* Return error values for O_SYNC and IS_SYNC() */
+       if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
+               int err = nfs_fsync(iocb->ki_filp, dentry, 1);
+               if (err < 0)
+                       result = err;
+       }
 out:
        return result;
 
index 08cc4c5919abee92f1f28b411e1154c627ff15d3..36680d1061b0d1a314eb57abc802a4e5404adc36 100644 (file)
@@ -55,7 +55,7 @@ static int nfs_update_inode(struct inode *, struct nfs_fattr *);
 
 static void nfs_zap_acl_cache(struct inode *);
 
-static kmem_cache_t * nfs_inode_cachep;
+static struct kmem_cache * nfs_inode_cachep;
 
 static inline unsigned long
 nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
@@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
        int err;
 
        /* Flush out writes to the server in order to update c/mtime */
-       nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT);
+       nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
 
        /*
         * We may force a getattr if the user cares about atime.
@@ -1080,7 +1080,7 @@ void nfs4_clear_inode(struct inode *inode)
 struct inode *nfs_alloc_inode(struct super_block *sb)
 {
        struct nfs_inode *nfsi;
-       nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL);
+       nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
        if (!nfsi)
                return NULL;
        nfsi->flags = 0UL;
@@ -1111,7 +1111,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
 #endif
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
index d205466233f67f932460d51f124cade1bec0a516..a28f6ce2e131e4df24e40223ebdc36d7d986da6f 100644 (file)
@@ -217,3 +217,21 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
        if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
 }
+
+/*
+ * Determine the number of bytes of data the page contains
+ */
+static inline
+unsigned int nfs_page_length(struct page *page)
+{
+       loff_t i_size = i_size_read(page->mapping->host);
+
+       if (i_size > 0) {
+               pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+               if (page->index < end_index)
+                       return PAGE_CACHE_SIZE;
+               if (page->index == end_index)
+                       return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+       }
+       return 0;
+}
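The nfs_page_length() helper added above replaces the per-caller open-coded versions removed later in this diff (see the fs/nfs/read.c and write.c hunks). A small userspace sketch of the same arithmetic, assuming 4096-byte pages; PAGE_SHIFT/PAGE_SIZE/PAGE_MASK here are local stand-ins for the kernel's page-cache constants:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Bytes of file data covered by page 'index', for a file of 'i_size' bytes. */
static unsigned int page_length(long long i_size, unsigned long index)
{
	if (i_size > 0) {
		unsigned long end_index = (i_size - 1) >> PAGE_SHIFT;
		if (index < end_index)
			return PAGE_SIZE;			/* interior page: fully covered */
		if (index == end_index)
			return ((i_size - 1) & ~PAGE_MASK) + 1;	/* partial tail page */
	}
	return 0;						/* empty file or page past EOF */
}

int main(void)
{
	printf("%u\n", page_length(10000, 0));	/* 4096 */
	printf("%u\n", page_length(10000, 2));	/* 1808 = 10000 - 2*4096 */
	printf("%u\n", page_length(10000, 3));	/* 0 */
	return 0;
}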
index ec1114b33d8954a3d2436646d9d735112e240e0e..371b804e7cc8a66e2e6f20c00356799e5b6c51cc 100644 (file)
 
 #define NFSDBG_FACILITY                NFSDBG_VFS
 
-static void nfs_expire_automounts(void *list);
+static void nfs_expire_automounts(struct work_struct *work);
 
 LIST_HEAD(nfs_automount_list);
-static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
 int nfs_mountpoint_expiry_timeout = 500 * HZ;
 
 static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = {
        .follow_link    = nfs_follow_mountpoint,
 };
 
-static void nfs_expire_automounts(void *data)
+static void nfs_expire_automounts(struct work_struct *work)
 {
-       struct list_head *list = (struct list_head *)data;
+       struct list_head *list = &nfs_automount_list;
 
        mark_mounts_for_expiry(list);
        if (!list_empty(list))
index e5f128ffc32dd7ee368946768f9ce762a44c4fe7..510ae524f3fd7dfd3e8e5fbd91f1b166964126c1 100644 (file)
@@ -276,51 +276,6 @@ static int nfs3_proc_read(struct nfs_read_data *rdata)
        return status;
 }
 
-static int nfs3_proc_write(struct nfs_write_data *wdata)
-{
-       int                     rpcflags = wdata->flags;
-       struct inode *          inode = wdata->inode;
-       struct nfs_fattr *      fattr = wdata->res.fattr;
-       struct rpc_message      msg = {
-               .rpc_proc       = &nfs3_procedures[NFS3PROC_WRITE],
-               .rpc_argp       = &wdata->args,
-               .rpc_resp       = &wdata->res,
-               .rpc_cred       = wdata->cred,
-       };
-       int                     status;
-
-       dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
-                       (long long) wdata->args.offset);
-       nfs_fattr_init(fattr);
-       status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
-       if (status >= 0)
-               nfs_post_op_update_inode(inode, fattr);
-       dprintk("NFS reply write: %d\n", status);
-       return status < 0? status : wdata->res.count;
-}
-
-static int nfs3_proc_commit(struct nfs_write_data *cdata)
-{
-       struct inode *          inode = cdata->inode;
-       struct nfs_fattr *      fattr = cdata->res.fattr;
-       struct rpc_message      msg = {
-               .rpc_proc       = &nfs3_procedures[NFS3PROC_COMMIT],
-               .rpc_argp       = &cdata->args,
-               .rpc_resp       = &cdata->res,
-               .rpc_cred       = cdata->cred,
-       };
-       int                     status;
-
-       dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
-                       (long long) cdata->args.offset);
-       nfs_fattr_init(fattr);
-       status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
-       if (status >= 0)
-               nfs_post_op_update_inode(inode, fattr);
-       dprintk("NFS reply commit: %d\n", status);
-       return status;
-}
-
 /*
  * Create a regular file.
  * For now, we don't implement O_EXCL.
@@ -369,7 +324,7 @@ again:
 
        /* If the server doesn't support the exclusive creation semantics,
         * try again with simple 'guarded' mode. */
-       if (status == NFSERR_NOTSUPP) {
+       if (status == -ENOTSUPP) {
                switch (arg.createmode) {
                        case NFS3_CREATE_EXCLUSIVE:
                                arg.createmode = NFS3_CREATE_GUARDED;
@@ -690,8 +645,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
        };
        int                     status;
 
-       lock_kernel();
-
        if (plus)
                msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
 
@@ -702,7 +655,6 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
        nfs_refresh_inode(dir, &dir_attr);
        dprintk("NFS reply readdir: %d\n", status);
-       unlock_kernel();
        return status;
 }
 
@@ -904,8 +856,6 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
        .access         = nfs3_proc_access,
        .readlink       = nfs3_proc_readlink,
        .read           = nfs3_proc_read,
-       .write          = nfs3_proc_write,
-       .commit         = nfs3_proc_commit,
        .create         = nfs3_proc_create,
        .remove         = nfs3_proc_remove,
        .unlink_setup   = nfs3_proc_unlink_setup,
index 6f346677332db4c99e07c867849ad7a0a705427a..c26cd978c7cce4f3b8576e62bbdd03860c6ce3f0 100644 (file)
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2];
 extern void nfs4_schedule_state_renewal(struct nfs_client *);
 extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
 extern void nfs4_kill_renewd(struct nfs_client *);
-extern void nfs4_renew_state(void *);
+extern void nfs4_renew_state(struct work_struct *);
 
 /* nfs4state.c */
 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
index 8118036cc4494c3a420869820cdebdb66c2c6262..ee458aeab24a5ef1ebc1ad8aad1410cd2d64abd9 100644 (file)
@@ -636,7 +636,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
                smp_wmb();
        } else
                status = data->rpc_status;
-       rpc_release_task(task);
+       rpc_put_task(task);
        return status;
 }
 
@@ -742,7 +742,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
                smp_wmb();
        } else
                status = data->rpc_status;
-       rpc_release_task(task);
+       rpc_put_task(task);
        if (status != 0)
                return status;
 
@@ -1775,89 +1775,6 @@ static int nfs4_proc_read(struct nfs_read_data *rdata)
        return err;
 }
 
-static int _nfs4_proc_write(struct nfs_write_data *wdata)
-{
-       int rpcflags = wdata->flags;
-       struct inode *inode = wdata->inode;
-       struct nfs_fattr *fattr = wdata->res.fattr;
-       struct nfs_server *server = NFS_SERVER(inode);
-       struct rpc_message msg = {
-               .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_WRITE],
-               .rpc_argp       = &wdata->args,
-               .rpc_resp       = &wdata->res,
-               .rpc_cred       = wdata->cred,
-       };
-       int status;
-
-       dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
-                       (long long) wdata->args.offset);
-
-       wdata->args.bitmask = server->attr_bitmask;
-       wdata->res.server = server;
-       wdata->timestamp = jiffies;
-       nfs_fattr_init(fattr);
-       status = rpc_call_sync(server->client, &msg, rpcflags);
-       dprintk("NFS reply write: %d\n", status);
-       if (status < 0)
-               return status;
-       renew_lease(server, wdata->timestamp);
-       nfs_post_op_update_inode(inode, fattr);
-       return wdata->res.count;
-}
-
-static int nfs4_proc_write(struct nfs_write_data *wdata)
-{
-       struct nfs4_exception exception = { };
-       int err;
-       do {
-               err = nfs4_handle_exception(NFS_SERVER(wdata->inode),
-                               _nfs4_proc_write(wdata),
-                               &exception);
-       } while (exception.retry);
-       return err;
-}
-
-static int _nfs4_proc_commit(struct nfs_write_data *cdata)
-{
-       struct inode *inode = cdata->inode;
-       struct nfs_fattr *fattr = cdata->res.fattr;
-       struct nfs_server *server = NFS_SERVER(inode);
-       struct rpc_message msg = {
-               .rpc_proc       = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
-               .rpc_argp       = &cdata->args,
-               .rpc_resp       = &cdata->res,
-               .rpc_cred       = cdata->cred,
-       };
-       int status;
-
-       dprintk("NFS call  commit %d @ %Ld\n", cdata->args.count,
-                       (long long) cdata->args.offset);
-
-       cdata->args.bitmask = server->attr_bitmask;
-       cdata->res.server = server;
-       cdata->timestamp = jiffies;
-       nfs_fattr_init(fattr);
-       status = rpc_call_sync(server->client, &msg, 0);
-       if (status >= 0)
-               renew_lease(server, cdata->timestamp);
-       dprintk("NFS reply commit: %d\n", status);
-       if (status >= 0)
-               nfs_post_op_update_inode(inode, fattr);
-       return status;
-}
-
-static int nfs4_proc_commit(struct nfs_write_data *cdata)
-{
-       struct nfs4_exception exception = { };
-       int err;
-       do {
-               err = nfs4_handle_exception(NFS_SERVER(cdata->inode),
-                               _nfs4_proc_commit(cdata),
-                               &exception);
-       } while (exception.retry);
-       return err;
-}
-
 /*
  * Got race?
  * We will need to arrange for the VFS layer to provide an atomic open.
@@ -2223,13 +2140,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                        dentry->d_parent->d_name.name,
                        dentry->d_name.name,
                        (unsigned long long)cookie);
-       lock_kernel();
        nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
        res.pgbase = args.pgbase;
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
        if (status == 0)
                memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
-       unlock_kernel();
        dprintk("%s: returns %d\n", __FUNCTION__, status);
        return status;
 }
@@ -3067,7 +2982,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
                if (status == 0)
                        nfs_post_op_update_inode(inode, &data->fattr);
        }
-       rpc_release_task(task);
+       rpc_put_task(task);
        return status;
 }
 
@@ -3314,7 +3229,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
        if (IS_ERR(task))
                goto out;
        status = nfs4_wait_for_completion_rpc_task(task);
-       rpc_release_task(task);
+       rpc_put_task(task);
 out:
        return status;
 }
@@ -3430,7 +3345,7 @@ static void nfs4_lock_release(void *calldata)
                task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
                                data->arg.lock_seqid);
                if (!IS_ERR(task))
-                       rpc_release_task(task);
+                       rpc_put_task(task);
                dprintk("%s: cancelling lock!\n", __FUNCTION__);
        } else
                nfs_free_seqid(data->arg.lock_seqid);
@@ -3472,7 +3387,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
                        ret = -EAGAIN;
        } else
                data->cancelled = 1;
-       rpc_release_task(task);
+       rpc_put_task(task);
        dprintk("%s: done, ret = %d!\n", __FUNCTION__, ret);
        return ret;
 }
@@ -3732,8 +3647,6 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
        .access         = nfs4_proc_access,
        .readlink       = nfs4_proc_readlink,
        .read           = nfs4_proc_read,
-       .write          = nfs4_proc_write,
-       .commit         = nfs4_proc_commit,
        .create         = nfs4_proc_create,
        .remove         = nfs4_proc_remove,
        .unlink_setup   = nfs4_proc_unlink_setup,
index 7b6df1852e7590043540cf9c65d3dca963826b20..823298561c0a9a7904695ce554899165d26f7403 100644 (file)
 #define NFSDBG_FACILITY        NFSDBG_PROC
 
 void
-nfs4_renew_state(void *data)
+nfs4_renew_state(struct work_struct *work)
 {
-       struct nfs_client *clp = (struct nfs_client *)data;
+       struct nfs_client *clp =
+               container_of(work, struct nfs_client, cl_renewd.work);
        struct rpc_cred *cred;
        long lease, timeout;
        unsigned long last, now;
index 829af323f28862f2218ab1d3f5163b5953f6b623..ca4b1d4ff42b0f7d337ad01fa7c1675f959c4134 100644 (file)
 #include <linux/nfs_page.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_mount.h>
+#include <linux/writeback.h>
 
 #define NFS_PARANOIA 1
 
-static kmem_cache_t *nfs_page_cachep;
+static struct kmem_cache *nfs_page_cachep;
 
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
        struct nfs_page *p;
-       p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
+       p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->wb_list);
@@ -268,11 +269,10 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
 
 #define NFS_SCAN_MAXENTRIES 16
 /**
- * nfs_scan_lock_dirty - Scan the radix tree for dirty requests
- * @nfsi: NFS inode
+ * nfs_scan_dirty - Scan the radix tree for dirty requests
+ * @mapping: pointer to address space
+ * @wbc: writeback_control structure
  * @dst: Destination list
- * @idx_start: lower bound of page->index to scan
- * @npages: idx_start + npages sets the upper bound to scan.
  *
  * Moves elements from one of the inode request lists.
  * If the number of requests is set to 0, the entire address_space
@@ -280,46 +280,63 @@ nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
  * The requests are *not* checked to ensure that they form a contiguous set.
  * You must be holding the inode's req_lock when calling this function
  */
-int
-nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
-             unsigned long idx_start, unsigned int npages)
+long nfs_scan_dirty(struct address_space *mapping,
+                       struct writeback_control *wbc,
+                       struct list_head *dst)
 {
+       struct nfs_inode *nfsi = NFS_I(mapping->host);
        struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
        struct nfs_page *req;
-       unsigned long idx_end;
+       pgoff_t idx_start, idx_end;
+       long res = 0;
        int found, i;
-       int res;
 
-       res = 0;
-       if (npages == 0)
-               idx_end = ~0;
-       else
-               idx_end = idx_start + npages - 1;
+       if (nfsi->ndirty == 0)
+               return 0;
+       if (wbc->range_cyclic) {
+               idx_start = 0;
+               idx_end = ULONG_MAX;
+       } else if (wbc->range_end == 0) {
+               idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+               idx_end = ULONG_MAX;
+       } else {
+               idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+               idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+       }
 
        for (;;) {
+               unsigned int toscan = NFS_SCAN_MAXENTRIES;
+
                found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
-                               (void **)&pgvec[0], idx_start, NFS_SCAN_MAXENTRIES,
+                               (void **)&pgvec[0], idx_start, toscan,
                                NFS_PAGE_TAG_DIRTY);
+
+               /* Did we make progress? */
                if (found <= 0)
                        break;
+
                for (i = 0; i < found; i++) {
                        req = pgvec[i];
-                       if (req->wb_index > idx_end)
+                       if (!wbc->range_cyclic && req->wb_index > idx_end)
                                goto out;
 
+                       /* Try to lock request and mark it for writeback */
+                       if (!nfs_set_page_writeback_locked(req))
+                               goto next;
+                       radix_tree_tag_clear(&nfsi->nfs_page_tree,
+                                       req->wb_index, NFS_PAGE_TAG_DIRTY);
+                       nfsi->ndirty--;
+                       nfs_list_remove_request(req);
+                       nfs_list_add_request(req, dst);
+                       res++;
+                       if (res == LONG_MAX)
+                               goto out;
+next:
                        idx_start = req->wb_index + 1;
-
-                       if (nfs_set_page_writeback_locked(req)) {
-                               radix_tree_tag_clear(&nfsi->nfs_page_tree,
-                                               req->wb_index, NFS_PAGE_TAG_DIRTY);
-                               nfs_list_remove_request(req);
-                               nfs_list_add_request(req, dst);
-                               dec_zone_page_state(req->wb_page, NR_FILE_DIRTY);
-                               res++;
-                       }
                }
        }
 out:
+       WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty));
        return res;
 }
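The range setup at the top of nfs_scan_dirty() maps the writeback_control byte range onto page indices before walking the radix tree. A brief sketch of that conversion under the same assumptions (4096-byte pages; wbc_like is an illustrative stand-in for struct writeback_control):

#include <stdio.h>
#include <limits.h>

#define PAGE_CACHE_SHIFT 12

struct wbc_like {
	int range_cyclic;		/* scan the whole mapping, wrapping around */
	long long range_start;		/* byte offsets into the file */
	long long range_end;
};

static void range_to_indices(const struct wbc_like *wbc,
			     unsigned long *idx_start, unsigned long *idx_end)
{
	if (wbc->range_cyclic) {
		*idx_start = 0;
		*idx_end = ULONG_MAX;
	} else if (wbc->range_end == 0) {
		*idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		*idx_end = ULONG_MAX;
	} else {
		*idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		*idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
	}
}

int main(void)
{
	struct wbc_like wbc = { 0, 8192, 20480 };
	unsigned long s, e;

	range_to_indices(&wbc, &s, &e);
	printf("pages %lu..%lu\n", s, e);	/* pages 2..5 */
	return 0;
}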
 
index 4529cc4f3f8fe427bb6b262718213f933b7ac6a1..10f5e80ca15789fc91718c2a5859423c30d21bcc 100644 (file)
@@ -215,32 +215,6 @@ static int nfs_proc_read(struct nfs_read_data *rdata)
        return status;
 }
 
-static int nfs_proc_write(struct nfs_write_data *wdata)
-{
-       int                     flags = wdata->flags;
-       struct inode *          inode = wdata->inode;
-       struct nfs_fattr *      fattr = wdata->res.fattr;
-       struct rpc_message      msg = {
-               .rpc_proc       = &nfs_procedures[NFSPROC_WRITE],
-               .rpc_argp       = &wdata->args,
-               .rpc_resp       = &wdata->res,
-               .rpc_cred       = wdata->cred,
-       };
-       int                     status;
-
-       dprintk("NFS call  write %d @ %Ld\n", wdata->args.count,
-                       (long long) wdata->args.offset);
-       nfs_fattr_init(fattr);
-       status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
-       if (status >= 0) {
-               nfs_post_op_update_inode(inode, fattr);
-               wdata->res.count = wdata->args.count;
-               wdata->verf.committed = NFS_FILE_SYNC;
-       }
-       dprintk("NFS reply write: %d\n", status);
-       return status < 0? status : wdata->res.count;
-}
-
 static int
 nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
                int flags, struct nameidata *nd)
@@ -545,13 +519,10 @@ nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
        };
        int                     status;
 
-       lock_kernel();
-
        dprintk("NFS call  readdir %d\n", (unsigned int)cookie);
        status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 
        dprintk("NFS reply readdir: %d\n", status);
-       unlock_kernel();
        return status;
 }
 
@@ -696,8 +667,6 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
        .access         = NULL,                /* access */
        .readlink       = nfs_proc_readlink,
        .read           = nfs_proc_read,
-       .write          = nfs_proc_write,
-       .commit         = NULL,                /* commit */
        .create         = nfs_proc_create,
        .remove         = nfs_proc_remove,
        .unlink_setup   = nfs_proc_unlink_setup,
index c2e49c397a27186ac9ba09ebd49710bb091935e8..a9c26521a9e2d8a13965e364dae9ac8392cecdb1 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <asm/system.h>
 
+#include "internal.h"
 #include "iostat.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PAGECACHE
@@ -38,7 +39,7 @@ static int nfs_pagein_one(struct list_head *, struct inode *);
 static const struct rpc_call_ops nfs_read_partial_ops;
 static const struct rpc_call_ops nfs_read_full_ops;
 
-static kmem_cache_t *nfs_rdata_cachep;
+static struct kmem_cache *nfs_rdata_cachep;
 static mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ  (32)
@@ -46,7 +47,7 @@ static mempool_t *nfs_rdata_mempool;
 struct nfs_read_data *nfs_readdata_alloc(size_t len)
 {
        unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
+       struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -65,32 +66,22 @@ struct nfs_read_data *nfs_readdata_alloc(size_t len)
        return p;
 }
 
-static void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_rcu_free(struct rcu_head *head)
 {
+       struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_rdata_mempool);
 }
 
-void nfs_readdata_release(void *data)
+static void nfs_readdata_free(struct nfs_read_data *rdata)
 {
-        nfs_readdata_free(data);
+       call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
 }
 
-static
-unsigned int nfs_page_length(struct inode *inode, struct page *page)
+void nfs_readdata_release(void *data)
 {
-       loff_t i_size = i_size_read(inode);
-       unsigned long idx;
-
-       if (i_size <= 0)
-               return 0;
-       idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
-       if (page->index > idx)
-               return 0;
-       if (page->index != idx)
-               return PAGE_CACHE_SIZE;
-       return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
+        nfs_readdata_free(data);
 }
 
 static
@@ -139,12 +130,12 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
 {
        unsigned int    rsize = NFS_SERVER(inode)->rsize;
        unsigned int    count = PAGE_CACHE_SIZE;
-       int             result;
+       int result = -ENOMEM;
        struct nfs_read_data *rdata;
 
        rdata = nfs_readdata_alloc(count);
        if (!rdata)
-               return -ENOMEM;
+               goto out_unlock;
 
        memset(rdata, 0, sizeof(*rdata));
        rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
@@ -212,8 +203,9 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
        result = 0;
 
 io_error:
-       unlock_page(page);
        nfs_readdata_free(rdata);
+out_unlock:
+       unlock_page(page);
        return result;
 }
 
@@ -224,7 +216,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
        struct nfs_page *new;
        unsigned int len;
 
-       len = nfs_page_length(inode, page);
+       len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
@@ -316,9 +308,7 @@ static void nfs_execute_read(struct nfs_read_data *data)
        sigset_t oldset;
 
        rpc_clnt_sigmask(clnt, &oldset);
-       lock_kernel();
        rpc_execute(&data->task);
-       unlock_kernel();
        rpc_clnt_sigunmask(clnt, &oldset);
 }
 
@@ -454,6 +444,55 @@ nfs_pagein_list(struct list_head *head, int rpages)
        return error;
 }
 
+/*
+ * This is the callback from RPC telling us whether a reply was
+ * received or some error occurred (timeout or socket shutdown).
+ */
+int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
+{
+       int status;
+
+       dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid,
+                       task->tk_status);
+
+       status = NFS_PROTO(data->inode)->read_done(task, data);
+       if (status != 0)
+               return status;
+
+       nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+
+       if (task->tk_status == -ESTALE) {
+               set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
+               nfs_mark_for_revalidate(data->inode);
+       }
+       spin_lock(&data->inode->i_lock);
+       NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+       spin_unlock(&data->inode->i_lock);
+       return 0;
+}
+
+static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
+{
+       struct nfs_readargs *argp = &data->args;
+       struct nfs_readres *resp = &data->res;
+
+       if (resp->eof || resp->count == argp->count)
+               return 0;
+
+       /* This is a short read! */
+       nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+       /* Has the server at least made some progress? */
+       if (resp->count == 0)
+               return 0;
+
+       /* Yes, so retry the read at the end of the data */
+       argp->offset += resp->count;
+       argp->pgbase += resp->count;
+       argp->count -= resp->count;
+       rpc_restart_call(task);
+       return -EAGAIN;
+}
+
 /*
  * Handle a read reply that fills part of a page.
  */
@@ -463,12 +502,16 @@ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
        struct nfs_page *req = data->req;
        struct page *page = req->wb_page;
  
-       if (likely(task->tk_status >= 0))
-               nfs_readpage_truncate_uninitialised_page(data);
-       else
-               SetPageError(page);
        if (nfs_readpage_result(task, data) != 0)
                return;
+
+       if (likely(task->tk_status >= 0)) {
+               nfs_readpage_truncate_uninitialised_page(data);
+               if (nfs_readpage_retry(task, data) != 0)
+                       return;
+       }
+       if (unlikely(task->tk_status < 0))
+               SetPageError(page);
        if (atomic_dec_and_test(&req->wb_complete)) {
                if (!PageError(page))
                        SetPageUptodate(page);
@@ -496,25 +539,13 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
        count += base;
        for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
                SetPageUptodate(*pages);
-       if (count != 0)
+       if (count == 0)
+               return;
+       /* Was this a short read? */
+       if (data->res.eof || data->res.count == data->args.count)
                SetPageUptodate(*pages);
 }
 
-static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
-{
-       unsigned int count = data->args.count;
-       unsigned int base = data->args.pgbase;
-       struct page **pages;
-
-       pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-       base &= ~PAGE_CACHE_MASK;
-       count += base;
-       for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
-               SetPageError(*pages);
-       if (count != 0)
-               SetPageError(*pages);
-}
-
 /*
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
@@ -523,19 +554,20 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
 {
        struct nfs_read_data *data = calldata;
 
+       if (nfs_readpage_result(task, data) != 0)
+               return;
        /*
-        * Note: nfs_readpage_result may change the values of
+        * Note: nfs_readpage_retry may change the values of
         * data->args. In the multi-page case, we therefore need
-        * to ensure that we call the next nfs_readpage_set_page_uptodate()
-        * first in the multi-page case.
+        * to ensure that we call nfs_readpage_set_pages_uptodate()
+        * first.
         */
        if (likely(task->tk_status >= 0)) {
                nfs_readpage_truncate_uninitialised_page(data);
                nfs_readpage_set_pages_uptodate(data);
-       } else
-               nfs_readpage_set_pages_error(data);
-       if (nfs_readpage_result(task, data) != 0)
-               return;
+               if (nfs_readpage_retry(task, data) != 0)
+                       return;
+       }
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);
 
@@ -549,50 +581,6 @@ static const struct rpc_call_ops nfs_read_full_ops = {
        .rpc_release = nfs_readdata_release,
 };
 
-/*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
-{
-       struct nfs_readargs *argp = &data->args;
-       struct nfs_readres *resp = &data->res;
-       int status;
-
-       dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
-               task->tk_pid, task->tk_status);
-
-       status = NFS_PROTO(data->inode)->read_done(task, data);
-       if (status != 0)
-               return status;
-
-       nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);
-
-       if (task->tk_status < 0) {
-               if (task->tk_status == -ESTALE) {
-                       set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
-                       nfs_mark_for_revalidate(data->inode);
-               }
-       } else if (resp->count < argp->count && !resp->eof) {
-               /* This is a short read! */
-               nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
-               /* Has the server at least made some progress? */
-               if (resp->count != 0) {
-                       /* Yes, so retry the read at the end of the data */
-                       argp->offset += resp->count;
-                       argp->pgbase += resp->count;
-                       argp->count -= resp->count;
-                       rpc_restart_call(task);
-                       return -EAGAIN;
-               }
-               task->tk_status = -EIO;
-       }
-       spin_lock(&data->inode->i_lock);
-       NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
-       spin_unlock(&data->inode->i_lock);
-       return 0;
-}
-
 /*
  * Read a page over NFS.
  * We read the page synchronously in the following case:
@@ -626,9 +614,10 @@ int nfs_readpage(struct file *file, struct page *page)
                goto out_error;
 
        if (file == NULL) {
+               error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
-                       return -EBADF;
+                       goto out_error;
        } else
                ctx = get_nfs_open_context((struct nfs_open_context *)
                                file->private_data);
@@ -663,7 +652,7 @@ readpage_async_filler(void *data, struct page *page)
        unsigned int len;
 
        nfs_wb_page(inode, page);
-       len = nfs_page_length(inode, page);
+       len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(desc->ctx, inode, page, 0, len);
index 600bbe630abd99cdc6e8f9074984495630c9adc1..6c686112cc03fa3c3dcea70180653640d95aaeb9 100644 (file)
@@ -33,9 +33,7 @@ static int nfs_symlink_filler(struct inode *inode, struct page *page)
 {
        int error;
 
-       lock_kernel();
        error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
-       unlock_kernel();
        if (error < 0)
                goto error;
        SetPageUptodate(page);
index 883dd4a1c157599284f3bae5127fa69418ec06e1..594eb16879ef744fb50226c5272f26c45f7a596f 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/smp_lock.h>
 
 #include "delegation.h"
+#include "internal.h"
 #include "iostat.h"
 
 #define NFSDBG_FACILITY                NFSDBG_PAGECACHE
  * Local function declarations
  */
 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
-                                           struct inode *,
                                            struct page *,
                                            unsigned int, unsigned int);
+static void nfs_mark_request_dirty(struct nfs_page *req);
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
-static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
-                          unsigned int npages, int how);
+static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
 
-static kmem_cache_t *nfs_wdata_cachep;
+static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
 static mempool_t *nfs_commit_mempool;
 
@@ -93,7 +93,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
 
 struct nfs_write_data *nfs_commit_alloc(void)
 {
-       struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
+       struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -102,17 +102,23 @@ struct nfs_write_data *nfs_commit_alloc(void)
        return p;
 }
 
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_rcu_free(struct rcu_head *head)
 {
+       struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_commit_mempool);
 }
 
+void nfs_commit_free(struct nfs_write_data *wdata)
+{
+       call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
+}
+
 struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
        unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
+       struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
 
        if (p) {
                memset(p, 0, sizeof(*p));
@@ -131,18 +137,47 @@ struct nfs_write_data *nfs_writedata_alloc(size_t len)
        return p;
 }
 
-static void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_rcu_free(struct rcu_head *head)
 {
+       struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_wdata_mempool);
 }
 
+static void nfs_writedata_free(struct nfs_write_data *wdata)
+{
+       call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
+}
+
 void nfs_writedata_release(void *wdata)
 {
        nfs_writedata_free(wdata);
 }
 
+static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+{
+       struct nfs_page *req = NULL;
+
+       if (PagePrivate(page)) {
+               req = (struct nfs_page *)page_private(page);
+               if (req != NULL)
+                       atomic_inc(&req->wb_count);
+       }
+       return req;
+}
+
+static struct nfs_page *nfs_page_find_request(struct page *page)
+{
+       struct nfs_page *req = NULL;
+       spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+
+       spin_lock(req_lock);
+       req = nfs_page_find_request_locked(page);
+       spin_unlock(req_lock);
+       return req;
+}
+
 /* Adjust the file length if we're writing beyond the end */
 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 {
@@ -164,113 +199,34 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
  */
 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
 {
-       loff_t end_offs;
-
        if (PageUptodate(page))
                return;
        if (base != 0)
                return;
-       if (count == PAGE_CACHE_SIZE) {
-               SetPageUptodate(page);
-               return;
-       }
-
-       end_offs = i_size_read(page->mapping->host) - 1;
-       if (end_offs < 0)
+       if (count != nfs_page_length(page))
                return;
-       /* Is this the last page? */
-       if (page->index != (unsigned long)(end_offs >> PAGE_CACHE_SHIFT))
-               return;
-       /* This is the last page: set PG_uptodate if we cover the entire
-        * extent of the data, then zero the rest of the page.
-        */
-       if (count == (unsigned int)(end_offs & (PAGE_CACHE_SIZE - 1)) + 1) {
+       if (count != PAGE_CACHE_SIZE)
                memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
-               SetPageUptodate(page);
-       }
+       SetPageUptodate(page);
 }
 
-/*
- * Write a page synchronously.
- * Offset is the data offset within the page.
- */
-static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
-               struct page *page, unsigned int offset, unsigned int count,
-               int how)
-{
-       unsigned int    wsize = NFS_SERVER(inode)->wsize;
-       int             result, written = 0;
-       struct nfs_write_data *wdata;
-
-       wdata = nfs_writedata_alloc(wsize);
-       if (!wdata)
-               return -ENOMEM;
-
-       wdata->flags = how;
-       wdata->cred = ctx->cred;
-       wdata->inode = inode;
-       wdata->args.fh = NFS_FH(inode);
-       wdata->args.context = ctx;
-       wdata->args.pages = &page;
-       wdata->args.stable = NFS_FILE_SYNC;
-       wdata->args.pgbase = offset;
-       wdata->args.count = wsize;
-       wdata->res.fattr = &wdata->fattr;
-       wdata->res.verf = &wdata->verf;
-
-       dprintk("NFS:      nfs_writepage_sync(%s/%Ld %d@%Ld)\n",
-               inode->i_sb->s_id,
-               (long long)NFS_FILEID(inode),
-               count, (long long)(page_offset(page) + offset));
-
-       set_page_writeback(page);
-       nfs_begin_data_update(inode);
-       do {
-               if (count < wsize)
-                       wdata->args.count = count;
-               wdata->args.offset = page_offset(page) + wdata->args.pgbase;
-
-               result = NFS_PROTO(inode)->write(wdata);
-
-               if (result < 0) {
-                       /* Must mark the page invalid after I/O error */
-                       ClearPageUptodate(page);
-                       goto io_error;
-               }
-               if (result < wdata->args.count)
-                       printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
-                                       wdata->args.count, result);
-
-               wdata->args.offset += result;
-               wdata->args.pgbase += result;
-               written += result;
-               count -= result;
-               nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
-       } while (count);
-       /* Update file length */
-       nfs_grow_file(page, offset, written);
-       /* Set the PG_uptodate flag? */
-       nfs_mark_uptodate(page, offset, written);
-
-       if (PageError(page))
-               ClearPageError(page);
-
-io_error:
-       nfs_end_data_update(inode);
-       end_page_writeback(page);
-       nfs_writedata_free(wdata);
-       return written ? written : result;
-}
-
-static int nfs_writepage_async(struct nfs_open_context *ctx,
-               struct inode *inode, struct page *page,
+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
 {
        struct nfs_page *req;
+       int ret;
 
-       req = nfs_update_request(ctx, inode, page, offset, count);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       for (;;) {
+               req = nfs_update_request(ctx, page, offset, count);
+               if (!IS_ERR(req))
+                       break;
+               ret = PTR_ERR(req);
+               if (ret != -EBUSY)
+                       return ret;
+               ret = nfs_wb_page(page->mapping->host, page);
+               if (ret != 0)
+                       return ret;
+       }
        /* Update file length */
        nfs_grow_file(page, offset, count);
        /* Set the PG_uptodate flag? */
@@ -288,74 +244,95 @@ static int wb_priority(struct writeback_control *wbc)
        return 0;
 }
 
+/*
+ * Find an associated nfs write request, and prepare to flush it out
+ * Returns 1 if there was no write request, or if the request was
+ * already tagged by nfs_set_page_dirty.Returns 0 if the request
+ * was not tagged.
+ * May also return an error if the user signalled nfs_wait_on_request().
+ */
+static int nfs_page_mark_flush(struct page *page)
+{
+       struct nfs_page *req;
+       spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+       int ret;
+
+       spin_lock(req_lock);
+       for(;;) {
+               req = nfs_page_find_request_locked(page);
+               if (req == NULL) {
+                       spin_unlock(req_lock);
+                       return 1;
+               }
+               if (nfs_lock_request_dontget(req))
+                       break;
+               /* Note: If we hold the page lock, as is the case in nfs_writepage,
+                *       then the call to nfs_lock_request_dontget() will always
+                *       succeed provided that someone hasn't already marked the
+                *       request as dirty (in which case we don't care).
+                */
+               spin_unlock(req_lock);
+               ret = nfs_wait_on_request(req);
+               nfs_release_request(req);
+               if (ret != 0)
+                       return ret;
+               spin_lock(req_lock);
+       }
+       spin_unlock(req_lock);
+       if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
+               nfs_mark_request_dirty(req);
+               set_page_writeback(page);
+       }
+       ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
+       nfs_unlock_request(req);
+       return ret;
+}
+
 /*
  * Write an mmapped page to the server.
  */
-int nfs_writepage(struct page *page, struct writeback_control *wbc)
+static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
        struct nfs_open_context *ctx;
        struct inode *inode = page->mapping->host;
-       unsigned long end_index;
-       unsigned offset = PAGE_CACHE_SIZE;
-       loff_t i_size = i_size_read(inode);
-       int inode_referenced = 0;
-       int priority = wb_priority(wbc);
+       unsigned offset;
        int err;
 
        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-       /*
-        * Note: We need to ensure that we have a reference to the inode
-        *       if we are to do asynchronous writes. If not, waiting
-        *       in nfs_wait_on_request() may deadlock with clear_inode().
-        *
-        *       If igrab() fails here, then it is in any case safe to
-        *       call nfs_wb_page(), since there will be no pending writes.
-        */
-       if (igrab(inode) != 0)
-               inode_referenced = 1;
-       end_index = i_size >> PAGE_CACHE_SHIFT;
-
-       /* Ensure we've flushed out any previous writes */
-       nfs_wb_page_priority(inode, page, priority);
-
-       /* easy case */
-       if (page->index < end_index)
-               goto do_it;
-       /* things got complicated... */
-       offset = i_size & (PAGE_CACHE_SIZE-1);
-
-       /* OK, are we completely out? */
-       err = 0; /* potential race with truncate - ignore */
-       if (page->index >= end_index+1 || !offset)
+       err = nfs_page_mark_flush(page);
+       if (err <= 0)
+               goto out;
+       err = 0;
+       offset = nfs_page_length(page);
+       if (!offset)
                goto out;
-do_it:
+
        ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
        if (ctx == NULL) {
                err = -EBADF;
                goto out;
        }
-       lock_kernel();
-       if (!IS_SYNC(inode) && inode_referenced) {
-               err = nfs_writepage_async(ctx, inode, page, 0, offset);
-               if (!wbc->for_writepages)
-                       nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
-       } else {
-               err = nfs_writepage_sync(ctx, inode, page, 0,
-                                               offset, priority);
-               if (err >= 0) {
-                       if (err != offset)
-                               redirty_page_for_writepage(wbc, page);
-                       err = 0;
-               }
-       }
-       unlock_kernel();
+       err = nfs_writepage_setup(ctx, page, 0, offset);
        put_nfs_open_context(ctx);
+       if (err != 0)
+               goto out;
+       err = nfs_page_mark_flush(page);
+       if (err > 0)
+               err = 0;
 out:
+       if (!wbc->for_writepages)
+               nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
+       return err;
+}
+
+int nfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+       int err;
+
+       err = nfs_writepage_locked(page, wbc);
        unlock_page(page);
-       if (inode_referenced)
-               iput(inode);
        return err; 
 }
 
@@ -379,21 +356,18 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
                        return 0;
                nfs_wait_on_write_congestion(mapping, 0);
        }
-       err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
+       err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc));
        if (err < 0)
                goto out;
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
-       wbc->nr_to_write -= err;
        if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
                err = nfs_wait_on_requests(inode, 0, 0);
                if (err < 0)
                        goto out;
        }
        err = nfs_commit_inode(inode, wb_priority(wbc));
-       if (err > 0) {
-               wbc->nr_to_write -= err;
+       if (err > 0)
                err = 0;
-       }
 out:
        clear_bit(BDI_write_congested, &bdi->state);
        wake_up_all(&nfs_write_congestion);
@@ -420,6 +394,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
                        nfsi->change_attr++;
        }
        SetPagePrivate(req->wb_page);
+       set_page_private(req->wb_page, (unsigned long)req);
        nfsi->npages++;
        atomic_inc(&req->wb_count);
        return 0;
@@ -436,6 +411,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        BUG_ON (!NFS_WBACK_BUSY(req));
 
        spin_lock(&nfsi->req_lock);
+       set_page_private(req->wb_page, 0);
        ClearPagePrivate(req->wb_page);
        radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
        nfsi->npages--;
@@ -449,33 +425,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        nfs_release_request(req);
 }
 
-/*
- * Find a request
- */
-static inline struct nfs_page *
-_nfs_find_request(struct inode *inode, unsigned long index)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-       struct nfs_page *req;
-
-       req = (struct nfs_page*)radix_tree_lookup(&nfsi->nfs_page_tree, index);
-       if (req)
-               atomic_inc(&req->wb_count);
-       return req;
-}
-
-static struct nfs_page *
-nfs_find_request(struct inode *inode, unsigned long index)
-{
-       struct nfs_page         *req;
-       struct nfs_inode        *nfsi = NFS_I(inode);
-
-       spin_lock(&nfsi->req_lock);
-       req = _nfs_find_request(inode, index);
-       spin_unlock(&nfsi->req_lock);
-       return req;
-}
-
 /*
  * Add a request to the inode's dirty list.
  */
@@ -491,8 +440,14 @@ nfs_mark_request_dirty(struct nfs_page *req)
        nfs_list_add_request(req, &nfsi->dirty);
        nfsi->ndirty++;
        spin_unlock(&nfsi->req_lock);
-       inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
-       mark_inode_dirty(inode);
+       __mark_inode_dirty(inode, I_DIRTY_PAGES);
+}
+
+static void
+nfs_redirty_request(struct nfs_page *req)
+{
+       clear_bit(PG_FLUSHING, &req->wb_flags);
+       __set_page_dirty_nobuffers(req->wb_page);
 }
 
 /*
@@ -501,8 +456,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
 static inline int
 nfs_dirty_request(struct nfs_page *req)
 {
-       struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
-       return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
+       return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -520,7 +474,7 @@ nfs_mark_request_commit(struct nfs_page *req)
        nfsi->ncommit++;
        spin_unlock(&nfsi->req_lock);
        inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-       mark_inode_dirty(inode);
+       __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 #endif
 
@@ -597,31 +551,6 @@ static void nfs_cancel_commit_list(struct list_head *head)
        }
 }
 
-/*
- * nfs_scan_dirty - Scan an inode for dirty requests
- * @inode: NFS inode to scan
- * @dst: destination list
- * @idx_start: lower bound of page->index to scan.
- * @npages: idx_start + npages sets the upper bound to scan.
- *
- * Moves requests from the inode's dirty page list.
- * The requests are *not* checked to ensure that they form a contiguous set.
- */
-static int
-nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-       int res = 0;
-
-       if (nfsi->ndirty != 0) {
-               res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
-               nfsi->ndirty -= res;
-               if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
-                       printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
-       }
-       return res;
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 /*
  * nfs_scan_commit - Scan an inode for commit requests
@@ -698,27 +627,27 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
  * Note: Should always be called with the Page Lock held!
  */
 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
-               struct inode *inode, struct page *page,
-               unsigned int offset, unsigned int bytes)
+               struct page *page, unsigned int offset, unsigned int bytes)
 {
-       struct nfs_server *server = NFS_SERVER(inode);
+       struct inode *inode = page->mapping->host;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page         *req, *new = NULL;
        unsigned long           rqend, end;
 
        end = offset + bytes;
 
-       if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
+       if (nfs_wait_on_write_congestion(page->mapping, NFS_SERVER(inode)->flags & NFS_MOUNT_INTR))
                return ERR_PTR(-ERESTARTSYS);
        for (;;) {
                /* Loop over all inode entries and see if we find
                 * A request for the page we wish to update
                 */
                spin_lock(&nfsi->req_lock);
-               req = _nfs_find_request(inode, page->index);
+               req = nfs_page_find_request_locked(page);
                if (req) {
                        if (!nfs_lock_request_dontget(req)) {
                                int error;
+
                                spin_unlock(&nfsi->req_lock);
                                error = nfs_wait_on_request(req);
                                nfs_release_request(req);
@@ -745,7 +674,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
                                return ERR_PTR(error);
                        }
                        spin_unlock(&nfsi->req_lock);
-                       nfs_mark_request_dirty(new);
                        return new;
                }
                spin_unlock(&nfsi->req_lock);
@@ -786,9 +714,8 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 int nfs_flush_incompatible(struct file *file, struct page *page)
 {
        struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
-       struct inode    *inode = page->mapping->host;
        struct nfs_page *req;
-       int             status = 0;
+       int do_flush, status;
        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
@@ -797,13 +724,18 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         * Also do the same if we find a request from an existing
         * dropped page.
         */
-       req = nfs_find_request(inode, page->index);
-       if (req) {
-               if (req->wb_page != page || ctx != req->wb_context)
-                       status = nfs_wb_page(inode, page);
+       do {
+               req = nfs_page_find_request(page);
+               if (req == NULL)
+                       return 0;
+               do_flush = req->wb_page != page || req->wb_context != ctx
+                       || !nfs_dirty_request(req);
                nfs_release_request(req);
-       }
-       return (status < 0) ? status : 0;
+               if (!do_flush)
+                       return 0;
+               status = nfs_wb_page(page->mapping->host, page);
+       } while (status == 0);
+       return status;
 }
 
 /*
@@ -817,7 +749,6 @@ int nfs_updatepage(struct file *file, struct page *page,
 {
        struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
        struct inode    *inode = page->mapping->host;
-       struct nfs_page *req;
        int             status = 0;
 
        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -827,62 +758,18 @@ int nfs_updatepage(struct file *file, struct page *page,
                file->f_dentry->d_name.name, count,
                (long long)(page_offset(page) +offset));
 
-       if (IS_SYNC(inode)) {
-               status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
-               if (status > 0) {
-                       if (offset == 0 && status == PAGE_CACHE_SIZE)
-                               SetPageUptodate(page);
-                       return 0;
-               }
-               return status;
-       }
-
        /* If we're not using byte range locks, and we know the page
         * is entirely in cache, it may be more efficient to avoid
         * fragmenting write requests.
         */
        if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
-               loff_t end_offs = i_size_read(inode) - 1;
-               unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;
-
-               count += offset;
+               count = max(count + offset, nfs_page_length(page));
                offset = 0;
-               if (unlikely(end_offs < 0)) {
-                       /* Do nothing */
-               } else if (page->index == end_index) {
-                       unsigned int pglen;
-                       pglen = (unsigned int)(end_offs & (PAGE_CACHE_SIZE-1)) + 1;
-                       if (count < pglen)
-                               count = pglen;
-               } else if (page->index < end_index)
-                       count = PAGE_CACHE_SIZE;
        }
 
-       /*
-        * Try to find an NFS request corresponding to this page
-        * and update it.
-        * If the existing request cannot be updated, we must flush
-        * it out now.
-        */
-       do {
-               req = nfs_update_request(ctx, inode, page, offset, count);
-               status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
-               if (status != -EBUSY)
-                       break;
-               /* Request could not be updated. Flush it out and try again */
-               status = nfs_wb_page(inode, page);
-       } while (status >= 0);
-       if (status < 0)
-               goto done;
-
-       status = 0;
+       status = nfs_writepage_setup(ctx, page, offset, count);
+       __set_page_dirty_nobuffers(page);
 
-       /* Update file length */
-       nfs_grow_file(page, offset, count);
-       /* Set the PG_uptodate flag? */
-       nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
-       nfs_unlock_request(req);
-done:
         dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
                        status, (long long)i_size_read(inode));
        if (status < 0)
@@ -897,7 +784,7 @@ static void nfs_writepage_release(struct nfs_page *req)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
        if (!PageError(req->wb_page)) {
                if (NFS_NEED_RESCHED(req)) {
-                       nfs_mark_request_dirty(req);
+                       nfs_redirty_request(req);
                        goto out;
                } else if (NFS_NEED_COMMIT(req)) {
                        nfs_mark_request_commit(req);
@@ -979,9 +866,7 @@ static void nfs_execute_write(struct nfs_write_data *data)
        sigset_t oldset;
 
        rpc_clnt_sigmask(clnt, &oldset);
-       lock_kernel();
        rpc_execute(&data->task);
-       unlock_kernel();
        rpc_clnt_sigunmask(clnt, &oldset);
 }
 
@@ -1015,7 +900,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
        atomic_set(&req->wb_complete, requests);
 
        ClearPageError(page);
-       set_page_writeback(page);
        offset = 0;
        nbytes = req->wb_bytes;
        do {
@@ -1043,9 +927,9 @@ out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
-               nfs_writedata_free(data);
+               nfs_writedata_release(data);
        }
-       nfs_mark_request_dirty(req);
+       nfs_redirty_request(req);
        nfs_clear_page_writeback(req);
        return -ENOMEM;
 }
@@ -1076,7 +960,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
-               set_page_writeback(req->wb_page);
                *pages++ = req->wb_page;
                count += req->wb_bytes;
        }
@@ -1091,7 +974,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_mark_request_dirty(req);
+               nfs_redirty_request(req);
                nfs_clear_page_writeback(req);
        }
        return -ENOMEM;
@@ -1126,7 +1009,7 @@ out_err:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
-               nfs_mark_request_dirty(req);
+               nfs_redirty_request(req);
                nfs_clear_page_writeback(req);
        }
        return error;
@@ -1442,7 +1325,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
                }
                /* We have a mismatch. Write the page again */
                dprintk(" mismatch\n");
-               nfs_mark_request_dirty(req);
+               nfs_redirty_request(req);
        next:
                nfs_clear_page_writeback(req);
        }
@@ -1459,18 +1342,17 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i
 }
 #endif
 
-static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
-                          unsigned int npages, int how)
+static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
+       struct nfs_inode *nfsi = NFS_I(mapping->host);
        LIST_HEAD(head);
-       int res;
+       long res;
 
        spin_lock(&nfsi->req_lock);
-       res = nfs_scan_dirty(inode, &head, idx_start, npages);
+       res = nfs_scan_dirty(mapping, wbc, &head);
        spin_unlock(&nfsi->req_lock);
        if (res) {
-               int error = nfs_flush_list(inode, &head, res, how);
+               int error = nfs_flush_list(mapping->host, &head, res, how);
                if (error < 0)
                        return error;
        }
@@ -1496,38 +1378,62 @@ int nfs_commit_inode(struct inode *inode, int how)
 }
 #endif
 
-int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
-               unsigned int npages, int how)
+long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
+       struct inode *inode = mapping->host;
        struct nfs_inode *nfsi = NFS_I(inode);
+       unsigned long idx_start, idx_end;
+       unsigned int npages = 0;
        LIST_HEAD(head);
        int nocommit = how & FLUSH_NOCOMMIT;
-       int pages, ret;
-
+       long pages, ret;
+
+       /* FIXME */
+       if (wbc->range_cyclic)
+               idx_start = 0;
+       else {
+               idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+               idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+               if (idx_end > idx_start) {
+                       unsigned long l_npages = 1 + idx_end - idx_start;
+                       npages = l_npages;
+                       if (sizeof(npages) != sizeof(l_npages) &&
+                                       (unsigned long)npages != l_npages)
+                               npages = 0;
+               }
+       }
        how &= ~FLUSH_NOCOMMIT;
        spin_lock(&nfsi->req_lock);
        do {
+               wbc->pages_skipped = 0;
                ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
                if (ret != 0)
                        continue;
-               pages = nfs_scan_dirty(inode, &head, idx_start, npages);
+               pages = nfs_scan_dirty(mapping, wbc, &head);
                if (pages != 0) {
                        spin_unlock(&nfsi->req_lock);
-                       if (how & FLUSH_INVALIDATE)
+                       if (how & FLUSH_INVALIDATE) {
                                nfs_cancel_dirty_list(&head);
-                       else
+                               ret = pages;
+                       } else
                                ret = nfs_flush_list(inode, &head, pages, how);
                        spin_lock(&nfsi->req_lock);
                        continue;
                }
+               if (wbc->pages_skipped != 0)
+                       continue;
                if (nocommit)
                        break;
                pages = nfs_scan_commit(inode, &head, idx_start, npages);
-               if (pages == 0)
+               if (pages == 0) {
+                       if (wbc->pages_skipped != 0)
+                               continue;
                        break;
+               }
                if (how & FLUSH_INVALIDATE) {
                        spin_unlock(&nfsi->req_lock);
                        nfs_cancel_commit_list(&head);
+                       ret = pages;
                        spin_lock(&nfsi->req_lock);
                        continue;
                }
@@ -1540,6 +1446,106 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
        return ret;
 }
 
+/*
+ * flush the inode to disk.
+ */
+int nfs_wb_all(struct inode *inode)
+{
+       struct address_space *mapping = inode->i_mapping;
+       struct writeback_control wbc = {
+               .bdi = mapping->backing_dev_info,
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = LONG_MAX,
+               .for_writepages = 1,
+               .range_cyclic = 1,
+       };
+       int ret;
+
+       ret = generic_writepages(mapping, &wbc);
+       if (ret < 0)
+               goto out;
+       ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
+       if (ret >= 0)
+               return 0;
+out:
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       return ret;
+}
+
+int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
+{
+       struct writeback_control wbc = {
+               .bdi = mapping->backing_dev_info,
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = LONG_MAX,
+               .range_start = range_start,
+               .range_end = range_end,
+               .for_writepages = 1,
+       };
+       int ret;
+
+       if (!(how & FLUSH_NOWRITEPAGE)) {
+               ret = generic_writepages(mapping, &wbc);
+               if (ret < 0)
+                       goto out;
+       }
+       ret = nfs_sync_mapping_wait(mapping, &wbc, how);
+       if (ret >= 0)
+               return 0;
+out:
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       return ret;
+}
+
+int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
+{
+       loff_t range_start = page_offset(page);
+       loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+       struct writeback_control wbc = {
+               .bdi = page->mapping->backing_dev_info,
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = LONG_MAX,
+               .range_start = range_start,
+               .range_end = range_end,
+       };
+       int ret;
+
+       BUG_ON(!PageLocked(page));
+       if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) {
+               ret = nfs_writepage_locked(page, &wbc);
+               if (ret < 0)
+                       goto out;
+       }
+       ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
+       if (ret >= 0)
+               return 0;
+out:
+       __mark_inode_dirty(inode, I_DIRTY_PAGES);
+       return ret;
+}
+
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page* page)
+{
+       return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
+}
+
+int nfs_set_page_dirty(struct page *page)
+{
+       struct nfs_page *req;
+
+       req = nfs_page_find_request(page);
+       if (req != NULL) {
+               /* Mark any existing write requests for flushing */
+               set_bit(PG_NEED_FLUSH, &req->wb_flags);
+               nfs_release_request(req);
+       }
+       return __set_page_dirty_nobuffers(page);
+}
+
 int __init nfs_init_writepagecache(void)
 {
        nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
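
The hunks above start caching the owning struct nfs_page in page_private() (set_page_private() placed next to SetPagePrivate(), and cleared again in nfs_inode_remove_request()) and replace the radix-tree lookups of nfs_find_request() with nfs_page_find_request_locked()/nfs_page_find_request(). The body of the new lookup helper is not part of this excerpt; a plausible sketch, assuming it simply reads back the pointer stored above, would be:

	/* Sketch only: assumes nfs_page_find_request_locked() just reads the
	 * pointer stashed by set_page_private() in the hunks above; the caller
	 * holds nfsi->req_lock. */
	static struct nfs_page *nfs_page_find_request_locked(struct page *page)
	{
		struct nfs_page *req = NULL;

		if (PagePrivate(page)) {
			req = (struct nfs_page *)page_private(page);
			if (req != NULL)
				atomic_inc(&req->wb_count);
		}
		return req;
	}
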
index b4baca3053c35a96b526107096f1864e349848f6..277df40f098de5038b7a62cb7cca1ca138a7e12c 100644 (file)
 
 #define NFSDDBG_FACILITY               NFSDDBG_XDR
 
-#ifdef NFSD_OPTIMIZE_SPACE
-# define inline
-#endif
-
 
 /*
  * Mapping of S_IF* types to NFS file types
@@ -42,14 +38,14 @@ static u32  nfs3_ftypes[] = {
 /*
  * XDR functions for basic NFS types
  */
-static inline __be32 *
+static __be32 *
 encode_time3(__be32 *p, struct timespec *time)
 {
        *p++ = htonl((u32) time->tv_sec); *p++ = htonl(time->tv_nsec);
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_time3(__be32 *p, struct timespec *time)
 {
        time->tv_sec = ntohl(*p++);
@@ -57,7 +53,7 @@ decode_time3(__be32 *p, struct timespec *time)
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_fh(__be32 *p, struct svc_fh *fhp)
 {
        unsigned int size;
@@ -77,7 +73,7 @@ __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp)
        return decode_fh(p, fhp);
 }
 
-static inline __be32 *
+static __be32 *
 encode_fh(__be32 *p, struct svc_fh *fhp)
 {
        unsigned int size = fhp->fh_handle.fh_size;
@@ -91,7 +87,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp)
  * Decode a file name and make sure that the path contains
  * no slashes or null bytes.
  */
-static inline __be32 *
+static __be32 *
 decode_filename(__be32 *p, char **namp, int *lenp)
 {
        char            *name;
@@ -107,7 +103,7 @@ decode_filename(__be32 *p, char **namp, int *lenp)
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_sattr3(__be32 *p, struct iattr *iap)
 {
        u32     tmp;
@@ -153,7 +149,7 @@ decode_sattr3(__be32 *p, struct iattr *iap)
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
              struct kstat *stat)
 {
@@ -186,7 +182,7 @@ encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_saved_post_attr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
 {
        struct inode    *inode = fhp->fh_dentry->d_inode;
@@ -776,7 +772,7 @@ nfs3svc_encode_readdirres(struct svc_rqst *rqstp, __be32 *p,
                return xdr_ressize_check(rqstp, p);
 }
 
-static inline __be32 *
+static __be32 *
 encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
             int namlen, ino_t ino)
 {
@@ -790,7 +786,7 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name,
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p,
                struct svc_fh *fhp)
 {
index 293b6495829f863a16e45504f26b6d47d12d2128..640c92b2a9f71e7d3a1fd8a032e1f9ef4f91c5ff 100644 (file)
@@ -84,10 +84,10 @@ static void nfs4_set_recdir(char *recdir);
  */
 static DEFINE_MUTEX(client_mutex);
 
-static kmem_cache_t *stateowner_slab = NULL;
-static kmem_cache_t *file_slab = NULL;
-static kmem_cache_t *stateid_slab = NULL;
-static kmem_cache_t *deleg_slab = NULL;
+static struct kmem_cache *stateowner_slab = NULL;
+static struct kmem_cache *file_slab = NULL;
+static struct kmem_cache *stateid_slab = NULL;
+static struct kmem_cache *deleg_slab = NULL;
 
 void
 nfs4_lock_state(void)
@@ -1003,7 +1003,7 @@ alloc_init_file(struct inode *ino)
 }
 
 static void
-nfsd4_free_slab(kmem_cache_t **slab)
+nfsd4_free_slab(struct kmem_cache **slab)
 {
        if (*slab == NULL)
                return;
@@ -1829,9 +1829,8 @@ out:
 }
 
 static struct workqueue_struct *laundry_wq;
-static struct work_struct laundromat_work;
-static void laundromat_main(void *);
-static DECLARE_WORK(laundromat_work, laundromat_main, NULL);
+static void laundromat_main(struct work_struct *);
+static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
 
 __be32
 nfsd4_renew(clientid_t *clid)
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void)
 }
 
 void
-laundromat_main(void *not_used)
+laundromat_main(struct work_struct *not_used)
 {
        time_t t;
 
index 56ebb1443e0eb7bf067a773690dee36a1d60f2ee..f5243f943996030896b9dc4280b7b5f550252fec 100644 (file)
 
 #define NFSDDBG_FACILITY               NFSDDBG_XDR
 
-
-#ifdef NFSD_OPTIMIZE_SPACE
-# define inline
-#endif
-
 /*
  * Mapping of S_IF* types to NFS file types
  */
@@ -55,7 +50,7 @@ __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp)
        return decode_fh(p, fhp);
 }
 
-static inline __be32 *
+static __be32 *
 encode_fh(__be32 *p, struct svc_fh *fhp)
 {
        memcpy(p, &fhp->fh_handle.fh_base, NFS_FHSIZE);
@@ -66,7 +61,7 @@ encode_fh(__be32 *p, struct svc_fh *fhp)
  * Decode a file name and make sure that the path contains
  * no slashes or null bytes.
  */
-static inline __be32 *
+static __be32 *
 decode_filename(__be32 *p, char **namp, int *lenp)
 {
        char            *name;
@@ -82,7 +77,7 @@ decode_filename(__be32 *p, char **namp, int *lenp)
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_pathname(__be32 *p, char **namp, int *lenp)
 {
        char            *name;
@@ -98,7 +93,7 @@ decode_pathname(__be32 *p, char **namp, int *lenp)
        return p;
 }
 
-static inline __be32 *
+static __be32 *
 decode_sattr(__be32 *p, struct iattr *iap)
 {
        u32     tmp, tmp1;
index 046fde8170eaf50038d907602106409b3dd104b0..65e640c61c8bb29f1b879d4d3898b3939462cfdc 100644 (file)
@@ -4421,6 +4421,73 @@ static wchar_t *page_charset2uni[256] = {
        c2u_F8, c2u_F9, c2u_FA, c2u_FB, c2u_FC, c2u_FD, c2u_FE, NULL,   
 };
 
+static unsigned char u2c_00[512] = {
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x0C-0x0F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x13 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x14-0x17 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x1C-0x1F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x23 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x24-0x27 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x2C-0x2F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x33 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x34-0x37 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x3C-0x3F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x43 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x44-0x47 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x4C-0x4F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x53 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x54-0x57 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x5C-0x5F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x63 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x64-0x67 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x6C-0x6F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x73 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x74-0x77 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x7C-0x7F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x83 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x84-0x87 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x8C-0x8F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x93 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x94-0x97 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9B */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x9C-0x9F */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA0-0xA3 */
+       0xA1, 0xE8, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xEC, /* 0xA4-0xA7 */
+       0xA1, 0xA7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xA8-0xAB */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xAC-0xAF */
+       0xA1, 0xE3, 0xA1, 0xC0, 0x00, 0x00, 0x00, 0x00, /* 0xB0-0xB3 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xA4, /* 0xB4-0xB7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xB8-0xBB */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xBC-0xBF */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC0-0xC3 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC4-0xC7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xC8-0xCB */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xCC-0xCF */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD0-0xD3 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC1, /* 0xD4-0xD7 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xD8-0xDB */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xDC-0xDF */
+       0xA8, 0xA4, 0xA8, 0xA2, 0x00, 0x00, 0x00, 0x00, /* 0xE0-0xE3 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xE4-0xE7 */
+       0xA8, 0xA8, 0xA8, 0xA6, 0xA8, 0xBA, 0x00, 0x00, /* 0xE8-0xEB */
+       0xA8, 0xAC, 0xA8, 0xAA, 0x00, 0x00, 0x00, 0x00, /* 0xEC-0xEF */
+       0x00, 0x00, 0x00, 0x00, 0xA8, 0xB0, 0xA8, 0xAE, /* 0xF0-0xF3 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, 0xC2, /* 0xF4-0xF7 */
+       0x00, 0x00, 0xA8, 0xB4, 0xA8, 0xB2, 0x00, 0x00, /* 0xF8-0xFB */
+       0xA8, 0xB9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xFC-0xFF */
+};
+
 static unsigned char u2c_01[512] = {
        0xA8, 0xA1, 0xA8, 0xA1, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x03 */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x04-0x07 */
@@ -10825,7 +10892,7 @@ static unsigned char u2c_FF[512] = {
 };
 
 static unsigned char *page_uni2charset[256] = {
-       NULL,   u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,   
+       u2c_00, u2c_01, u2c_02, u2c_03, u2c_04, NULL,   NULL,   NULL,
        NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
        NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
        NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   
@@ -10936,11 +11003,34 @@ static int uni2char(const wchar_t uni,
        unsigned char *uni2charset;
        unsigned char cl = uni&0xFF;
        unsigned char ch = (uni>>8)&0xFF;
-       int n;
+       unsigned char out0, out1;
 
        if (boundlen <= 0)
                return -ENAMETOOLONG;
 
+       if (uni == 0x20ac) { /* Euro symbol. The only exception with a non-ASCII Unicode value */
+               out[0] = 0x80;
+               return 1;
+       }
+
+       if (ch == 0) { /* handle the U00 plane */
+               /* if (cl == 0) return -EINVAL; */ /* U+0000 is legal in cp936 */
+               out0 = u2c_00[cl*2];
+               out1 = u2c_00[cl*2+1];
+               if (out0 == 0x00 && out1 == 0x00) {
+                       if (cl<0x80) {
+                               out[0] = cl;
+                               return 1;
+                       }
+                       return -EINVAL;
+               } else {
+                       if (boundlen <= 1)
+                               return -ENAMETOOLONG;
+                       out[0] = out0;
+                       out[1] = out1;
+                       return 2;
+               }
+       }
 
        uni2charset = page_uni2charset[ch];
        if (uni2charset) {
@@ -10950,15 +11040,10 @@ static int uni2char(const wchar_t uni,
                out[1] = uni2charset[cl*2+1];
                if (out[0] == 0x00 && out[1] == 0x00)
                        return -EINVAL;
-               n = 2;
-       } else if (ch==0 && cl) {
-               out[0] = cl;
-               n = 1;
+               return 2;
        }
        else
                return -EINVAL;
-
-       return n;
 }
 
 static int char2uni(const unsigned char *rawstring, int boundlen,
@@ -10972,7 +11057,11 @@ static int char2uni(const unsigned char *rawstring, int boundlen,
                return -ENAMETOOLONG;
 
        if (boundlen == 1) {
-               *uni = rawstring[0];
+               if (rawstring[0]==0x80) { /* Euro symbol. The only exception with a non-ASCII Unicode value */
+                       *uni = 0x20ac;
+               } else {
+                       *uni = rawstring[0];
+               }
                return 1;
        }
 
@@ -10986,7 +11075,11 @@ static int char2uni(const unsigned char *rawstring, int boundlen,
                        return -EINVAL;
                n = 2;
        } else{
-               *uni = ch;
+               if (ch==0x80) { /* Euro symbol. The only exception with a non-ASCII Unicode value */
+                       *uni = 0x20ac;
+               } else {
+                       *uni = ch;
+               }
                n = 1;
        }
        return n;
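
The cp936 change above adds a dedicated table for the U+00xx range and special-cases the Euro sign, which GBK maps to the single byte 0x80 even though U+20AC is not an ASCII code point. A rough usage sketch of the round trip through the NLS table (error handling trimmed; assumes the cp936 module is available to load_nls()):

	#include <linux/nls.h>

	static void cp936_euro_demo(void)
	{
		struct nls_table *t = load_nls("cp936");
		unsigned char buf[2];
		wchar_t uni;

		if (!t)
			return;
		t->uni2char(0x20ac, buf, sizeof(buf));	/* returns 1, buf[0] == 0x80 */
		t->char2uni(buf, 1, &uni);		/* returns 1, uni == 0x20ac */
		unload_nls(t);
	}
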
index 9f08e851cfb69ed9373344b7865b42b747451d0f..c577d8e1bd95d530604386e293f4ec5dd3884ce9 100644 (file)
@@ -1272,7 +1272,7 @@ ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
 {
        ntfs_attr_search_ctx *ctx;
 
-       ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, SLAB_NOFS);
+       ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
        if (ctx)
                ntfs_attr_init_search_ctx(ctx, ni, mrec);
        return ctx;
index e32cde486362bb21976d271fb02892effbeb2460..2194eff4974379062c1d19578228766555da2519 100644 (file)
@@ -38,7 +38,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
 {
        ntfs_index_context *ictx;
 
-       ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS);
+       ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
        if (ictx)
                *ictx = (ntfs_index_context){ .idx_ni = idx_ni };
        return ictx;
index 2d3de9c89818033a0c96f1a9f43afeb84132a285..247989891b4b6e45cd9f20548a91d202c1cea82e 100644 (file)
@@ -324,7 +324,7 @@ struct inode *ntfs_alloc_big_inode(struct super_block *sb)
        ntfs_inode *ni;
 
        ntfs_debug("Entering.");
-       ni = kmem_cache_alloc(ntfs_big_inode_cache, SLAB_NOFS);
+       ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS);
        if (likely(ni != NULL)) {
                ni->state = 0;
                return VFS_I(ni);
@@ -349,7 +349,7 @@ static inline ntfs_inode *ntfs_alloc_extent_inode(void)
        ntfs_inode *ni;
 
        ntfs_debug("Entering.");
-       ni = kmem_cache_alloc(ntfs_inode_cache, SLAB_NOFS);
+       ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
        if (likely(ni != NULL)) {
                ni->state = 0;
                return ni;
index 6a495f7369f9e7fbd170d0802014f4d750d6c6ae..005ca4b0f132fc1e4515a5f5b9e5e7d1acd26726 100644 (file)
@@ -266,7 +266,7 @@ int ntfs_nlstoucs(const ntfs_volume *vol, const char *ins,
 
        /* We do not trust outside sources. */
        if (likely(ins)) {
-               ucs = kmem_cache_alloc(ntfs_name_cache, SLAB_NOFS);
+               ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
                if (likely(ucs)) {
                        for (i = o = 0; i < ins_len; i += wc_len) {
                                wc_len = nls->char2uni(ins + i, ins_len - i,
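
The NTFS hunks above are part of the same tree-wide cleanup seen earlier in this merge: the SLAB_* allocation flags, which were simply aliases for the GFP_* flags, give way to the GFP_* names, and the kmem_cache_t typedef becomes struct kmem_cache. A minimal sketch of the resulting idiom (struct example and example_cache are placeholders, not from this patch):

	#include <linux/slab.h>

	struct example { long payload; };		/* placeholder object */

	static struct kmem_cache *example_cache;	/* set up elsewhere via kmem_cache_create() */

	static struct example *example_alloc(void)
	{
		/* GFP_NOFS: safe from filesystem writeback and reclaim paths. */
		return kmem_cache_alloc(example_cache, GFP_NOFS);
	}

	static void example_free(struct example *e)
	{
		kmem_cache_free(example_cache, e);
	}
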
index f43bc5f18a352c0dfe4d64c842e2473dce308768..edc91ca3792ac2b331a33540ec8d5a8250f24566 100644 (file)
@@ -52,14 +52,14 @@ static int ocfs2_extent_contig(struct inode *inode,
                               u64 blkno);
 
 static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
-                                    struct ocfs2_journal_handle *handle,
+                                    handle_t *handle,
                                     struct inode *inode,
                                     int wanted,
                                     struct ocfs2_alloc_context *meta_ac,
                                     struct buffer_head *bhs[]);
 
 static int ocfs2_add_branch(struct ocfs2_super *osb,
-                           struct ocfs2_journal_handle *handle,
+                           handle_t *handle,
                            struct inode *inode,
                            struct buffer_head *fe_bh,
                            struct buffer_head *eb_bh,
@@ -67,14 +67,14 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
                            struct ocfs2_alloc_context *meta_ac);
 
 static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
-                                 struct ocfs2_journal_handle *handle,
+                                 handle_t *handle,
                                  struct inode *inode,
                                  struct buffer_head *fe_bh,
                                  struct ocfs2_alloc_context *meta_ac,
                                  struct buffer_head **ret_new_eb_bh);
 
 static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
-                                 struct ocfs2_journal_handle *handle,
+                                 handle_t *handle,
                                  struct inode *inode,
                                  struct buffer_head *fe_bh,
                                  u64 blkno,
@@ -152,7 +152,7 @@ bail:
  * l_count for you
  */
 static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
-                                    struct ocfs2_journal_handle *handle,
+                                    handle_t *handle,
                                     struct inode *inode,
                                     int wanted,
                                     struct ocfs2_alloc_context *meta_ac,
@@ -253,7 +253,7 @@ bail:
  * contain a single record with e_clusters == 0.
  */
 static int ocfs2_add_branch(struct ocfs2_super *osb,
-                           struct ocfs2_journal_handle *handle,
+                           handle_t *handle,
                            struct inode *inode,
                            struct buffer_head *fe_bh,
                            struct buffer_head *eb_bh,
@@ -418,7 +418,7 @@ bail:
  * after this call.
  */
 static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
-                                 struct ocfs2_journal_handle *handle,
+                                 handle_t *handle,
                                  struct inode *inode,
                                  struct buffer_head *fe_bh,
                                  struct ocfs2_alloc_context *meta_ac,
@@ -520,7 +520,7 @@ bail:
  * down.
  */
 static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
-                                 struct ocfs2_journal_handle *handle,
+                                 handle_t *handle,
                                  struct inode *inode,
                                  struct buffer_head *fe_bh,
                                  u64 start_blk,
@@ -809,7 +809,7 @@ bail:
 
 /* the caller needs to update fe->i_clusters */
 int ocfs2_insert_extent(struct ocfs2_super *osb,
-                       struct ocfs2_journal_handle *handle,
+                       handle_t *handle,
                        struct inode *inode,
                        struct buffer_head *fe_bh,
                        u64 start_blk,
@@ -951,7 +951,7 @@ static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
 }
 
 static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
-                                    struct ocfs2_journal_handle *handle,
+                                    handle_t *handle,
                                     u64 start_blk,
                                     unsigned int num_clusters)
 {
@@ -1034,7 +1034,7 @@ bail:
 }
 
 static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
-                                        struct ocfs2_journal_handle *handle,
+                                        handle_t *handle,
                                         struct inode *data_alloc_inode,
                                         struct buffer_head *data_alloc_bh)
 {
@@ -1113,7 +1113,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
 {
        int status;
        unsigned int num_to_flush;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle;
        struct inode *tl_inode = osb->osb_tl_inode;
        struct inode *data_alloc_inode = NULL;
        struct buffer_head *tl_bh = osb->osb_tl_bh;
@@ -1130,7 +1130,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
        if (!OCFS2_IS_VALID_DINODE(di)) {
                OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
                status = -EIO;
-               goto bail;
+               goto out;
        }
 
        num_to_flush = le16_to_cpu(tl->tl_used);
@@ -1138,14 +1138,7 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
             num_to_flush, (unsigned long long)OCFS2_I(tl_inode)->ip_blkno);
        if (!num_to_flush) {
                status = 0;
-               goto bail;
-       }
-
-       handle = ocfs2_alloc_handle(osb);
-       if (!handle) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
+               goto out;
        }
 
        data_alloc_inode = ocfs2_get_system_file_inode(osb,
@@ -1154,41 +1147,40 @@ static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
        if (!data_alloc_inode) {
                status = -EINVAL;
                mlog(ML_ERROR, "Could not get bitmap inode!\n");
-               goto bail;
+               goto out;
        }
 
-       ocfs2_handle_add_inode(handle, data_alloc_inode);
-       status = ocfs2_meta_lock(data_alloc_inode, handle, &data_alloc_bh, 1);
+       mutex_lock(&data_alloc_inode->i_mutex);
+
+       status = ocfs2_meta_lock(data_alloc_inode, &data_alloc_bh, 1);
        if (status < 0) {
                mlog_errno(status);
-               goto bail;
+               goto out_mutex;
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_TRUNCATE_LOG_UPDATE);
+       handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
-               handle = NULL;
                mlog_errno(status);
-               goto bail;
+               goto out_unlock;
        }
 
        status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
                                               data_alloc_bh);
-       if (status < 0) {
+       if (status < 0)
                mlog_errno(status);
-               goto bail;
-       }
 
-bail:
-       if (handle)
-               ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 
-       if (data_alloc_inode)
-               iput(data_alloc_inode);
+out_unlock:
+       brelse(data_alloc_bh);
+       ocfs2_meta_unlock(data_alloc_inode, 1);
 
-       if (data_alloc_bh)
-               brelse(data_alloc_bh);
+out_mutex:
+       mutex_unlock(&data_alloc_inode->i_mutex);
+       iput(data_alloc_inode);
 
+out:
        mlog_exit(status);
        return status;
 }
@@ -1205,10 +1197,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
        return status;
 }
 
-static void ocfs2_truncate_log_worker(void *data)
+static void ocfs2_truncate_log_worker(struct work_struct *work)
 {
        int status;
-       struct ocfs2_super *osb = data;
+       struct ocfs2_super *osb =
+               container_of(work, struct ocfs2_super,
+                            osb_truncate_log_wq.work);
 
        mlog_entry_void();
 
@@ -1347,7 +1341,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
        int i;
        unsigned int clusters, num_recs, start_cluster;
        u64 start_blk;
-       struct ocfs2_journal_handle *handle;
+       handle_t *handle;
        struct inode *tl_inode = osb->osb_tl_inode;
        struct ocfs2_truncate_log *tl;
 
@@ -1373,8 +1367,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
                        }
                }
 
-               handle = ocfs2_start_trans(osb, NULL,
-                                          OCFS2_TRUNCATE_LOG_UPDATE);
+               handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
@@ -1387,7 +1380,7 @@ int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
 
                status = ocfs2_truncate_log_append(osb, handle,
                                                   start_blk, clusters);
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail_up;
@@ -1441,7 +1434,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
        /* ocfs2_truncate_log_shutdown keys on the existence of
         * osb->osb_tl_inode so we don't set any of the osb variables
         * until we're sure all is well. */
-       INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+       INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
+                         ocfs2_truncate_log_worker);
        osb->osb_tl_bh    = tl_bh;
        osb->osb_tl_inode = tl_inode;
 
@@ -1543,7 +1537,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
                             struct inode *inode,
                             struct buffer_head *fe_bh,
                             struct buffer_head *old_last_eb_bh,
-                            struct ocfs2_journal_handle *handle,
+                            handle_t *handle,
                             struct ocfs2_truncate_context *tc)
 {
        int status, i, depth;
@@ -1782,7 +1776,7 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
        struct ocfs2_extent_block *eb;
        struct ocfs2_extent_list *el;
        struct buffer_head *last_eb_bh;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct inode *tl_inode = osb->osb_tl_inode;
 
        mlog_entry_void();
@@ -1868,7 +1862,7 @@ start:
 
        credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
                                                fe, el);
-       handle = ocfs2_start_trans(osb, NULL, credits);
+       handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -1891,7 +1885,7 @@ start:
        mutex_unlock(&tl_inode->i_mutex);
        tl_sem = 0;
 
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
        handle = NULL;
 
        BUG_ON(le32_to_cpu(fe->i_clusters) < target_i_clusters);
@@ -1906,7 +1900,7 @@ bail:
                mutex_unlock(&tl_inode->i_mutex);
 
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
 
        if (last_eb_bh)
                brelse(last_eb_bh);
@@ -2011,10 +2005,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
                mutex_lock(&ext_alloc_inode->i_mutex);
                (*tc)->tc_ext_alloc_inode = ext_alloc_inode;
 
-               status = ocfs2_meta_lock(ext_alloc_inode,
-                                        NULL,
-                                        &ext_alloc_bh,
-                                        1);
+               status = ocfs2_meta_lock(ext_alloc_inode, &ext_alloc_bh, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
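
The ocfs2 conversion above replaces struct ocfs2_journal_handle with the bare JBD handle_t: handles are no longer pre-allocated with ocfs2_alloc_handle() or handed the inode via ocfs2_handle_add_inode(); the caller takes the cluster metadata lock itself and only then opens the transaction, as __ocfs2_flush_truncate_log() now does. A condensed sketch of the resulting calling pattern (example_update is an illustrative name; the lock, start and commit calls are the ones used in the hunks above):

	static int example_update(struct ocfs2_super *osb, struct inode *inode)
	{
		handle_t *handle;
		struct buffer_head *bh = NULL;
		int status;

		status = ocfs2_meta_lock(inode, &bh, 1);	/* cluster lock before the transaction */
		if (status < 0)
			goto out;

		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			goto out_unlock;
		}

		/* ... journal_access / modify / journal_dirty against bh ... */

		ocfs2_commit_trans(osb, handle);		/* commit now takes the osb */
	out_unlock:
		ocfs2_meta_unlock(inode, 1);
		brelse(bh);
	out:
		return status;
	}
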
index 12ba897743f400ebaf2715249cb331e8a6be78d5..0b82e8044325351ef5878c3400156ff3048213f7 100644 (file)
@@ -28,7 +28,7 @@
 
 struct ocfs2_alloc_context;
 int ocfs2_insert_extent(struct ocfs2_super *osb,
-                       struct ocfs2_journal_handle *handle,
+                       handle_t *handle,
                        struct inode *inode,
                        struct buffer_head *fe_bh,
                        u64 blkno,
index 3d7c082a8f58288f8bdd2cfd1bd1f4fdf2fbab4d..2f7268e81520dbc0fdabc3155189459a0db67356 100644 (file)
@@ -200,7 +200,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
 
        mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
 
-       ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+       ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
@@ -305,7 +305,7 @@ static int ocfs2_prepare_write(struct file *file, struct page *page,
 
        mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
 
-       ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+       ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
        if (ret != 0) {
                mlog_errno(ret);
                goto out;
@@ -355,16 +355,16 @@ static int walk_page_buffers(     handle_t *handle,
        return ret;
 }
 
-struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
+handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
                                                         struct page *page,
                                                         unsigned from,
                                                         unsigned to)
 {
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        int ret = 0;
 
-       handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (!handle) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -372,7 +372,7 @@ struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
        }
 
        if (ocfs2_should_order_data(inode)) {
-               ret = walk_page_buffers(handle->k_handle,
+               ret = walk_page_buffers(handle,
                                        page_buffers(page),
                                        from, to, NULL,
                                        ocfs2_journal_dirty_data);
@@ -382,7 +382,7 @@ struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
 out:
        if (ret) {
                if (handle)
-                       ocfs2_commit_trans(handle);
+                       ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
@@ -394,7 +394,7 @@ static int ocfs2_commit_write(struct file *file, struct page *page,
        int ret;
        struct buffer_head *di_bh = NULL;
        struct inode *inode = page->mapping->host;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_dinode *di;
 
        mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
@@ -412,7 +412,7 @@ static int ocfs2_commit_write(struct file *file, struct page *page,
         *    stale inode allocation image (i_size, i_clusters, etc).
         */
 
-       ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, 1, page);
+       ret = ocfs2_meta_lock_with_page(inode, &di_bh, 1, page);
        if (ret != 0) {
                mlog_errno(ret);
                goto out;
@@ -464,7 +464,7 @@ static int ocfs2_commit_write(struct file *file, struct page *page,
        }
 
 out_commit:
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 out_unlock_data:
        ocfs2_data_unlock(inode, 1);
 out_unlock_meta:
@@ -490,7 +490,7 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
         * accessed concurrently from multiple nodes.
         */
        if (!INODE_JOURNAL(inode)) {
-               err = ocfs2_meta_lock(inode, NULL, NULL, 0);
+               err = ocfs2_meta_lock(inode, NULL, 0);
                if (err) {
                        if (err != -ENOENT)
                                mlog_errno(err);
index e88c3f0b8fa9f3bfc5078f992360cd3cbea8f69c..f446a15eab88909719e45ed875f94e97de851066 100644 (file)
@@ -25,7 +25,7 @@
 int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
                               unsigned from, unsigned to);
 
-struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
+handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
                                                         struct page *page,
                                                         unsigned from,
                                                         unsigned to);
index 305cba3681fe0126b4d1e33dd58c3b0cc3bea8a5..4cd9a9580456cdf0313c4a5b75c6f0d4e2560884 100644 (file)
@@ -141,7 +141,7 @@ struct o2hb_region {
         * recognizes a node going up and down in one iteration */
        u64                     hr_generation;
 
-       struct work_struct      hr_write_timeout_work;
+       struct delayed_work     hr_write_timeout_work;
        unsigned long           hr_last_timeout_start;
 
        /* Used during o2hb_check_slot to hold a copy of the block
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt {
        int               wc_error;
 };
 
-static void o2hb_write_timeout(void *arg)
+static void o2hb_write_timeout(struct work_struct *work)
 {
-       struct o2hb_region *reg = arg;
+       struct o2hb_region *reg =
+               container_of(work, struct o2hb_region,
+                            hr_write_timeout_work.work);
 
        mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
             "milliseconds\n", reg->hr_dev_name,
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
                goto out;
        }
 
-       INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+       INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
 
        /*
         * A node is considered live after it has beat LIVE_THRESHOLD
index 7bba98fbfc1532043fd7b48ad21e61dd15fd75aa..4705d659fe576e8bbc89561232d7f6447b1efa16 100644 (file)
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void)
        o2quo_fence_self();
 }
 
-static void o2quo_make_decision(void *arg)
+static void o2quo_make_decision(struct work_struct *work)
 {
        int quorum;
        int lowest_hb, lowest_reachable = 0, fence = 0;
@@ -306,7 +306,7 @@ void o2quo_init(void)
        struct o2quo_state *qs = &o2quo_state;
 
        spin_lock_init(&qs->qs_lock);
-       INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+       INIT_WORK(&qs->qs_work, o2quo_make_decision);
 }
 
 void o2quo_exit(void)
index b650efa8c8bebd01991f43284287fdfaa49d1f41..9b3209dc0b16a147e8b81f0e1eddbfcb2714e27e 100644 (file)
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] =
                 [O2NET_ERR_DIED]       = -EHOSTDOWN,};
 
 /* can't quite avoid *all* internal declarations :/ */
-static void o2net_sc_connect_completed(void *arg);
-static void o2net_rx_until_empty(void *arg);
-static void o2net_shutdown_sc(void *arg);
+static void o2net_sc_connect_completed(struct work_struct *work);
+static void o2net_rx_until_empty(struct work_struct *work);
+static void o2net_shutdown_sc(struct work_struct *work);
 static void o2net_listen_data_ready(struct sock *sk, int bytes);
-static void o2net_sc_send_keep_req(void *arg);
+static void o2net_sc_send_keep_req(struct work_struct *work);
 static void o2net_idle_timer(unsigned long data);
 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
 
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
        o2nm_node_get(node);
        sc->sc_node = node;
 
-       INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
-       INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
-       INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
-       INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+       INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
+       INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
+       INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
+       INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
 
        init_timer(&sc->sc_idle_timeout);
        sc->sc_idle_timeout.function = o2net_idle_timer;
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc,
                sc_put(sc);
 }
 static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
-                                       struct work_struct *work,
+                                       struct delayed_work *work,
                                        int delay)
 {
        sc_get(sc);
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
                sc_put(sc);
 }
 static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
-                                        struct work_struct *work)
+                                        struct delayed_work *work)
 {
        if (cancel_delayed_work(work))
                sc_put(sc);
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn,
  * ourselves as state_change couldn't get the nn_lock and call set_nn_state
  * itself.
  */
-static void o2net_shutdown_sc(void *arg)
+static void o2net_shutdown_sc(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container,
+                            sc_shutdown_work);
        struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
 
        sclog(sc, "shutting down\n");
@@ -1201,9 +1203,10 @@ out:
 /* this work func is triggerd by data ready.  it reads until it can read no
  * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
  * our work the work struct will be marked and we'll be called again. */
-static void o2net_rx_until_empty(void *arg)
+static void o2net_rx_until_empty(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container, sc_rx_work);
        int ret;
 
        do {
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock)
 
 /* called when a connect completes and after a sock is accepted.  the
  * rx path will see the response and mark the sc valid */
-static void o2net_sc_connect_completed(void *arg)
+static void o2net_sc_connect_completed(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container,
+                            sc_connect_work);
 
        mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
               (unsigned long long)O2NET_PROTOCOL_VERSION,
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg)
 }
 
 /* this is called as a work_struct func. */
-static void o2net_sc_send_keep_req(void *arg)
+static void o2net_sc_send_keep_req(struct work_struct *work)
 {
-       struct o2net_sock_container *sc = arg;
+       struct o2net_sock_container *sc =
+               container_of(work, struct o2net_sock_container,
+                            sc_keepalive_work.work);
 
        o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
        sc_put(sc);
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
  * having a connect attempt fail, etc. This centralizes the logic which decides
  * if a connect attempt should be made or if we should give up and all future
  * transmit attempts should fail */
-static void o2net_start_connect(void *arg)
+static void o2net_start_connect(struct work_struct *work)
 {
-       struct o2net_node *nn = arg;
+       struct o2net_node *nn =
+               container_of(work, struct o2net_node, nn_connect_work.work);
        struct o2net_sock_container *sc = NULL;
        struct o2nm_node *node = NULL, *mynode = NULL;
        struct socket *sock = NULL;
        struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
-       int ret = 0;
+       int ret = 0, stop;
 
        /* if we're greater we initiate tx, otherwise we accept */
        if (o2nm_this_node() <= o2net_num_from_nn(nn))
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg)
 
        spin_lock(&nn->nn_lock);
        /* see if we already have one pending or have given up */
-       if (nn->nn_sc || nn->nn_persistent_error)
-               arg = NULL;
+       stop = (nn->nn_sc || nn->nn_persistent_error);
        spin_unlock(&nn->nn_lock);
-       if (arg == NULL) /* *shrug*, needed some indicator */
+       if (stop)
                goto out;
 
        nn->nn_last_connect_attempt = jiffies;
@@ -1421,9 +1428,10 @@ out:
        return;
 }
 
-static void o2net_connect_expired(void *arg)
+static void o2net_connect_expired(struct work_struct *work)
 {
-       struct o2net_node *nn = arg;
+       struct o2net_node *nn =
+               container_of(work, struct o2net_node, nn_connect_expired.work);
 
        spin_lock(&nn->nn_lock);
        if (!nn->nn_sc_valid) {
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg)
        spin_unlock(&nn->nn_lock);
 }
 
-static void o2net_still_up(void *arg)
+static void o2net_still_up(struct work_struct *work)
 {
-       struct o2net_node *nn = arg;
+       struct o2net_node *nn =
+               container_of(work, struct o2net_node, nn_still_up.work);
 
        o2quo_hb_still_up(o2net_num_from_nn(nn));
 }
@@ -1644,9 +1653,9 @@ out:
        return ret;
 }
 
-static void o2net_accept_many(void *arg)
+static void o2net_accept_many(struct work_struct *work)
 {
-       struct socket *sock = arg;
+       struct socket *sock = o2net_listen_sock;
        while (o2net_accept_one(sock) == 0)
                cond_resched();
 }
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port)
        write_unlock_bh(&sock->sk->sk_callback_lock);
 
        o2net_listen_sock = sock;
-       INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+       INIT_WORK(&o2net_listen_work, o2net_accept_many);
 
        sock->sk->sk_reuse = 1;
        ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
@@ -1819,9 +1828,10 @@ int o2net_init(void)
                struct o2net_node *nn = o2net_nn_from_num(i);
 
                spin_lock_init(&nn->nn_lock);
-               INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
-               INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
-               INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+               INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect);
+               INIT_DELAYED_WORK(&nn->nn_connect_expired,
+                                 o2net_connect_expired);
+               INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up);
                /* until we see hb from a node we'll return einval */
                nn->nn_persistent_error = -ENOTCONN;
                init_waitqueue_head(&nn->nn_sc_wq);
index 4b46aac7d243edd3d023935578d71ab457317bcc..daebbd3a2c8ceb630404566223e2189d96bb0ed6 100644 (file)
@@ -86,18 +86,18 @@ struct o2net_node {
         * connect attempt fails and so can be self-arming.  shutdown is
         * careful to first mark the nn such that no connects will be attempted
         * before canceling delayed connect work and flushing the queue. */
-       struct work_struct              nn_connect_work;
+       struct delayed_work             nn_connect_work;
        unsigned long                   nn_last_connect_attempt;
 
        /* this is queued as nodes come up and is canceled when a connection is
         * established.  this expiring gives up on the node and errors out
         * transmits */
-       struct work_struct              nn_connect_expired;
+       struct delayed_work             nn_connect_expired;
 
        /* after we give up on a socket we wait a while before deciding
         * that it is still heartbeating and that we should do some
         * quorum work */
-       struct work_struct              nn_still_up;
+       struct delayed_work             nn_still_up;
 };
 
 struct o2net_sock_container {
@@ -129,7 +129,7 @@ struct o2net_sock_container {
        struct work_struct      sc_shutdown_work;
 
        struct timer_list       sc_idle_timeout;
-       struct work_struct      sc_keepalive_work;
+       struct delayed_work     sc_keepalive_work;
 
        unsigned                sc_handshake_ok:1;
 
index 04e01915b86e4e4f6ab86c6056da2ebcf4050d09..baad2aa27c144935ec13df8a2d05324d1096eed0 100644 (file)
@@ -82,6 +82,7 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
        struct inode *inode = filp->f_dentry->d_inode;
        struct super_block * sb = inode->i_sb;
        unsigned int ra_sectors = 16;
+       int lock_level = 0;
 
        mlog_entry("dirino=%llu\n",
                   (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -89,7 +90,15 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
        stored = 0;
        bh = NULL;
 
-       error = ocfs2_meta_lock(inode, NULL, NULL, 0);
+       error = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
+       if (lock_level && error >= 0) {
+               /* We release EX lock which used to update atime
+                * and get PR lock again to reduce contention
+                * on commonly accessed directories. */
+               ocfs2_meta_unlock(inode, 1);
+               lock_level = 0;
+               error = ocfs2_meta_lock(inode, NULL, 0);
+       }
        if (error < 0) {
                if (error != -ENOENT)
                        mlog_errno(error);
@@ -198,7 +207,7 @@ revalidate:
 
        stored = 0;
 bail:
-       ocfs2_meta_unlock(inode, 0);
+       ocfs2_meta_unlock(inode, lock_level);
 
 bail_nolock:
        mlog_exit(stored);
@@ -340,7 +349,7 @@ int ocfs2_empty_dir(struct inode *inode)
 
 /* returns a bh of the 1st new block in the allocation. */
 int ocfs2_do_extend_dir(struct super_block *sb,
-                       struct ocfs2_journal_handle *handle,
+                       handle_t *handle,
                        struct inode *dir,
                        struct buffer_head *parent_fe_bh,
                        struct ocfs2_alloc_context *data_ac,
@@ -398,7 +407,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct buffer_head *new_bh = NULL;
        struct ocfs2_dir_entry * de;
        struct super_block *sb = osb->sb;
@@ -409,13 +418,6 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
        mlog(0, "extending dir %llu (i_size = %lld)\n",
             (unsigned long long)OCFS2_I(dir)->ip_blkno, dir_i_size);
 
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        /* dir->i_size is always block aligned. */
        spin_lock(&OCFS2_I(dir)->ip_lock);
        if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
@@ -428,8 +430,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
                }
 
                if (!num_free_extents) {
-                       status = ocfs2_reserve_new_metadata(osb, handle,
-                                                           fe, &meta_ac);
+                       status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac);
                        if (status < 0) {
                                if (status != -ENOSPC)
                                        mlog_errno(status);
@@ -437,7 +438,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
                        }
                }
 
-               status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+               status = ocfs2_reserve_clusters(osb, 1, &data_ac);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
@@ -450,7 +451,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
                credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
        }
 
-       handle = ocfs2_start_trans(osb, handle, credits);
+       handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -496,7 +497,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
        get_bh(*new_de_bh);
 bail:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
 
        if (data_ac)
                ocfs2_free_alloc_context(data_ac);
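
The extend-dir path above now follows the bare handle_t convention: ocfs2_start_trans() either returns a usable handle or an error encoded in the pointer itself, so callers test with IS_ERR()/PTR_ERR() rather than checking for NULL plus a separate status, and pass the superblock back to ocfs2_commit_trans(). A small self-contained sketch of that error-in-pointer idiom follows; the helpers are re-implemented here purely for illustration (in the kernel they come from <linux/err.h>), and start_trans_demo()/struct handle are made-up stand-ins.

    #include <stdio.h>
    #include <errno.h>

    /* Simplified versions of the kernel's <linux/err.h> helpers: small
     * negative errno values are folded into the top of the pointer range. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct handle { int credits; };      /* stand-in for the journal handle */

    /* Illustrative stand-in for ocfs2_start_trans(): fail with -EROFS when
     * asked to, otherwise hand back a handle. */
    static struct handle *start_trans_demo(int credits, int read_only)
    {
            static struct handle h;
            if (read_only)
                    return ERR_PTR(-EROFS);
            h.credits = credits;
            return &h;
    }

    int main(void)
    {
            struct handle *handle = start_trans_demo(8, 1);
            if (IS_ERR(handle)) {
                    printf("start_trans failed: %ld\n", PTR_ERR(handle));
                    handle = NULL;       /* same cleanup convention as the diff */
            }
            return 0;
    }
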
index 5f614ec9649ca282a6aa106dc46b4d8d9d97a3c8..3f67e146864a695d32bfe7b724d283a825a403ab 100644 (file)
@@ -45,7 +45,7 @@ int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
                                 struct buffer_head **ret_de_bh);
 struct ocfs2_alloc_context;
 int ocfs2_do_extend_dir(struct super_block *sb,
-                       struct ocfs2_journal_handle *handle,
+                       handle_t *handle,
                        struct inode *dir,
                        struct buffer_head *parent_fe_bh,
                        struct ocfs2_alloc_context *data_ac,
index fa968180b07266576be02d1923a20ec84f2fbc79..6b6ff76538c59212d453385858612a2663f83b51 100644 (file)
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned
  * called functions that cannot be directly called from the
  * net message handlers for some reason, usually because
  * they need to send net messages of their own. */
-void dlm_dispatch_work(void *data);
+void dlm_dispatch_work(struct work_struct *work);
 
 struct dlm_lock_resource;
 struct dlm_work_item;
index 8d1065f8b3bdbaf7ccc5def928c4954ea3bd4a24..420a375a3949826b5b90e4007cdb0807b9f9b966 100644 (file)
@@ -68,7 +68,8 @@ static void **dlm_alloc_pagevec(int pages)
                        goto out_free;
 
        mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
-            pages, DLM_HASH_PAGES, (unsigned long)DLM_BUCKETS_PER_PAGE);
+            pages, (unsigned long)DLM_HASH_PAGES,
+            (unsigned long)DLM_BUCKETS_PER_PAGE);
        return vec;
 out_free:
        dlm_free_pagevec(vec, i);
@@ -1296,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 
        spin_lock_init(&dlm->work_lock);
        INIT_LIST_HEAD(&dlm->work_list);
-       INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+       INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
 
        kref_init(&dlm->dlm_refs);
        dlm->dlm_state = DLM_CTXT_NEW;
index 16b8d1ba706662c15ee99db4fc20f07f20b10e1f..941acf14e61f60ab59e0e0eec91f48b643ea6587 100644 (file)
@@ -66,7 +66,7 @@ static struct file_operations dlmfs_file_operations;
 static struct inode_operations dlmfs_dir_inode_operations;
 static struct inode_operations dlmfs_root_inode_operations;
 static struct inode_operations dlmfs_file_inode_operations;
-static kmem_cache_t *dlmfs_inode_cache;
+static struct kmem_cache *dlmfs_inode_cache;
 
 struct workqueue_struct *user_dlm_worker;
 
@@ -257,7 +257,7 @@ static ssize_t dlmfs_file_write(struct file *filp,
 }
 
 static void dlmfs_init_once(void *foo,
-                           kmem_cache_t *cachep,
+                           struct kmem_cache *cachep,
                            unsigned long flags)
 {
        struct dlmfs_inode_private *ip =
@@ -276,7 +276,7 @@ static struct inode *dlmfs_alloc_inode(struct super_block *sb)
 {
        struct dlmfs_inode_private *ip;
 
-       ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS);
+       ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
        if (!ip)
                return NULL;
 
index f784177b62417bea10e474d568f45e07ddbb5781..856012b4fa4954534ac6d4471d2ebee25b9bdae9 100644 (file)
@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
 #endif  /*  0  */
 
 
-static kmem_cache_t *dlm_mle_cache = NULL;
+static struct kmem_cache *dlm_mle_cache = NULL;
 
 
 static void dlm_mle_release(struct kref *kref);
index 9d950d7cea38e02665ee7e705292c4f515d30e98..fb3e2b0817f17cc647c532ae492b30af57af36b9 100644 (file)
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
 }
 
 /* Worker function used during recovery. */
-void dlm_dispatch_work(void *data)
+void dlm_dispatch_work(struct work_struct *work)
 {
-       struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+       struct dlm_ctxt *dlm =
+               container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct list_head *iter, *iter2;
        struct dlm_work_item *item;
index eead48bbfac620d7b475287982823383be5ffe4b..7d2f578b267df7e8ae4f8cce2d46533f8f637e38 100644 (file)
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
                BUG();
 }
 
-static void user_dlm_unblock_lock(void *opaque);
+static void user_dlm_unblock_lock(struct work_struct *work);
 
 static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
 {
        if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
                user_dlm_grab_inode_ref(lockres);
 
-               INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
-                         lockres);
+               INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);
 
                queue_work(user_dlm_worker, &lockres->l_work);
                lockres->l_flags |= USER_LOCK_QUEUED;
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
        iput(inode);
 }
 
-static void user_dlm_unblock_lock(void *opaque)
+static void user_dlm_unblock_lock(struct work_struct *work)
 {
        int new_level, status;
-       struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+       struct user_lock_res *lockres =
+               container_of(work, struct user_lock_res, l_work);
        struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
 
        mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
index 8801e41afe8092ad9db9c8c469b5ff1cbfb94fbb..69fba16efbd1e08241ebe46551d12b506a442b15 100644 (file)
@@ -49,6 +49,7 @@
 #include "dcache.h"
 #include "dlmglue.h"
 #include "extent_map.h"
+#include "file.h"
 #include "heartbeat.h"
 #include "inode.h"
 #include "journal.h"
@@ -1063,10 +1064,10 @@ static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
        mlog_exit_void();
 }
 
-int ocfs2_create_new_lock(struct ocfs2_super *osb,
-                         struct ocfs2_lock_res *lockres,
-                         int ex,
-                         int local)
+static int ocfs2_create_new_lock(struct ocfs2_super *osb,
+                                struct ocfs2_lock_res *lockres,
+                                int ex,
+                                int local)
 {
        int level =  ex ? LKM_EXMODE : LKM_PRMODE;
        unsigned long flags;
@@ -1579,7 +1580,6 @@ static int ocfs2_assign_bh(struct inode *inode,
  * the result of the lock will be communicated via the callback.
  */
 int ocfs2_meta_lock_full(struct inode *inode,
-                        struct ocfs2_journal_handle *handle,
                         struct buffer_head **ret_bh,
                         int ex,
                         int arg_flags)
@@ -1668,12 +1668,6 @@ int ocfs2_meta_lock_full(struct inode *inode,
                }
        }
 
-       if (handle) {
-               status = ocfs2_handle_add_lock(handle, inode);
-               if (status < 0)
-                       mlog_errno(status);
-       }
-
 bail:
        if (status < 0) {
                if (ret_bh && (*ret_bh)) {
@@ -1713,18 +1707,16 @@ bail:
  * the lock inversion simply.
  */
 int ocfs2_meta_lock_with_page(struct inode *inode,
-                             struct ocfs2_journal_handle *handle,
                              struct buffer_head **ret_bh,
                              int ex,
                              struct page *page)
 {
        int ret;
 
-       ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex,
-                                  OCFS2_LOCK_NONBLOCK);
+       ret = ocfs2_meta_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
        if (ret == -EAGAIN) {
                unlock_page(page);
-               if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0)
+               if (ocfs2_meta_lock(inode, ret_bh, ex) == 0)
                        ocfs2_meta_unlock(inode, ex);
                ret = AOP_TRUNCATED_PAGE;
        }
@@ -1732,6 +1724,44 @@ int ocfs2_meta_lock_with_page(struct inode *inode,
        return ret;
 }
 
+int ocfs2_meta_lock_atime(struct inode *inode,
+                         struct vfsmount *vfsmnt,
+                         int *level)
+{
+       int ret;
+
+       mlog_entry_void();
+       ret = ocfs2_meta_lock(inode, NULL, 0);
+       if (ret < 0) {
+               mlog_errno(ret);
+               return ret;
+       }
+
+       /*
+        * If we should update atime, we will get EX lock,
+        * otherwise we just get PR lock.
+        */
+       if (ocfs2_should_update_atime(inode, vfsmnt)) {
+               struct buffer_head *bh = NULL;
+
+               ocfs2_meta_unlock(inode, 0);
+               ret = ocfs2_meta_lock(inode, &bh, 1);
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       return ret;
+               }
+               *level = 1;
+               if (ocfs2_should_update_atime(inode, vfsmnt))
+                       ocfs2_update_inode_atime(inode, bh);
+               if (bh)
+                       brelse(bh);
+       } else
+               *level = 0;
+
+       mlog_exit(ret);
+       return ret;
+}
+
 void ocfs2_meta_unlock(struct inode *inode,
                       int ex)
 {
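
A note on the new ocfs2_meta_lock_atime() above: a PR (read) lock cannot be upgraded to EX (write) in place, so the code drops the lock, reacquires it exclusively, and then re-checks whether the atime update is still needed, since another node may have performed it in the window where no lock was held. The same drop-and-retake-then-recheck shape is sketched below with POSIX rwlocks purely for illustration; the shared flag and function names are made up.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t meta_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int needs_update = 1;          /* illustrative shared condition */

    static void read_path(void)
    {
            pthread_rwlock_rdlock(&meta_lock);
            if (needs_update) {
                    /* Can't upgrade in place: drop the read lock, take the
                     * write lock, then re-check, because another thread may
                     * have done the update while we held neither lock. */
                    pthread_rwlock_unlock(&meta_lock);
                    pthread_rwlock_wrlock(&meta_lock);
                    if (needs_update) {
                            needs_update = 0;
                            printf("performed update under write lock\n");
                    }
            }
            pthread_rwlock_unlock(&meta_lock);
    }

    int main(void)
    {
            read_path();
            return 0;
    }
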
index 4a2769387229a90cb3e0be5e1abaf56be217382d..c343fca68cf1ec1643e292f560c8dbf7d7b28414 100644 (file)
@@ -68,8 +68,6 @@ void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
                                u64 parent, struct inode *inode);
 void ocfs2_lock_res_free(struct ocfs2_lock_res *res);
 int ocfs2_create_new_inode_locks(struct inode *inode);
-int ocfs2_create_new_lock(struct ocfs2_super *osb,
-                         struct ocfs2_lock_res *lockres, int ex, int local);
 int ocfs2_drop_inode_locks(struct inode *inode);
 int ocfs2_data_lock_full(struct inode *inode,
                         int write,
@@ -82,19 +80,20 @@ void ocfs2_data_unlock(struct inode *inode,
                       int write);
 int ocfs2_rw_lock(struct inode *inode, int write);
 void ocfs2_rw_unlock(struct inode *inode, int write);
+int ocfs2_meta_lock_atime(struct inode *inode,
+                         struct vfsmount *vfsmnt,
+                         int *level);
 int ocfs2_meta_lock_full(struct inode *inode,
-                        struct ocfs2_journal_handle *handle,
                         struct buffer_head **ret_bh,
                         int ex,
                         int arg_flags);
 int ocfs2_meta_lock_with_page(struct inode *inode,
-                             struct ocfs2_journal_handle *handle,
                              struct buffer_head **ret_bh,
                              int ex,
                              struct page *page);
 /* 99% of the time we don't want to supply any additional flags --
  * those are for very specific cases only. */
-#define ocfs2_meta_lock(i, h, b, e) ocfs2_meta_lock_full(i, h, b, e, 0)
+#define ocfs2_meta_lock(i, b, e) ocfs2_meta_lock_full(i, b, e, 0)
 void ocfs2_meta_unlock(struct inode *inode,
                       int ex);
 int ocfs2_super_lock(struct ocfs2_super *osb,
index fb91089a60a78a6a7b36d5c2b2f711088410ed9e..06be6e774cf943e08c41b616fc2d99ceb17efc11 100644 (file)
@@ -100,7 +100,7 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
        mlog(0, "find parent of directory %llu\n",
             (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
-       status = ocfs2_meta_lock(dir, NULL, NULL, 0);
+       status = ocfs2_meta_lock(dir, NULL, 0);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
index fcd4475d1f899754bb5ed61fa069b149a5e48c20..80ac69f11d9f4d2354fa5dab987e4a1904a5547e 100644 (file)
@@ -61,7 +61,7 @@ struct ocfs2_em_insert_context {
        struct ocfs2_extent_map_entry *right_ent;
 };
 
-static kmem_cache_t *ocfs2_em_ent_cachep = NULL;
+static struct kmem_cache *ocfs2_em_ent_cachep = NULL;
 
 
 static struct ocfs2_extent_map_entry *
index 1be74c4e78148f4cd79df11a62c6796fc7b642e4..8786b3c490aa5ff32da1277196204173c1ca1440 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/pagemap.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/mount.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -134,7 +136,58 @@ bail:
        return (err < 0) ? -EIO : 0;
 }
 
-int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle,
+int ocfs2_should_update_atime(struct inode *inode,
+                             struct vfsmount *vfsmnt)
+{
+       struct timespec now;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+       if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+               return 0;
+
+       if ((inode->i_flags & S_NOATIME) ||
+           ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+               return 0;
+
+       if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
+           ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
+               return 0;
+
+       now = CURRENT_TIME;
+       if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
+               return 0;
+       else
+               return 1;
+}
+
+int ocfs2_update_inode_atime(struct inode *inode,
+                            struct buffer_head *bh)
+{
+       int ret;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       handle_t *handle;
+
+       mlog_entry_void();
+
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (handle == NULL) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       inode->i_atime = CURRENT_TIME;
+       ret = ocfs2_mark_inode_dirty(handle, inode, bh);
+       if (ret < 0)
+               mlog_errno(ret);
+
+       ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out:
+       mlog_exit(ret);
+       return ret;
+}
+
+int ocfs2_set_inode_size(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *fe_bh,
                         u64 new_i_size)
@@ -163,10 +216,9 @@ static int ocfs2_simple_size_update(struct inode *inode,
 {
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
 
-       handle = ocfs2_start_trans(osb, NULL,
-                                  OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (handle == NULL) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -178,7 +230,7 @@ static int ocfs2_simple_size_update(struct inode *inode,
        if (ret < 0)
                mlog_errno(ret);
 
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 out:
        return ret;
 }
@@ -189,14 +241,14 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     u64 new_i_size)
 {
        int status;
-       struct ocfs2_journal_handle *handle;
+       handle_t *handle;
 
        mlog_entry_void();
 
        /* TODO: This needs to actually orphan the inode in this
         * transaction. */
 
-       handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
@@ -207,7 +259,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
        if (status < 0)
                mlog_errno(status);
 
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 out:
        mlog_exit(status);
        return status;
@@ -328,7 +380,7 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
                               struct inode *inode,
                               u32 clusters_to_add,
                               struct buffer_head *fe_bh,
-                              struct ocfs2_journal_handle *handle,
+                              handle_t *handle,
                               struct ocfs2_alloc_context *data_ac,
                               struct ocfs2_alloc_context *meta_ac,
                               enum ocfs2_alloc_restarted *reason_ret)
@@ -433,7 +485,7 @@ static int ocfs2_extend_allocation(struct inode *inode,
        u32 prev_clusters;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why;
@@ -463,13 +515,6 @@ restart_all:
             (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
             fe->i_clusters, clusters_to_add);
 
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto leave;
-       }
-
        num_free_extents = ocfs2_num_free_extents(osb,
                                                  inode,
                                                  fe);
@@ -480,10 +525,7 @@ restart_all:
        }
 
        if (!num_free_extents) {
-               status = ocfs2_reserve_new_metadata(osb,
-                                                   handle,
-                                                   fe,
-                                                   &meta_ac);
+               status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
@@ -491,10 +533,7 @@ restart_all:
                }
        }
 
-       status = ocfs2_reserve_clusters(osb,
-                                       handle,
-                                       clusters_to_add,
-                                       &data_ac);
+       status = ocfs2_reserve_clusters(osb, clusters_to_add, &data_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -509,7 +548,7 @@ restart_all:
        drop_alloc_sem = 1;
 
        credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
-       handle = ocfs2_start_trans(osb, handle, credits);
+       handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -589,7 +628,7 @@ leave:
                drop_alloc_sem = 0;
        }
        if (handle) {
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
@@ -624,7 +663,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
        struct page *page;
        unsigned long index;
        unsigned int offset;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        int ret;
 
        offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
@@ -668,7 +707,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
                ret = 0;
 
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
 out_unlock:
        unlock_page(page);
        page_cache_release(page);
@@ -789,7 +828,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
 
        mlog_entry("(0x%p, '%.*s')\n", dentry,
                   dentry->d_name.len, dentry->d_name.name);
@@ -825,7 +864,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                }
        }
 
-       status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+       status = ocfs2_meta_lock(inode, &bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -845,7 +884,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                }
        }
 
-       handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
@@ -863,7 +902,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                mlog_errno(status);
 
 bail_commit:
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 bail_unlock:
        ocfs2_meta_unlock(inode, 1);
 bail_unlock_rw:
@@ -906,19 +945,41 @@ bail:
        return err;
 }
 
+int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+       int ret;
+
+       mlog_entry_void();
+
+       ret = ocfs2_meta_lock(inode, NULL, 0);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = generic_permission(inode, mask, NULL);
+       if (ret)
+               mlog_errno(ret);
+
+       ocfs2_meta_unlock(inode, 0);
+out:
+       mlog_exit(ret);
+       return ret;
+}
+
 static int ocfs2_write_remove_suid(struct inode *inode)
 {
        int ret;
        struct buffer_head *bh = NULL;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
-       struct ocfs2_journal_handle *handle;
+       handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;
 
        mlog_entry("(Inode %llu, mode 0%o)\n",
                   (unsigned long long)oi->ip_blkno, inode->i_mode);
 
-       handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (handle == NULL) {
                ret = -ENOMEM;
                mlog_errno(ret);
@@ -951,75 +1012,29 @@ static int ocfs2_write_remove_suid(struct inode *inode)
 out_bh:
        brelse(bh);
 out_trans:
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 out:
        mlog_exit(ret);
        return ret;
 }
 
-static inline int ocfs2_write_should_remove_suid(struct inode *inode)
-{
-       mode_t mode = inode->i_mode;
-
-       if (!capable(CAP_FSETID)) {
-               if (unlikely(mode & S_ISUID))
-                       return 1;
-
-               if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
-                       return 1;
-       }
-       return 0;
-}
-
-static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs,
-                                   loff_t pos)
+static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
+                                        loff_t *ppos,
+                                        size_t count,
+                                        int appending)
 {
-       int ret, rw_level = -1, meta_level = -1, have_alloc_sem = 0;
+       int ret = 0, meta_level = appending;
+       struct inode *inode = dentry->d_inode;
        u32 clusters;
-       struct file *filp = iocb->ki_filp;
-       struct inode *inode = filp->f_dentry->d_inode;
        loff_t newsize, saved_pos;
 
-       mlog_entry("(0x%p, %u, '%.*s')\n", filp,
-                  (unsigned int)nr_segs,
-                  filp->f_dentry->d_name.len,
-                  filp->f_dentry->d_name.name);
-
-       /* happy write of zero bytes */
-       if (iocb->ki_left == 0)
-               return 0;
-
-       if (!inode) {
-               mlog(0, "bad inode\n");
-               return -EIO;
-       }
-
-       mutex_lock(&inode->i_mutex);
-       /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
-       if (filp->f_flags & O_DIRECT) {
-               have_alloc_sem = 1;
-               down_read(&inode->i_alloc_sem);
-       }
-
-       /* concurrent O_DIRECT writes are allowed */
-       rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1;
-       ret = ocfs2_rw_lock(inode, rw_level);
-       if (ret < 0) {
-               rw_level = -1;
-               mlog_errno(ret);
-               goto out;
-       }
-
        /* 
         * We sample i_size under a read level meta lock to see if our write
         * is extending the file, if it is we back off and get a write level
         * meta lock.
         */
-       meta_level = (filp->f_flags & O_APPEND) ? 1 : 0;
        for(;;) {
-               ret = ocfs2_meta_lock(inode, NULL, NULL, meta_level);
+               ret = ocfs2_meta_lock(inode, NULL, meta_level);
                if (ret < 0) {
                        meta_level = -1;
                        mlog_errno(ret);
@@ -1035,7 +1050,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                 * inode. There's also the dinode i_size state which
                 * can be lost via setattr during extending writes (we
                 * set inode->i_size at the end of a write. */
-               if (ocfs2_write_should_remove_suid(inode)) {
+               if (should_remove_suid(dentry)) {
                        if (meta_level == 0) {
                                ocfs2_meta_unlock(inode, meta_level);
                                meta_level = 1;
@@ -1045,19 +1060,19 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                        ret = ocfs2_write_remove_suid(inode);
                        if (ret < 0) {
                                mlog_errno(ret);
-                               goto out;
+                               goto out_unlock;
                        }
                }
 
                /* work on a copy of ppos until we're sure that we won't have
                 * to recalculate it due to relocking. */
-               if (filp->f_flags & O_APPEND) {
+               if (appending) {
                        saved_pos = i_size_read(inode);
                        mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
                } else {
-                       saved_pos = iocb->ki_pos;
+                       saved_pos = *ppos;
                }
-               newsize = iocb->ki_left + saved_pos;
+               newsize = count + saved_pos;
 
                mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
                     (long long) saved_pos, (long long) newsize,
@@ -1090,19 +1105,66 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                if (!clusters)
                        break;
 
-               ret = ocfs2_extend_file(inode, NULL, newsize, iocb->ki_left);
+               ret = ocfs2_extend_file(inode, NULL, newsize, count);
                if (ret < 0) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
-                       goto out;
+                       goto out_unlock;
                }
                break;
        }
 
-       /* ok, we're done with i_size and alloc work */
-       iocb->ki_pos = saved_pos;
+       if (appending)
+               *ppos = saved_pos;
+
+out_unlock:
        ocfs2_meta_unlock(inode, meta_level);
-       meta_level = -1;
+
+out:
+       return ret;
+}
+
+static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
+                                   const struct iovec *iov,
+                                   unsigned long nr_segs,
+                                   loff_t pos)
+{
+       int ret, rw_level, have_alloc_sem = 0;
+       struct file *filp = iocb->ki_filp;
+       struct inode *inode = filp->f_dentry->d_inode;
+       int appending = filp->f_flags & O_APPEND ? 1 : 0;
+
+       mlog_entry("(0x%p, %u, '%.*s')\n", filp,
+                  (unsigned int)nr_segs,
+                  filp->f_dentry->d_name.len,
+                  filp->f_dentry->d_name.name);
+
+       /* happy write of zero bytes */
+       if (iocb->ki_left == 0)
+               return 0;
+
+       mutex_lock(&inode->i_mutex);
+       /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
+       if (filp->f_flags & O_DIRECT) {
+               have_alloc_sem = 1;
+               down_read(&inode->i_alloc_sem);
+       }
+
+       /* concurrent O_DIRECT writes are allowed */
+       rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1;
+       ret = ocfs2_rw_lock(inode, rw_level);
+       if (ret < 0) {
+               rw_level = -1;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_prepare_inode_for_write(filp->f_dentry, &iocb->ki_pos,
+                                           iocb->ki_left, appending);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
 
        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb);
@@ -1128,8 +1190,6 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
        }
 
 out:
-       if (meta_level != -1)
-               ocfs2_meta_unlock(inode, meta_level);
        if (have_alloc_sem)
                up_read(&inode->i_alloc_sem);
        if (rw_level != -1) 
@@ -1140,12 +1200,83 @@ out:
        return ret;
 }
 
+static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
+                                      struct file *out,
+                                      loff_t *ppos,
+                                      size_t len,
+                                      unsigned int flags)
+{
+       int ret;
+       struct inode *inode = out->f_dentry->d_inode;
+
+       mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
+                  (unsigned int)len,
+                  out->f_dentry->d_name.len,
+                  out->f_dentry->d_name.name);
+
+       inode_double_lock(inode, pipe->inode);
+
+       ret = ocfs2_rw_lock(inode, 1);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_prepare_inode_for_write(out->f_dentry, ppos, len, 0);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out_unlock;
+       }
+
+       /* ok, we're done with i_size and alloc work */
+       ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
+
+out_unlock:
+       ocfs2_rw_unlock(inode, 1);
+out:
+       inode_double_unlock(inode, pipe->inode);
+
+       mlog_exit(ret);
+       return ret;
+}
+
+static ssize_t ocfs2_file_splice_read(struct file *in,
+                                     loff_t *ppos,
+                                     struct pipe_inode_info *pipe,
+                                     size_t len,
+                                     unsigned int flags)
+{
+       int ret = 0;
+       struct inode *inode = in->f_dentry->d_inode;
+
+       mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
+                  (unsigned int)len,
+                  in->f_dentry->d_name.len,
+                  in->f_dentry->d_name.name);
+
+       /*
+        * See the comment in ocfs2_file_aio_read()
+        */
+       ret = ocfs2_meta_lock(inode, NULL, 0);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto bail;
+       }
+       ocfs2_meta_unlock(inode, 0);
+
+       ret = generic_file_splice_read(in, ppos, pipe, len, flags);
+
+bail:
+       mlog_exit(ret);
+       return ret;
+}
+
 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
                                   const struct iovec *iov,
                                   unsigned long nr_segs,
                                   loff_t pos)
 {
-       int ret = 0, rw_level = -1, have_alloc_sem = 0;
+       int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
        struct inode *inode = filp->f_dentry->d_inode;
 
@@ -1187,12 +1318,12 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         * like i_size. This allows the checks down below
         * generic_file_aio_read() a chance of actually working. 
         */
-       ret = ocfs2_meta_lock(inode, NULL, NULL, 0);
+       ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
        if (ret < 0) {
                mlog_errno(ret);
                goto bail;
        }
-       ocfs2_meta_unlock(inode, 0);
+       ocfs2_meta_unlock(inode, lock_level);
 
        ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
        if (ret == -EINVAL)
@@ -1220,11 +1351,13 @@ bail:
 struct inode_operations ocfs2_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
+       .permission     = ocfs2_permission,
 };
 
 struct inode_operations ocfs2_special_file_iops = {
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
+       .permission     = ocfs2_permission,
 };
 
 const struct file_operations ocfs2_fops = {
@@ -1238,6 +1371,8 @@ const struct file_operations ocfs2_fops = {
        .aio_read       = ocfs2_file_aio_read,
        .aio_write      = ocfs2_file_aio_write,
        .ioctl          = ocfs2_ioctl,
+       .splice_read    = ocfs2_file_splice_read,
+       .splice_write   = ocfs2_file_splice_write,
 };
 
 const struct file_operations ocfs2_dops = {
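
The new ocfs2_should_update_atime() in the hunks above rate-limits atime writes: it bails out for read-only or noatime filesystems and mounts, and otherwise only asks for an update when the inode's atime has aged past the mount's s_atime_quantum. A tiny userspace sketch of just that final quantum comparison; the function name and the 60-second quantum below are illustrative, not taken from the code.

    #include <stdio.h>
    #include <time.h>

    /* Update atime only when it has aged past the configured quantum,
     * mirroring the last check in ocfs2_should_update_atime(). */
    static int should_update_atime(time_t atime, time_t now, time_t quantum)
    {
            return (now - atime) > quantum;
    }

    int main(void)
    {
            time_t now = time(NULL);
            time_t atime = now - 90;            /* last access 90 seconds ago */
            printf("update? %d\n", should_update_atime(atime, now, 60));
            return 0;
    }
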
index 740c9e7ca5993b5fd86ab9fefa1a55cdacbb9a21..601a453f18a85d3e4ba0bb7d1d9848c0f7f9f3fa 100644 (file)
@@ -41,17 +41,24 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
                               struct inode *inode,
                               u32 clusters_to_add,
                               struct buffer_head *fe_bh,
-                              struct ocfs2_journal_handle *handle,
+                              handle_t *handle,
                               struct ocfs2_alloc_context *data_ac,
                               struct ocfs2_alloc_context *meta_ac,
                               enum ocfs2_alloc_restarted *reason);
 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
                  struct kstat *stat);
+int ocfs2_permission(struct inode *inode, int mask,
+                    struct nameidata *nd);
 
-int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle,
+int ocfs2_set_inode_size(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *fe_bh,
                         u64 new_i_size);
 
+int ocfs2_should_update_atime(struct inode *inode,
+                             struct vfsmount *vfsmnt);
+int ocfs2_update_inode_atime(struct inode *inode,
+                            struct buffer_head *bh);
+
 #endif /* OCFS2_FILE_H */
index 16e8e74dc966d452401d55a8381e5d89afa64737..42e361f3054f5114d8e81fe911ca2fb21c86601d 100644 (file)
@@ -360,7 +360,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
                                  inode);
 
        ocfs2_set_inode_flags(inode);
-       inode->i_flags |= S_NOATIME;
 
        status = 0;
 bail:
@@ -441,7 +440,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
                                  generation, inode);
 
        if (can_lock) {
-               status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+               status = ocfs2_meta_lock(inode, NULL, 0);
                if (status) {
                        make_bad_inode(inode);
                        mlog_errno(status);
@@ -512,7 +511,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
                                     struct buffer_head *fe_bh)
 {
        int status = 0;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_truncate_context *tc = NULL;
        struct ocfs2_dinode *fe;
 
@@ -524,7 +523,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
        if (!fe->i_clusters)
                goto bail;
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -538,7 +537,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
                goto bail;
        }
 
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
        handle = NULL;
 
        status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
@@ -554,7 +553,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
        }
 bail:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
 
        mlog_exit(status);
        return status;
@@ -568,7 +567,7 @@ static int ocfs2_remove_inode(struct inode *inode,
        int status;
        struct inode *inode_alloc_inode = NULL;
        struct buffer_head *inode_alloc_bh = NULL;
-       struct ocfs2_journal_handle *handle;
+       handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
 
@@ -582,7 +581,7 @@ static int ocfs2_remove_inode(struct inode *inode,
        }
 
        mutex_lock(&inode_alloc_inode->i_mutex);
-       status = ocfs2_meta_lock(inode_alloc_inode, NULL, &inode_alloc_bh, 1);
+       status = ocfs2_meta_lock(inode_alloc_inode, &inode_alloc_bh, 1);
        if (status < 0) {
                mutex_unlock(&inode_alloc_inode->i_mutex);
 
@@ -590,7 +589,7 @@ static int ocfs2_remove_inode(struct inode *inode,
                goto bail;
        }
 
-       handle = ocfs2_start_trans(osb, NULL, OCFS2_DELETE_INODE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_DELETE_INODE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
@@ -629,7 +628,7 @@ static int ocfs2_remove_inode(struct inode *inode,
                mlog_errno(status);
 
 bail_commit:
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 bail_unlock:
        ocfs2_meta_unlock(inode_alloc_inode, 1);
        mutex_unlock(&inode_alloc_inode->i_mutex);
@@ -705,7 +704,7 @@ static int ocfs2_wipe_inode(struct inode *inode,
         * delete_inode operation. We do this now to avoid races with
         * recovery completion on other nodes. */
        mutex_lock(&orphan_dir_inode->i_mutex);
-       status = ocfs2_meta_lock(orphan_dir_inode, NULL, &orphan_dir_bh, 1);
+       status = ocfs2_meta_lock(orphan_dir_inode, &orphan_dir_bh, 1);
        if (status < 0) {
                mutex_unlock(&orphan_dir_inode->i_mutex);
 
@@ -933,7 +932,7 @@ void ocfs2_delete_inode(struct inode *inode)
         * allocation lock here as it won't be needed - nobody will
         * have the file open.
         */
-       status = ocfs2_meta_lock(inode, NULL, &di_bh, 1);
+       status = ocfs2_meta_lock(inode, &di_bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -1067,12 +1066,6 @@ void ocfs2_clear_inode(struct inode *inode)
        mlog_bug_on_msg(oi->ip_open_count,
                        "Clear inode of %llu has open count %d\n",
                        (unsigned long long)oi->ip_blkno, oi->ip_open_count);
-       mlog_bug_on_msg(!list_empty(&oi->ip_handle_list),
-                       "Clear inode of %llu has non empty handle list\n",
-                       (unsigned long long)oi->ip_blkno);
-       mlog_bug_on_msg(oi->ip_handle,
-                       "Clear inode of %llu has non empty handle pointer\n",
-                       (unsigned long long)oi->ip_blkno);
 
        /* Clear all other flags. */
        oi->ip_flags = OCFS2_INODE_CACHE_INLINE;
@@ -1186,7 +1179,7 @@ int ocfs2_inode_revalidate(struct dentry *dentry)
 
        /* Let ocfs2_meta_lock do the work of updating our struct
         * inode for us. */
-       status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+       status = ocfs2_meta_lock(inode, NULL, 0);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -1204,7 +1197,7 @@ bail:
  * struct inode.
  * Only takes ip_lock.
  */
-int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
+int ocfs2_mark_inode_dirty(handle_t *handle,
                           struct inode *inode,
                           struct buffer_head *bh)
 {
index 9957810fdf85e3b712cc0eef92e9f1606b787309..1a7dd2945b34a7bd5f608dddea23b17f4e2d4b9a 100644 (file)
@@ -48,13 +48,6 @@ struct ocfs2_inode_info
 
        struct mutex                    ip_io_mutex;
 
-       /* Used by the journalling code to attach an inode to a
-        * handle.  These are protected by ip_io_mutex in order to lock
-        * out other I/O to the inode until we either commit or
-        * abort. */
-       struct list_head                ip_handle_list;
-       struct ocfs2_journal_handle     *ip_handle;
-
        u32                             ip_flags; /* see below */
        u32                             ip_attr; /* inode attributes */
 
@@ -113,7 +106,7 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
 #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
 #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)
 
-extern kmem_cache_t *ocfs2_inode_cache;
+extern struct kmem_cache *ocfs2_inode_cache;
 
 extern const struct address_space_operations ocfs2_aops;
 
@@ -143,7 +136,7 @@ ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf,
 void ocfs2_sync_blockdev(struct super_block *sb);
 void ocfs2_refresh_inode(struct inode *inode,
                         struct ocfs2_dinode *fe);
-int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
+int ocfs2_mark_inode_dirty(handle_t *handle,
                           struct inode *inode,
                           struct buffer_head *bh);
 int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb);
index 3663cef806897ce30436b0555b6f0be9834c428c..4768be5f3086f00414718f246f5eae0774bf5421 100644 (file)
@@ -26,7 +26,7 @@ static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
 {
        int status;
 
-       status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+       status = ocfs2_meta_lock(inode, NULL, 0);
        if (status < 0) {
                mlog_errno(status);
                return status;
@@ -43,14 +43,14 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
 {
        struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct buffer_head *bh = NULL;
        unsigned oldflags;
        int status;
 
        mutex_lock(&inode->i_mutex);
 
-       status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+       status = ocfs2_meta_lock(inode, &bh, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
@@ -67,7 +67,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
        if (!S_ISDIR(inode->i_mode))
                flags &= ~OCFS2_DIRSYNC_FL;
 
-       handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
@@ -96,7 +96,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
        if (status < 0)
                mlog_errno(status);
 
-       ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
 bail_unlock:
        ocfs2_meta_unlock(inode, 1);
 bail:
index fd9734def551a8da3d82745f565b8ed1f084cced..1d7f4ab1e5ede4b362a5e8f2bcd4523269c5a456 100644 (file)
@@ -57,9 +57,6 @@ static int ocfs2_recover_node(struct ocfs2_super *osb,
 static int __ocfs2_recovery_thread(void *arg);
 static int ocfs2_commit_cache(struct ocfs2_super *osb);
 static int ocfs2_wait_on_mount(struct ocfs2_super *osb);
-static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal,
-                                      struct ocfs2_journal_handle *handle);
-static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle);
 static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
                                      int dirty);
 static int ocfs2_trylock_journal(struct ocfs2_super *osb,
@@ -113,46 +110,18 @@ finally:
        return status;
 }
 
-struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
-{
-       struct ocfs2_journal_handle *retval = NULL;
-
-       retval = kcalloc(1, sizeof(*retval), GFP_NOFS);
-       if (!retval) {
-               mlog(ML_ERROR, "Failed to allocate memory for journal "
-                    "handle!\n");
-               return NULL;
-       }
-
-       retval->max_buffs = 0;
-       retval->num_locks = 0;
-       retval->k_handle = NULL;
-
-       INIT_LIST_HEAD(&retval->locks);
-       INIT_LIST_HEAD(&retval->inode_list);
-       retval->journal = osb->journal;
-
-       return retval;
-}
-
 /* pass it NULL and it will allocate a new handle object for you.  If
  * you pass it a handle however, it may still return error, in which
  * case it has free'd the passed handle for you. */
-struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
-                                              struct ocfs2_journal_handle *handle,
-                                              int max_buffs)
+handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
 {
-       int ret;
        journal_t *journal = osb->journal->j_journal;
-
-       mlog_entry("(max_buffs = %d)\n", max_buffs);
+       handle_t *handle;
 
        BUG_ON(!osb || !osb->journal->j_journal);
 
-       if (ocfs2_is_hard_readonly(osb)) {
-               ret = -EROFS;
-               goto done_free;
-       }
+       if (ocfs2_is_hard_readonly(osb))
+               return ERR_PTR(-EROFS);
 
        BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
        BUG_ON(max_buffs <= 0);
@@ -163,154 +132,39 @@ struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
                BUG();
        }
 
-       if (!handle)
-               handle = ocfs2_alloc_handle(osb);
-       if (!handle) {
-               ret = -ENOMEM;
-               mlog(ML_ERROR, "Failed to allocate memory for journal "
-                    "handle!\n");
-               goto done_free;
-       }
-
-       handle->max_buffs = max_buffs;
-
        down_read(&osb->journal->j_trans_barrier);
 
-       /* actually start the transaction now */
-       handle->k_handle = journal_start(journal, max_buffs);
-       if (IS_ERR(handle->k_handle)) {
+       handle = journal_start(journal, max_buffs);
+       if (IS_ERR(handle)) {
                up_read(&osb->journal->j_trans_barrier);
 
-               ret = PTR_ERR(handle->k_handle);
-               handle->k_handle = NULL;
-               mlog_errno(ret);
+               mlog_errno(PTR_ERR(handle));
 
                if (is_journal_aborted(journal)) {
                        ocfs2_abort(osb->sb, "Detected aborted journal");
-                       ret = -EROFS;
+                       handle = ERR_PTR(-EROFS);
                }
-               goto done_free;
-       }
-
-       atomic_inc(&(osb->journal->j_num_trans));
-       handle->flags |= OCFS2_HANDLE_STARTED;
+       } else
+               atomic_inc(&(osb->journal->j_num_trans));
 
-       mlog_exit_ptr(handle);
        return handle;
-
-done_free:
-       if (handle)
-               ocfs2_commit_unstarted_handle(handle); /* will kfree handle */
-
-       mlog_exit(ret);
-       return ERR_PTR(ret);
-}
-
-void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
-                           struct inode *inode)
-{
-       BUG_ON(!handle);
-       BUG_ON(!inode);
-
-       atomic_inc(&inode->i_count);
-
-       /* we're obviously changing it... */
-       mutex_lock(&inode->i_mutex);
-
-       /* sanity check */
-       BUG_ON(OCFS2_I(inode)->ip_handle);
-       BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
-
-       OCFS2_I(inode)->ip_handle = handle;
-       list_move_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
-}
-
-static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
-{
-       struct list_head *p, *n;
-       struct inode *inode;
-       struct ocfs2_inode_info *oi;
-
-       list_for_each_safe(p, n, &handle->inode_list) {
-               oi = list_entry(p, struct ocfs2_inode_info,
-                               ip_handle_list);
-               inode = &oi->vfs_inode;
-
-               OCFS2_I(inode)->ip_handle = NULL;
-               list_del_init(&OCFS2_I(inode)->ip_handle_list);
-
-               mutex_unlock(&inode->i_mutex);
-               iput(inode);
-       }
-}
-
-/* This is trivial so we do it out of the main commit
- * paths. Beware, it can be called from start_trans too! */
-static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle)
-{
-       mlog_entry_void();
-
-       BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);
-
-       ocfs2_handle_unlock_inodes(handle);
-       /* You are allowed to add journal locks before the transaction
-        * has started. */
-       ocfs2_handle_cleanup_locks(handle->journal, handle);
-
-       kfree(handle);
-
-       mlog_exit_void();
 }
 
-void ocfs2_commit_trans(struct ocfs2_journal_handle *handle)
+int ocfs2_commit_trans(struct ocfs2_super *osb,
+                      handle_t *handle)
 {
-       handle_t *jbd_handle;
-       int retval;
-       struct ocfs2_journal *journal = handle->journal;
-
-       mlog_entry_void();
+       int ret;
+       struct ocfs2_journal *journal = osb->journal;
 
        BUG_ON(!handle);
 
-       if (!(handle->flags & OCFS2_HANDLE_STARTED)) {
-               ocfs2_commit_unstarted_handle(handle);
-               mlog_exit_void();
-               return;
-       }
-
-       /* release inode semaphores we took during this transaction */
-       ocfs2_handle_unlock_inodes(handle);
-
-       /* ocfs2_extend_trans may have had to call journal_restart
-        * which will always commit the transaction, but may return
-        * error for any number of reasons. If this is the case, we
-        * clear k_handle as it's not valid any more. */
-       if (handle->k_handle) {
-               jbd_handle = handle->k_handle;
-
-               if (handle->flags & OCFS2_HANDLE_SYNC)
-                       jbd_handle->h_sync = 1;
-               else
-                       jbd_handle->h_sync = 0;
-
-               /* actually stop the transaction. if we've set h_sync,
-                * it'll have been committed when we return */
-               retval = journal_stop(jbd_handle);
-               if (retval < 0) {
-                       mlog_errno(retval);
-                       mlog(ML_ERROR, "Could not commit transaction\n");
-                       BUG();
-               }
-
-               handle->k_handle = NULL; /* it's been free'd in journal_stop */
-       }
-
-       ocfs2_handle_cleanup_locks(journal, handle);
+       ret = journal_stop(handle);
+       if (ret < 0)
+               mlog_errno(ret);
 
        up_read(&journal->j_trans_barrier);
 
-       kfree(handle);
-       mlog_exit_void();
+       return ret;
 }
 
 /*
@@ -326,20 +180,18 @@ void ocfs2_commit_trans(struct ocfs2_journal_handle *handle)
  * good because transaction ids haven't yet been recorded on the
  * cluster locks associated with this handle.
  */
-int ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
-                      int nblocks)
+int ocfs2_extend_trans(handle_t *handle, int nblocks)
 {
        int status;
 
        BUG_ON(!handle);
-       BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
        BUG_ON(!nblocks);
 
        mlog_entry_void();
 
        mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
 
-       status = journal_extend(handle->k_handle, nblocks);
+       status = journal_extend(handle, nblocks);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
@@ -347,15 +199,12 @@ int ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
 
        if (status > 0) {
                mlog(0, "journal_extend failed, trying journal_restart\n");
-               status = journal_restart(handle->k_handle, nblocks);
+               status = journal_restart(handle, nblocks);
                if (status < 0) {
-                       handle->k_handle = NULL;
                        mlog_errno(status);
                        goto bail;
                }
-               handle->max_buffs = nblocks;
-       } else
-               handle->max_buffs += nblocks;
+       }
 
        status = 0;
 bail:
@@ -364,7 +213,7 @@ bail:
        return status;
 }
 
-int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
+int ocfs2_journal_access(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *bh,
                         int type)
@@ -374,7 +223,6 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
        BUG_ON(!inode);
        BUG_ON(!handle);
        BUG_ON(!bh);
-       BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
 
        mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
                   (unsigned long long)bh->b_blocknr, type,
@@ -403,11 +251,11 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
        switch (type) {
        case OCFS2_JOURNAL_ACCESS_CREATE:
        case OCFS2_JOURNAL_ACCESS_WRITE:
-               status = journal_get_write_access(handle->k_handle, bh);
+               status = journal_get_write_access(handle, bh);
                break;
 
        case OCFS2_JOURNAL_ACCESS_UNDO:
-               status = journal_get_undo_access(handle->k_handle, bh);
+               status = journal_get_undo_access(handle, bh);
                break;
 
        default:
@@ -424,17 +272,15 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
        return status;
 }
 
-int ocfs2_journal_dirty(struct ocfs2_journal_handle *handle,
+int ocfs2_journal_dirty(handle_t *handle,
                        struct buffer_head *bh)
 {
        int status;
 
-       BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
-
        mlog_entry("(bh->b_blocknr=%llu)\n",
                   (unsigned long long)bh->b_blocknr);
 
-       status = journal_dirty_metadata(handle->k_handle, bh);
+       status = journal_dirty_metadata(handle, bh);
        if (status < 0)
                mlog(ML_ERROR, "Could not dirty metadata buffer. "
                     "(bh->b_blocknr=%llu)\n",
@@ -456,59 +302,6 @@ int ocfs2_journal_dirty_data(handle_t *handle,
        return err;
 }
 
-/* We always assume you're adding a metadata lock at level 'ex' */
-int ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle,
-                         struct inode *inode)
-{
-       int status;
-       struct ocfs2_journal_lock *lock;
-
-       BUG_ON(!inode);
-
-       lock = kmem_cache_alloc(ocfs2_lock_cache, GFP_NOFS);
-       if (!lock) {
-               status = -ENOMEM;
-               mlog_errno(-ENOMEM);
-               goto bail;
-       }
-
-       if (!igrab(inode))
-               BUG();
-       lock->jl_inode = inode;
-
-       list_add_tail(&(lock->jl_lock_list), &(handle->locks));
-       handle->num_locks++;
-
-       status = 0;
-bail:
-       mlog_exit(status);
-       return status;
-}
-
-static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal,
-                                      struct ocfs2_journal_handle *handle)
-{
-       struct list_head *p, *n;
-       struct ocfs2_journal_lock *lock;
-       struct inode *inode;
-
-       list_for_each_safe(p, n, &(handle->locks)) {
-               lock = list_entry(p, struct ocfs2_journal_lock,
-                                 jl_lock_list);
-               list_del(&lock->jl_lock_list);
-               handle->num_locks--;
-
-               inode = lock->jl_inode;
-               ocfs2_meta_unlock(inode, 1);
-               if (atomic_read(&inode->i_count) == 1)
-                       mlog(ML_ERROR,
-                            "Inode %llu, I'm doing a last iput for!",
-                            (unsigned long long)OCFS2_I(inode)->ip_blkno);
-               iput(inode);
-               kmem_cache_free(ocfs2_lock_cache, lock);
-       }
-}
-
 #define OCFS2_DEFAULT_COMMIT_INTERVAL  (HZ * 5)
 
 void ocfs2_set_journal_params(struct ocfs2_super *osb)
@@ -562,8 +355,7 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
        /* Skip recovery waits here - journal inode metadata never
         * changes in a live cluster so it can be considered an
         * exception to the rule. */
-       status = ocfs2_meta_lock_full(inode, NULL, &bh, 1,
-                                     OCFS2_META_LOCK_RECOVERY);
+       status = ocfs2_meta_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                if (status != -ERESTARTSYS)
                        mlog(ML_ERROR, "Could not get lock on journal!\n");
@@ -911,11 +703,12 @@ struct ocfs2_la_recovery_item {
  * NOTE: This function can and will sleep on recovery of other nodes
  * during cluster locking, just like any other ocfs2 process.
  */
-void ocfs2_complete_recovery(void *data)
+void ocfs2_complete_recovery(struct work_struct *work)
 {
        int ret;
-       struct ocfs2_super *osb = data;
-       struct ocfs2_journal *journal = osb->journal;
+       struct ocfs2_journal *journal =
+               container_of(work, struct ocfs2_journal, j_recovery_work);
+       struct ocfs2_super *osb = journal->j_osb;
        struct ocfs2_dinode *la_dinode, *tl_dinode;
        struct ocfs2_la_recovery_item *item;
        struct list_head *p, *n;
@@ -1160,8 +953,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
        }
        SET_INODE_JOURNAL(inode);
 
-       status = ocfs2_meta_lock_full(inode, NULL, &bh, 1,
-                                     OCFS2_META_LOCK_RECOVERY);
+       status = ocfs2_meta_lock_full(inode, &bh, 1, OCFS2_META_LOCK_RECOVERY);
        if (status < 0) {
                mlog(0, "status returned from ocfs2_meta_lock=%d\n", status);
                if (status != -ERESTARTSYS)
@@ -1350,7 +1142,7 @@ static int ocfs2_trylock_journal(struct ocfs2_super *osb,
        SET_INODE_JOURNAL(inode);
 
        flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
-       status = ocfs2_meta_lock_full(inode, NULL, NULL, 1, flags);
+       status = ocfs2_meta_lock_full(inode, NULL, 1, flags);
        if (status < 0) {
                if (status != -EAGAIN)
                        mlog_errno(status);
@@ -1433,7 +1225,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
        }       
 
        mutex_lock(&orphan_dir_inode->i_mutex);
-       status = ocfs2_meta_lock(orphan_dir_inode, NULL, NULL, 0);
+       status = ocfs2_meta_lock(orphan_dir_inode, NULL, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out;
index 2f3a6acdac452e590e67e1235e3225c8975aeb8e..899112ad813679da5035903c57cc79b6c2a97aea 100644 (file)
@@ -37,7 +37,6 @@ enum ocfs2_journal_state {
 
 struct ocfs2_super;
 struct ocfs2_dinode;
-struct ocfs2_journal_handle;
 
 struct ocfs2_journal {
        enum ocfs2_journal_state   j_state;    /* Journals current state   */
@@ -133,46 +132,8 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
        spin_unlock(&trans_inc_lock);
 }
 
-extern kmem_cache_t *ocfs2_lock_cache;
-
-struct ocfs2_journal_lock {
-       struct inode     *jl_inode;
-       struct list_head  jl_lock_list;
-};
-
-struct ocfs2_journal_handle {
-       handle_t            *k_handle; /* kernel handle.                */
-       struct ocfs2_journal        *journal;
-       u32                 flags;     /* see flags below.              */
-       int                 max_buffs; /* Buffs reserved by this handle */
-
-       /* The following two fields are for ocfs2_handle_add_lock */
-       int                 num_locks;
-       struct list_head    locks;     /* A bunch of locks to
-                                       * release on commit. This
-                                       * should be a list_head */
-
-       struct list_head     inode_list;
-};
-
-#define OCFS2_HANDLE_STARTED                   1
-/* should we sync-commit this handle? */
-#define OCFS2_HANDLE_SYNC                      2
-static inline int ocfs2_handle_started(struct ocfs2_journal_handle *handle)
-{
-       return handle->flags & OCFS2_HANDLE_STARTED;
-}
-
-static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, int sync)
-{
-       if (sync)
-               handle->flags |= OCFS2_HANDLE_SYNC;
-       else
-               handle->flags &= ~OCFS2_HANDLE_SYNC;
-}
-
 /* Exported only for the journal struct init code in super.c. Do not call. */
-void ocfs2_complete_recovery(void *data);
+void ocfs2_complete_recovery(struct work_struct *work);
 
 /*
  *  Journal Control:
@@ -231,15 +192,14 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
  *  Transaction Handling:
  *  Manage the lifetime of a transaction handle.
  *
- *  ocfs2_alloc_handle     - Only allocate a handle so we can start putting
- *                          cluster locks on it. To actually change blocks,
- *                          call ocfs2_start_trans with the handle returned
- *                          from this function. You may call ocfs2_commit_trans
- *                           at any time in the lifetime of a handle.
  *  ocfs2_start_trans      - Begin a transaction. Give it an upper estimate of
  *                          the number of blocks that will be changed during
  *                          this handle.
- *  ocfs2_commit_trans     - Complete a handle.
+ *  ocfs2_commit_trans - Complete a handle. It might return -EIO if
+ *                       the journal was aborted. The majority of paths don't
+ *                       check the return value as an error there comes too
+ *                       late to do anything (and will be picked up in a
+ *                       later transaction).
  *  ocfs2_extend_trans     - Extend a handle by nblocks credits. This may
  *                          commit the handle to disk in the process, but will
  *                          not release any locks taken during the transaction.
@@ -249,24 +209,16 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode)
  *  ocfs2_journal_dirty    - Mark a journalled buffer as having dirty data.
  *  ocfs2_journal_dirty_data - Indicate that a data buffer should go out before
  *                             the current handle commits.
- *  ocfs2_handle_add_lock  - Sometimes we need to delay lock release
- *                          until after a transaction has been completed. Use
- *                          ocfs2_handle_add_lock to indicate that a lock needs
- *                          to be released at the end of that handle. Locks
- *                          will be released in the order that they are added.
- *  ocfs2_handle_add_inode - Add a locked inode to a transaction.
  */
 
 /* You must always start_trans with a number of buffs > 0, but it's
  * perfectly legal to go through an entire transaction without having
  * dirtied any buffers. */
-struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb);
-struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
-                                              struct ocfs2_journal_handle *handle,
+handle_t                   *ocfs2_start_trans(struct ocfs2_super *osb,
                                               int max_buffs);
-void                        ocfs2_commit_trans(struct ocfs2_journal_handle *handle);
-int                         ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
-                                               int nblocks);
+int                         ocfs2_commit_trans(struct ocfs2_super *osb,
+                                               handle_t *handle);
+int                         ocfs2_extend_trans(handle_t *handle, int nblocks);
 
 /*
  * Create access is for when we get a newly created buffer and we're
@@ -283,7 +235,7 @@ int                      ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
 #define OCFS2_JOURNAL_ACCESS_WRITE  1
 #define OCFS2_JOURNAL_ACCESS_UNDO   2
 
-int                  ocfs2_journal_access(struct ocfs2_journal_handle *handle,
+int                  ocfs2_journal_access(handle_t *handle,
                                          struct inode *inode,
                                          struct buffer_head *bh,
                                          int type);
@@ -306,18 +258,10 @@ int                  ocfs2_journal_access(struct ocfs2_journal_handle *handle,
  *     <modify the bh>
  *     ocfs2_journal_dirty(handle, bh);
  */
-int                  ocfs2_journal_dirty(struct ocfs2_journal_handle *handle,
+int                  ocfs2_journal_dirty(handle_t *handle,
                                         struct buffer_head *bh);
 int                  ocfs2_journal_dirty_data(handle_t *handle,
                                              struct buffer_head *bh);
-int                  ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle,
-                                          struct inode *inode);
-/*
- * Use this to protect from other processes reading buffer state while
- * it's in flight.
- */
-void                 ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
-                                           struct inode *inode);
 
 /*
  *  Credit Macros:
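
Taken together, the "Transaction Handling" comment block above describes the simplified lifecycle: the caller takes its own cluster locks, starts a handle with an upper credit estimate, may grow it with ocfs2_extend_trans(), and drops its locks only after ocfs2_commit_trans(). A sketch of that lifecycle, assuming only the prototypes declared above (the function name, the credit counts and the "needs_more" condition are hypothetical):

static int update_dinode(struct ocfs2_super *osb, struct inode *inode,
			 int needs_more)
{
	struct buffer_head *bh = NULL;
	handle_t *handle;
	int status;

	/* Cluster lock first - handles no longer track locks or inodes. */
	status = ocfs2_meta_lock(inode, &bh, 1);
	if (status < 0) {
		mlog_errno(status);
		return status;
	}

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		goto out_unlock;
	}

	if (needs_more) {
		/* May commit the blocks dirtied so far, but the handle
		 * and our cluster locks stay valid. */
		status = ocfs2_extend_trans(handle, OCFS2_INODE_UPDATE_CREDITS);
		if (status < 0)
			goto out_commit;
	}

	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0)
		goto out_commit;

	/* ... modify the dinode in bh->b_data ... */

	status = ocfs2_journal_dirty(handle, bh);

out_commit:
	/* Return value usually ignored; an error here means the journal
	 * has already been aborted. */
	ocfs2_commit_trans(osb, handle);
out_unlock:
	ocfs2_meta_unlock(inode, 1);
	brelse(bh);
	return status;
}
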
index 1f17a4d08287fbd6b1e02fc13c3aac997bdcd2c1..698d79a74ef8ec4fea233f591485f9a39b8c9b31 100644 (file)
@@ -58,19 +58,18 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
 static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
 
 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
-                                   struct ocfs2_journal_handle *handle,
+                                   handle_t *handle,
                                    struct ocfs2_dinode *alloc,
                                    struct inode *main_bm_inode,
                                    struct buffer_head *main_bm_bh);
 
 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
-                                               struct ocfs2_journal_handle *handle,
                                                struct ocfs2_alloc_context **ac,
                                                struct inode **bitmap_inode,
                                                struct buffer_head **bitmap_bh);
 
 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
-                                       struct ocfs2_journal_handle *handle,
+                                       handle_t *handle,
                                        struct ocfs2_alloc_context *ac);
 
 static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
@@ -196,7 +195,7 @@ bail:
 void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
 {
        int status;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle;
        struct inode *local_alloc_inode = NULL;
        struct buffer_head *bh = NULL;
        struct buffer_head *main_bm_bh = NULL;
@@ -207,7 +206,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        mlog_entry_void();
 
        if (osb->local_alloc_state == OCFS2_LA_UNUSED)
-               goto bail;
+               goto out;
 
        local_alloc_inode =
                ocfs2_get_system_file_inode(osb,
@@ -216,40 +215,34 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        if (!local_alloc_inode) {
                status = -ENOENT;
                mlog_errno(status);
-               goto bail;
+               goto out;
        }
 
        osb->local_alloc_state = OCFS2_LA_DISABLED;
 
-       handle = ocfs2_alloc_handle(osb);
-       if (!handle) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        main_bm_inode = ocfs2_get_system_file_inode(osb,
                                                    GLOBAL_BITMAP_SYSTEM_INODE,
                                                    OCFS2_INVALID_SLOT);
        if (!main_bm_inode) {
                status = -EINVAL;
                mlog_errno(status);
-               goto bail;
+               goto out;
        }
 
-       ocfs2_handle_add_inode(handle, main_bm_inode);
-       status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1);
+       mutex_lock(&main_bm_inode->i_mutex);
+
+       status = ocfs2_meta_lock(main_bm_inode, &main_bm_bh, 1);
        if (status < 0) {
                mlog_errno(status);
-               goto bail;
+               goto out_mutex;
        }
 
        /* WINDOW_MOVE_CREDITS is a bit heavy... */
-       handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
        if (IS_ERR(handle)) {
                mlog_errno(PTR_ERR(handle));
                handle = NULL;
-               goto bail;
+               goto out_unlock;
        }
 
        bh = osb->local_alloc_bh;
@@ -258,7 +251,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        alloc_copy = kmalloc(bh->b_size, GFP_KERNEL);
        if (!alloc_copy) {
                status = -ENOMEM;
-               goto bail;
+               goto out_commit;
        }
        memcpy(alloc_copy, alloc, bh->b_size);
 
@@ -266,7 +259,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
-               goto bail;
+               goto out_commit;
        }
 
        ocfs2_clear_local_alloc(alloc);
@@ -274,7 +267,7 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        status = ocfs2_journal_dirty(handle, bh);
        if (status < 0) {
                mlog_errno(status);
-               goto bail;
+               goto out_commit;
        }
 
        brelse(bh);
@@ -286,16 +279,20 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        if (status < 0)
                mlog_errno(status);
 
-bail:
-       if (handle)
-               ocfs2_commit_trans(handle);
+out_commit:
+       ocfs2_commit_trans(osb, handle);
 
+out_unlock:
        if (main_bm_bh)
                brelse(main_bm_bh);
 
-       if (main_bm_inode)
-               iput(main_bm_inode);
+       ocfs2_meta_unlock(main_bm_inode, 1);
 
+out_mutex:
+       mutex_unlock(&main_bm_inode->i_mutex);
+       iput(main_bm_inode);
+
+out:
        if (local_alloc_inode)
                iput(local_alloc_inode);
 
@@ -385,61 +382,59 @@ int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
                                        struct ocfs2_dinode *alloc)
 {
        int status;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle;
        struct buffer_head *main_bm_bh = NULL;
-       struct inode *main_bm_inode = NULL;
+       struct inode *main_bm_inode;
 
        mlog_entry_void();
 
-       handle = ocfs2_alloc_handle(osb);
-       if (!handle) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        main_bm_inode = ocfs2_get_system_file_inode(osb,
                                                    GLOBAL_BITMAP_SYSTEM_INODE,
                                                    OCFS2_INVALID_SLOT);
        if (!main_bm_inode) {
                status = -EINVAL;
                mlog_errno(status);
-               goto bail;
+               goto out;
        }
 
-       ocfs2_handle_add_inode(handle, main_bm_inode);
-       status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1);
+       mutex_lock(&main_bm_inode->i_mutex);
+
+       status = ocfs2_meta_lock(main_bm_inode, &main_bm_bh, 1);
        if (status < 0) {
                mlog_errno(status);
-               goto bail;
+               goto out_mutex;
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
-               goto bail;
+               goto out_unlock;
        }
 
        /* we want the bitmap change to be recorded on disk asap */
-       ocfs2_handle_set_sync(handle, 1);
+       handle->h_sync = 1;
 
        status = ocfs2_sync_local_to_main(osb, handle, alloc,
                                          main_bm_inode, main_bm_bh);
        if (status < 0)
                mlog_errno(status);
 
-bail:
-       if (handle)
-               ocfs2_commit_trans(handle);
+       ocfs2_commit_trans(osb, handle);
+
+out_unlock:
+       ocfs2_meta_unlock(main_bm_inode, 1);
+
+out_mutex:
+       mutex_unlock(&main_bm_inode->i_mutex);
 
        if (main_bm_bh)
                brelse(main_bm_bh);
 
-       if (main_bm_inode)
-               iput(main_bm_inode);
+       iput(main_bm_inode);
 
+out:
        mlog_exit(status);
        return status;
 }
@@ -452,7 +447,6 @@ bail:
  * our own in order to shift windows.
  */
 int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
-                                  struct ocfs2_journal_handle *passed_handle,
                                   u32 bits_wanted,
                                   struct ocfs2_alloc_context *ac)
 {
@@ -463,9 +457,7 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
 
        mlog_entry_void();
 
-       BUG_ON(!passed_handle);
        BUG_ON(!ac);
-       BUG_ON(passed_handle->flags & OCFS2_HANDLE_STARTED);
 
        local_alloc_inode =
                ocfs2_get_system_file_inode(osb,
@@ -476,7 +468,11 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
                mlog_errno(status);
                goto bail;
        }
-       ocfs2_handle_add_inode(passed_handle, local_alloc_inode);
+
+       mutex_lock(&local_alloc_inode->i_mutex);
+
+       ac->ac_inode = local_alloc_inode;
+       ac->ac_which = OCFS2_AC_USE_LOCAL;
 
        if (osb->local_alloc_state != OCFS2_LA_ENABLED) {
                status = -ENOSPC;
@@ -515,21 +511,17 @@ int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
                }
        }
 
-       ac->ac_inode = igrab(local_alloc_inode);
        get_bh(osb->local_alloc_bh);
        ac->ac_bh = osb->local_alloc_bh;
-       ac->ac_which = OCFS2_AC_USE_LOCAL;
        status = 0;
 bail:
-       if (local_alloc_inode)
-               iput(local_alloc_inode);
 
        mlog_exit(status);
        return status;
 }
 
 int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
-                                struct ocfs2_journal_handle *handle,
+                                handle_t *handle,
                                 struct ocfs2_alloc_context *ac,
                                 u32 min_bits,
                                 u32 *bit_off,
@@ -707,7 +699,7 @@ static void ocfs2_verify_zero_bits(unsigned long *bitmap,
  * passed is used for caching.
  */
 static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
-                                   struct ocfs2_journal_handle *handle,
+                                   handle_t *handle,
                                    struct ocfs2_dinode *alloc,
                                    struct inode *main_bm_inode,
                                    struct buffer_head *main_bm_bh)
@@ -778,7 +770,6 @@ bail:
 }
 
 static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
-                                               struct ocfs2_journal_handle *handle,
                                                struct ocfs2_alloc_context **ac,
                                                struct inode **bitmap_inode,
                                                struct buffer_head **bitmap_bh)
@@ -792,7 +783,6 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
                goto bail;
        }
 
-       (*ac)->ac_handle = handle;
        (*ac)->ac_bits_wanted = ocfs2_local_alloc_window_bits(osb);
 
        status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
@@ -821,7 +811,7 @@ bail:
  * pass it the bitmap lock in lock_bh if you have it.
  */
 static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
-                                       struct ocfs2_journal_handle *handle,
+                                       handle_t *handle,
                                        struct ocfs2_alloc_context *ac)
 {
        int status = 0;
@@ -888,23 +878,15 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
        int status = 0;
        struct buffer_head *main_bm_bh = NULL;
        struct inode *main_bm_inode = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_dinode *alloc;
        struct ocfs2_dinode *alloc_copy = NULL;
        struct ocfs2_alloc_context *ac = NULL;
 
        mlog_entry_void();
 
-       handle = ocfs2_alloc_handle(osb);
-       if (!handle) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        /* This will lock the main bitmap for us. */
        status = ocfs2_local_alloc_reserve_for_window(osb,
-                                                     handle,
                                                      &ac,
                                                      &main_bm_inode,
                                                      &main_bm_bh);
@@ -914,7 +896,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
                goto bail;
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_WINDOW_MOVE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -972,7 +954,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
        status = 0;
 bail:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
 
        if (main_bm_bh)
                brelse(main_bm_bh);
index 30f88ce14e460b0169696fe2f97c248867295cd9..385a10152f9c8a7fd72cad76ec2a0bc6a90eff09 100644 (file)
@@ -42,12 +42,11 @@ int ocfs2_alloc_should_use_local(struct ocfs2_super *osb,
 
 struct ocfs2_alloc_context;
 int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
-                                  struct ocfs2_journal_handle *passed_handle,
                                   u32 bits_wanted,
                                   struct ocfs2_alloc_context *ac);
 
 int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
-                                struct ocfs2_journal_handle *handle,
+                                handle_t *handle,
                                 struct ocfs2_alloc_context *ac,
                                 u32 min_bits,
                                 u32 *bit_off,
index 83934e33e5b06de784e108672e552a4eb507feaf..69f85ae392dcc9ab0a830a71c576dabda1f779b0 100644 (file)
@@ -82,6 +82,8 @@ static struct vm_operations_struct ocfs2_file_vm_ops = {
 
 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
 {
+       int ret = 0, lock_level = 0;
+
        /* We don't want to support shared writable mappings yet. */
        if (((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE))
            && ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) {
@@ -91,7 +93,14 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
        }
 
-       file_accessed(file);
+       ret = ocfs2_meta_lock_atime(file->f_dentry->d_inode,
+                                   file->f_vfsmnt, &lock_level);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+       ocfs2_meta_unlock(file->f_dentry->d_inode, lock_level);
+out:
        vma->vm_ops = &ocfs2_file_vm_ops;
        return 0;
 }
index a57b751d4f40e3fad3cf7bc1df95b09816f8307c..21db45ddf144c2ebbfb96237b6d1c9a8970776ae 100644 (file)
@@ -75,12 +75,12 @@ static int inline ocfs2_search_dirblock(struct buffer_head *bh,
                                        unsigned long offset,
                                        struct ocfs2_dir_entry **res_dir);
 
-static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle,
+static int ocfs2_delete_entry(handle_t *handle,
                              struct inode *dir,
                              struct ocfs2_dir_entry *de_del,
                              struct buffer_head *bh);
 
-static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+static int __ocfs2_add_entry(handle_t *handle,
                             struct inode *dir,
                             const char *name, int namelen,
                             struct inode *inode, u64 blkno,
@@ -93,43 +93,37 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
                              dev_t dev,
                              struct buffer_head **new_fe_bh,
                              struct buffer_head *parent_fe_bh,
-                             struct ocfs2_journal_handle *handle,
+                             handle_t *handle,
                              struct inode **ret_inode,
                              struct ocfs2_alloc_context *inode_ac);
 
 static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
-                             struct ocfs2_journal_handle *handle,
+                             handle_t *handle,
                              struct inode *parent,
                              struct inode *inode,
                              struct buffer_head *fe_bh,
                              struct ocfs2_alloc_context *data_ac);
 
-static int ocfs2_double_lock(struct ocfs2_super *osb,
-                            struct ocfs2_journal_handle *handle,
-                            struct buffer_head **bh1,
-                            struct inode *inode1,
-                            struct buffer_head **bh2,
-                            struct inode *inode2);
-
 static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct ocfs2_journal_handle *handle,
+                                   struct inode **ret_orphan_dir,
                                    struct inode *inode,
                                    char *name,
                                    struct buffer_head **de_bh);
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
-                           struct ocfs2_journal_handle *handle,
+                           handle_t *handle,
                            struct inode *inode,
                            struct ocfs2_dinode *fe,
                            char *name,
-                           struct buffer_head *de_bh);
+                           struct buffer_head *de_bh,
+                           struct inode *orphan_dir_inode);
 
 static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
-                                    struct ocfs2_journal_handle *handle,
+                                    handle_t *handle,
                                     struct inode *inode,
                                     const char *symname);
 
-static inline int ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_add_entry(handle_t *handle,
                                  struct dentry *dentry,
                                  struct inode *inode, u64 blkno,
                                  struct buffer_head *parent_fe_bh,
@@ -165,7 +159,7 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
        mlog(0, "find name %.*s in directory %llu\n", dentry->d_name.len,
             dentry->d_name.name, (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
-       status = ocfs2_meta_lock(dir, NULL, NULL, 0);
+       status = ocfs2_meta_lock(dir, NULL, 0);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -242,7 +236,7 @@ bail:
 }
 
 static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
-                             struct ocfs2_journal_handle *handle,
+                             handle_t *handle,
                              struct inode *parent,
                              struct inode *inode,
                              struct buffer_head *fe_bh,
@@ -317,7 +311,7 @@ static int ocfs2_mknod(struct inode *dir,
 {
        int status = 0;
        struct buffer_head *parent_fe_bh = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_super *osb;
        struct ocfs2_dinode *dirfe;
        struct buffer_head *new_fe_bh = NULL;
@@ -333,18 +327,11 @@ static int ocfs2_mknod(struct inode *dir,
        /* get our super block */
        osb = OCFS2_SB(dir->i_sb);
 
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto leave;
-       }
-
-       status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+       status = ocfs2_meta_lock(dir, &parent_fe_bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
-               goto leave;
+               return status;
        }
 
        if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) {
@@ -374,7 +361,7 @@ static int ocfs2_mknod(struct inode *dir,
        }
 
        /* reserve an inode spot */
-       status = ocfs2_reserve_new_inode(osb, handle, &inode_ac);
+       status = ocfs2_reserve_new_inode(osb, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -384,7 +371,7 @@ static int ocfs2_mknod(struct inode *dir,
        /* are we making a directory? If so, reserve a cluster for his
         * 1st extent. */
        if (S_ISDIR(mode)) {
-               status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+               status = ocfs2_reserve_clusters(osb, 1, &data_ac);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
@@ -392,7 +379,7 @@ static int ocfs2_mknod(struct inode *dir,
                }
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_MKNOD_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_MKNOD_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -453,7 +440,9 @@ static int ocfs2_mknod(struct inode *dir,
        status = 0;
 leave:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
+
+       ocfs2_meta_unlock(dir, 1);
 
        if (status == -ENOSPC)
                mlog(0, "Disk is full\n");
@@ -487,7 +476,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
                              dev_t dev,
                              struct buffer_head **new_fe_bh,
                              struct buffer_head *parent_fe_bh,
-                             struct ocfs2_journal_handle *handle,
+                             handle_t *handle,
                              struct inode **ret_inode,
                              struct ocfs2_alloc_context *inode_ac)
 {
@@ -653,7 +642,7 @@ static int ocfs2_link(struct dentry *old_dentry,
                      struct inode *dir,
                      struct dentry *dentry)
 {
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle;
        struct inode *inode = old_dentry->d_inode;
        int err;
        struct buffer_head *fe_bh = NULL;
@@ -666,68 +655,60 @@ static int ocfs2_link(struct dentry *old_dentry,
                   old_dentry->d_name.len, old_dentry->d_name.name,
                   dentry->d_name.len, dentry->d_name.name);
 
-       if (S_ISDIR(inode->i_mode)) {
-               err = -EPERM;
-               goto bail;
-       }
-
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               err = -ENOMEM;
-               goto bail;
-       }
+       if (S_ISDIR(inode->i_mode))
+               return -EPERM;
 
-       err = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+       err = ocfs2_meta_lock(dir, &parent_fe_bh, 1);
        if (err < 0) {
                if (err != -ENOENT)
                        mlog_errno(err);
-               goto bail;
+               return err;
        }
 
        if (!dir->i_nlink) {
                err = -ENOENT;
-               goto bail;
+               goto out;
        }
 
        err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
                                        dentry->d_name.len);
        if (err)
-               goto bail;
+               goto out;
 
        err = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
                                           dentry->d_name.name,
                                           dentry->d_name.len, &de_bh);
        if (err < 0) {
                mlog_errno(err);
-               goto bail;
+               goto out;
        }
 
-       err = ocfs2_meta_lock(inode, handle, &fe_bh, 1);
+       err = ocfs2_meta_lock(inode, &fe_bh, 1);
        if (err < 0) {
                if (err != -ENOENT)
                        mlog_errno(err);
-               goto bail;
+               goto out;
        }
 
        fe = (struct ocfs2_dinode *) fe_bh->b_data;
        if (le16_to_cpu(fe->i_links_count) >= OCFS2_LINK_MAX) {
                err = -EMLINK;
-               goto bail;
+               goto out_unlock_inode;
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_LINK_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_LINK_CREDITS);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(err);
-               goto bail;
+               goto out_unlock_inode;
        }
 
        err = ocfs2_journal_access(handle, inode, fe_bh,
                                   OCFS2_JOURNAL_ACCESS_WRITE);
        if (err < 0) {
                mlog_errno(err);
-               goto bail;
+               goto out_commit;
        }
 
        inc_nlink(inode);
@@ -741,7 +722,7 @@ static int ocfs2_link(struct dentry *old_dentry,
                le16_add_cpu(&fe->i_links_count, -1);
                drop_nlink(inode);
                mlog_errno(err);
-               goto bail;
+               goto out_commit;
        }
 
        err = ocfs2_add_entry(handle, dentry, inode,
@@ -751,21 +732,27 @@ static int ocfs2_link(struct dentry *old_dentry,
                le16_add_cpu(&fe->i_links_count, -1);
                drop_nlink(inode);
                mlog_errno(err);
-               goto bail;
+               goto out_commit;
        }
 
        err = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(dir)->ip_blkno);
        if (err) {
                mlog_errno(err);
-               goto bail;
+               goto out_commit;
        }
 
        atomic_inc(&inode->i_count);
        dentry->d_op = &ocfs2_dentry_ops;
        d_instantiate(dentry, inode);
-bail:
-       if (handle)
-               ocfs2_commit_trans(handle);
+
+out_commit:
+       ocfs2_commit_trans(osb, handle);
+out_unlock_inode:
+       ocfs2_meta_unlock(inode, 1);
+
+out:
+       ocfs2_meta_unlock(dir, 1);
+
        if (de_bh)
                brelse(de_bh);
        if (fe_bh)
@@ -812,13 +799,15 @@ static int ocfs2_unlink(struct inode *dir,
                        struct dentry *dentry)
 {
        int status;
+       int child_locked = 0;
        struct inode *inode = dentry->d_inode;
+       struct inode *orphan_dir = NULL;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        u64 blkno;
        struct ocfs2_dinode *fe = NULL;
        struct buffer_head *fe_bh = NULL;
        struct buffer_head *parent_node_bh = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_dir_entry *dirent = NULL;
        struct buffer_head *dirent_bh = NULL;
        char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
@@ -833,22 +822,14 @@ static int ocfs2_unlink(struct inode *dir,
 
        if (inode == osb->root_inode) {
                mlog(0, "Cannot delete the root directory\n");
-               status = -EPERM;
-               goto leave;
+               return -EPERM;
        }
 
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto leave;
-       }
-
-       status = ocfs2_meta_lock(dir, handle, &parent_node_bh, 1);
+       status = ocfs2_meta_lock(dir, &parent_node_bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
-               goto leave;
+               return status;
        }
 
        status = ocfs2_find_files_on_disk(dentry->d_name.name,
@@ -869,12 +850,13 @@ static int ocfs2_unlink(struct inode *dir,
                goto leave;
        }
 
-       status = ocfs2_meta_lock(inode, handle, &fe_bh, 1);
+       status = ocfs2_meta_lock(inode, &fe_bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto leave;
        }
+       child_locked = 1;
 
        if (S_ISDIR(inode->i_mode)) {
                if (!ocfs2_empty_dir(inode)) {
@@ -895,7 +877,7 @@ static int ocfs2_unlink(struct inode *dir,
        }
 
        if (inode_is_unlinkable(inode)) {
-               status = ocfs2_prepare_orphan_dir(osb, handle, inode,
+               status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, inode,
                                                  orphan_name,
                                                  &orphan_entry_bh);
                if (status < 0) {
@@ -904,7 +886,7 @@ static int ocfs2_unlink(struct inode *dir,
                }
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_UNLINK_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_UNLINK_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -923,7 +905,7 @@ static int ocfs2_unlink(struct inode *dir,
 
        if (inode_is_unlinkable(inode)) {
                status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name,
-                                         orphan_entry_bh);
+                                         orphan_entry_bh, orphan_dir);
                if (status < 0) {
                        mlog_errno(status);
                        goto leave;
@@ -960,7 +942,19 @@ static int ocfs2_unlink(struct inode *dir,
 
 leave:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
+
+       if (child_locked)
+               ocfs2_meta_unlock(inode, 1);
+
+       ocfs2_meta_unlock(dir, 1);
+
+       if (orphan_dir) {
+               /* This was locked for us in ocfs2_prepare_orphan_dir() */
+               ocfs2_meta_unlock(orphan_dir, 1);
+               mutex_unlock(&orphan_dir->i_mutex);
+               iput(orphan_dir);
+       }
 
        if (fe_bh)
                brelse(fe_bh);
@@ -984,7 +978,6 @@ leave:
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
-                            struct ocfs2_journal_handle *handle,
                             struct buffer_head **bh1,
                             struct inode *inode1,
                             struct buffer_head **bh2,
@@ -1000,8 +993,6 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                   (unsigned long long)oi1->ip_blkno,
                   (unsigned long long)oi2->ip_blkno);
 
-       BUG_ON(!handle);
-
        if (*bh1)
                *bh1 = NULL;
        if (*bh2)
@@ -1021,25 +1012,41 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                        inode1 = tmpinode;
                }
                /* lock id2 */
-               status = ocfs2_meta_lock(inode2, handle, bh2, 1);
+               status = ocfs2_meta_lock(inode2, bh2, 1);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
                        goto bail;
                }
        }
+
        /* lock id1 */
-       status = ocfs2_meta_lock(inode1, handle, bh1, 1);
+       status = ocfs2_meta_lock(inode1, bh1, 1);
        if (status < 0) {
+               /*
+                * An error return must mean that no cluster locks
+                * were held on function exit.
+                */
+               if (oi1->ip_blkno != oi2->ip_blkno)
+                       ocfs2_meta_unlock(inode2, 1);
+
                if (status != -ENOENT)
                        mlog_errno(status);
-               goto bail;
        }
+
 bail:
        mlog_exit(status);
        return status;
 }
 
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2)
+{
+       ocfs2_meta_unlock(inode1, 1);
+
+       if (inode1 != inode2)
+               ocfs2_meta_unlock(inode2, 1);
+}
+
 #define PARENT_INO(buffer) \
        ((struct ocfs2_dir_entry *) \
         ((char *)buffer + \
@@ -1050,9 +1057,11 @@ static int ocfs2_rename(struct inode *old_dir,
                        struct inode *new_dir,
                        struct dentry *new_dentry)
 {
-       int status = 0, rename_lock = 0;
+       int status = 0, rename_lock = 0, parents_locked = 0;
+       int old_child_locked = 0, new_child_locked = 0;
        struct inode *old_inode = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
+       struct inode *orphan_dir = NULL;
        struct ocfs2_dinode *newfe = NULL;
        char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
        struct buffer_head *orphan_entry_bh = NULL;
@@ -1060,7 +1069,7 @@ static int ocfs2_rename(struct inode *old_dir,
        struct buffer_head *insert_entry_bh = NULL;
        struct ocfs2_super *osb = NULL;
        u64 newfe_blkno;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct buffer_head *old_dir_bh = NULL;
        struct buffer_head *new_dir_bh = NULL;
        struct ocfs2_dir_entry *old_de = NULL, *new_de = NULL; // dirent for old_dentry
@@ -1105,21 +1114,14 @@ static int ocfs2_rename(struct inode *old_dir,
                rename_lock = 1;
        }
 
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        /* if old and new are the same, this'll just do one lock. */
-       status = ocfs2_double_lock(osb, handle,
-                                 &old_dir_bh, old_dir,
-                                 &new_dir_bh, new_dir);
+       status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+                                  &new_dir_bh, new_dir);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
+       parents_locked = 1;
 
        /* make sure both dirs have bhs
         * get an extra ref on old_dir_bh if old==new */
@@ -1140,12 +1142,13 @@ static int ocfs2_rename(struct inode *old_dir,
         * the vote thread on other nodes won't have to concurrently
         * downconvert the inode and the dentry locks.
         */
-       status = ocfs2_meta_lock(old_inode, handle, NULL, 1);
+       status = ocfs2_meta_lock(old_inode, NULL, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail;
        }
+       old_child_locked = 1;
 
        status = ocfs2_remote_dentry_delete(old_dentry);
        if (status < 0) {
@@ -1231,12 +1234,13 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
 
-               status = ocfs2_meta_lock(new_inode, handle, &newfe_bh, 1);
+               status = ocfs2_meta_lock(new_inode, &newfe_bh, 1);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
                        goto bail;
                }
+               new_child_locked = 1;
 
                status = ocfs2_remote_dentry_delete(new_dentry);
                if (status < 0) {
@@ -1252,7 +1256,7 @@ static int ocfs2_rename(struct inode *old_dir,
                     (unsigned long long)newfe_bh->b_blocknr : 0ULL);
 
                if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) {
-                       status = ocfs2_prepare_orphan_dir(osb, handle,
+                       status = ocfs2_prepare_orphan_dir(osb, &orphan_dir,
                                                          new_inode,
                                                          orphan_name,
                                                          &orphan_entry_bh);
@@ -1280,7 +1284,7 @@ static int ocfs2_rename(struct inode *old_dir,
                }
        }
 
-       handle = ocfs2_start_trans(osb, handle, OCFS2_RENAME_CREDITS);
+       handle = ocfs2_start_trans(osb, OCFS2_RENAME_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -1307,7 +1311,7 @@ static int ocfs2_rename(struct inode *old_dir,
                    (newfe->i_links_count == cpu_to_le16(1))){
                        status = ocfs2_orphan_add(osb, handle, new_inode,
                                                  newfe, orphan_name,
-                                                 orphan_entry_bh);
+                                                 orphan_entry_bh, orphan_dir);
                        if (status < 0) {
                                mlog_errno(status);
                                goto bail;
@@ -1424,7 +1428,23 @@ bail:
                ocfs2_rename_unlock(osb);
 
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
+
+       if (parents_locked)
+               ocfs2_double_unlock(old_dir, new_dir);
+
+       if (old_child_locked)
+               ocfs2_meta_unlock(old_inode, 1);
+
+       if (new_child_locked)
+               ocfs2_meta_unlock(new_inode, 1);
+
+       if (orphan_dir) {
+               /* This was locked for us in ocfs2_prepare_orphan_dir() */
+               ocfs2_meta_unlock(orphan_dir, 1);
+               mutex_unlock(&orphan_dir->i_mutex);
+               iput(orphan_dir);
+       }
 
        if (new_inode)
                sync_mapping_buffers(old_inode->i_mapping);
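Note: with cluster locks no longer attached to the journal handle, ocfs2_rename() has to remember which locks it actually took (parents_locked, old_child_locked, new_child_locked, orphan_dir) so that the bail: path drops exactly those and nothing more. A standalone sketch of that single-exit, flag-per-resource style; acquire_parents()/acquire_child() and friends are made-up stand-ins, not OCFS2 calls:

#include <stdio.h>

/* Made-up stand-ins for the real lock/unlock calls. */
static int  acquire_parents(void) { return 0; }
static void release_parents(void) { }
static int  acquire_child(void)   { return 0; }
static void release_child(void)   { }
static int  do_rename_work(void)  { return 0; }

static int rename_like_operation(void)
{
        int status, parents_locked = 0, child_locked = 0;

        status = acquire_parents();
        if (status < 0)
                goto bail;
        parents_locked = 1;

        status = acquire_child();
        if (status < 0)
                goto bail;
        child_locked = 1;

        status = do_rename_work();
bail:
        /* Drop only what was actually taken, most recently acquired first. */
        if (child_locked)
                release_child();
        if (parents_locked)
                release_parents();
        return status;
}

int main(void)
{
        printf("status = %d\n", rename_like_operation());
        return 0;
}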
@@ -1458,7 +1478,7 @@ bail:
  * data, including the null terminator.
  */
 static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
-                                    struct ocfs2_journal_handle *handle,
+                                    handle_t *handle,
                                     struct inode *inode,
                                     const char *symname)
 {
@@ -1573,7 +1593,7 @@ static int ocfs2_symlink(struct inode *dir,
        struct buffer_head *parent_fe_bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_dinode *dirfe;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        struct ocfs2_alloc_context *inode_ac = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
 
@@ -1587,19 +1607,12 @@ static int ocfs2_symlink(struct inode *dir,
 
        credits = ocfs2_calc_symlink_credits(sb);
 
-       handle = ocfs2_alloc_handle(osb);
-       if (handle == NULL) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        /* lock the parent directory */
-       status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+       status = ocfs2_meta_lock(dir, &parent_fe_bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
-               goto bail;
+               return status;
        }
 
        dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
@@ -1622,7 +1635,7 @@ static int ocfs2_symlink(struct inode *dir,
                goto bail;
        }
 
-       status = ocfs2_reserve_new_inode(osb, handle, &inode_ac);
+       status = ocfs2_reserve_new_inode(osb, &inode_ac);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -1631,7 +1644,7 @@ static int ocfs2_symlink(struct inode *dir,
 
        /* don't reserve bitmap space for fast symlinks. */
        if (l > ocfs2_fast_symlink_chars(sb)) {
-               status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+               status = ocfs2_reserve_clusters(osb, 1, &data_ac);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
@@ -1639,7 +1652,7 @@ static int ocfs2_symlink(struct inode *dir,
                }
        }
 
-       handle = ocfs2_start_trans(osb, handle, credits);
+       handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -1717,7 +1730,10 @@ static int ocfs2_symlink(struct inode *dir,
        d_instantiate(dentry, inode);
 bail:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
+
+       ocfs2_meta_unlock(dir, 1);
+
        if (new_fe_bh)
                brelse(new_fe_bh);
        if (parent_fe_bh)
@@ -1768,7 +1784,7 @@ int ocfs2_check_dir_entry(struct inode * dir,
  * If you pass me insert_bh, I'll skip the search of the other dir
  * blocks and put the record in there.
  */
-static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+static int __ocfs2_add_entry(handle_t *handle,
                             struct inode *dir,
                             const char *name, int namelen,
                             struct inode *inode, u64 blkno,
@@ -1854,7 +1870,7 @@ bail:
  * ocfs2_delete_entry deletes a directory entry by merging it with the
  * previous entry
  */
-static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle,
+static int ocfs2_delete_entry(handle_t *handle,
                              struct inode *dir,
                              struct ocfs2_dir_entry *de_del,
                              struct buffer_head *bh)
@@ -2085,19 +2101,19 @@ bail:
 }
 
 static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
-                                   struct ocfs2_journal_handle *handle,
+                                   struct inode **ret_orphan_dir,
                                    struct inode *inode,
                                    char *name,
                                    struct buffer_head **de_bh)
 {
-       struct inode *orphan_dir_inode = NULL;
+       struct inode *orphan_dir_inode;
        struct buffer_head *orphan_dir_bh = NULL;
        int status = 0;
 
        status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
        if (status < 0) {
                mlog_errno(status);
-               goto leave;
+               return status;
        }
 
        orphan_dir_inode = ocfs2_get_system_file_inode(osb,
@@ -2106,11 +2122,12 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
        if (!orphan_dir_inode) {
                status = -ENOENT;
                mlog_errno(status);
-               goto leave;
+               return status;
        }
 
-       ocfs2_handle_add_inode(handle, orphan_dir_inode);
-       status = ocfs2_meta_lock(orphan_dir_inode, handle, &orphan_dir_bh, 1);
+       mutex_lock(&orphan_dir_inode->i_mutex);
+
+       status = ocfs2_meta_lock(orphan_dir_inode, &orphan_dir_bh, 1);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
@@ -2120,13 +2137,19 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
                                              orphan_dir_bh, name,
                                              OCFS2_ORPHAN_NAMELEN, de_bh);
        if (status < 0) {
+               ocfs2_meta_unlock(orphan_dir_inode, 1);
+
                mlog_errno(status);
                goto leave;
        }
 
+       *ret_orphan_dir = orphan_dir_inode;
+
 leave:
-       if (orphan_dir_inode)
+       if (status) {
+               mutex_unlock(&orphan_dir_inode->i_mutex);
                iput(orphan_dir_inode);
+       }
 
        if (orphan_dir_bh)
                brelse(orphan_dir_bh);
@@ -2136,28 +2159,19 @@ leave:
 }
 
 static int ocfs2_orphan_add(struct ocfs2_super *osb,
-                           struct ocfs2_journal_handle *handle,
+                           handle_t *handle,
                            struct inode *inode,
                            struct ocfs2_dinode *fe,
                            char *name,
-                           struct buffer_head *de_bh)
+                           struct buffer_head *de_bh,
+                           struct inode *orphan_dir_inode)
 {
-       struct inode *orphan_dir_inode = NULL;
        struct buffer_head *orphan_dir_bh = NULL;
        int status = 0;
        struct ocfs2_dinode *orphan_fe;
 
        mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
 
-       orphan_dir_inode = ocfs2_get_system_file_inode(osb,
-                                                      ORPHAN_DIR_SYSTEM_INODE,
-                                                      osb->slot_num);
-       if (!orphan_dir_inode) {
-               status = -ENOENT;
-               mlog_errno(status);
-               goto leave;
-       }
-
        status = ocfs2_read_block(osb,
                                  OCFS2_I(orphan_dir_inode)->ip_blkno,
                                  &orphan_dir_bh, OCFS2_BH_CACHED,
@@ -2209,9 +2223,6 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
             (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
 
 leave:
-       if (orphan_dir_inode)
-               iput(orphan_dir_inode);
-
        if (orphan_dir_bh)
                brelse(orphan_dir_bh);
 
@@ -2221,7 +2232,7 @@ leave:
 
 /* unlike orphan_add, we expect the orphan dir to already be locked here. */
 int ocfs2_orphan_del(struct ocfs2_super *osb,
-                    struct ocfs2_journal_handle *handle,
+                    handle_t *handle,
                     struct inode *orphan_dir_inode,
                     struct inode *inode,
                     struct buffer_head *orphan_dir_bh)
@@ -2300,4 +2311,5 @@ struct inode_operations ocfs2_dir_iops = {
        .rename         = ocfs2_rename,
        .setattr        = ocfs2_setattr,
        .getattr        = ocfs2_getattr,
+       .permission     = ocfs2_permission,
 };
index deaaa97dbf0bf1900dd427d0a836ab3d028f4ab2..8425944fcccdf360eeef212b7ca056686b62f564 100644 (file)
@@ -39,7 +39,7 @@ struct buffer_head *ocfs2_find_entry(const char *name,
                                     struct inode *dir,
                                     struct ocfs2_dir_entry **res_dir);
 int ocfs2_orphan_del(struct ocfs2_super *osb,
-                    struct ocfs2_journal_handle *handle,
+                    handle_t *handle,
                     struct inode *orphan_dir_inode,
                     struct inode *inode,
                     struct buffer_head *orphan_dir_bh);
index 0462a7f4e21b398300f3ed1bf8b80b6b7d30ce07..b767fd7da6ebaeb44ed58a888b80cb2dd99ce545 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/workqueue.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
+#include <linux/jbd.h>
 
 #include "cluster/nodemanager.h"
 #include "cluster/heartbeat.h"
@@ -179,9 +180,9 @@ enum ocfs2_mount_options
 #define OCFS2_OSB_SOFT_RO      0x0001
 #define OCFS2_OSB_HARD_RO      0x0002
 #define OCFS2_OSB_ERROR_FS     0x0004
+#define OCFS2_DEFAULT_ATIME_QUANTUM    60
 
 struct ocfs2_journal;
-struct ocfs2_journal_handle;
 struct ocfs2_super
 {
        struct task_struct *commit_task;
@@ -218,6 +219,7 @@ struct ocfs2_super
        unsigned long osb_flags;
 
        unsigned long s_mount_opt;
+       unsigned int s_atime_quantum;
 
        u16 max_slots;
        s16 node_num;
@@ -283,7 +285,7 @@ struct ocfs2_super
        /* Truncate log info */
        struct inode                    *osb_tl_inode;
        struct buffer_head              *osb_tl_bh;
-       struct work_struct              osb_truncate_log_wq;
+       struct delayed_work             osb_truncate_log_wq;
 
        struct ocfs2_node_map           osb_recovering_orphan_dirs;
        unsigned int                    *osb_orphan_wipes;
index 9d91e66f51a9fa4606a6f864e2bc7531e10c1a75..000d71cca6c50dfe919524fb16c7d8e261230701 100644 (file)
@@ -49,7 +49,7 @@
 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
-static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
+static int ocfs2_block_group_fill(handle_t *handle,
                                  struct inode *alloc_inode,
                                  struct buffer_head *bg_bh,
                                  u64 group_blkno,
@@ -59,9 +59,6 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
                                   struct inode *alloc_inode,
                                   struct buffer_head *bh);
 
-static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
-                                      struct ocfs2_alloc_context *ac);
-
 static int ocfs2_cluster_group_search(struct inode *inode,
                                      struct buffer_head *group_bh,
                                      u32 bits_wanted, u32 min_bits,
@@ -72,6 +69,7 @@ static int ocfs2_block_group_search(struct inode *inode,
                                    u16 *bit_off, u16 *bits_found);
 static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                                     struct ocfs2_alloc_context *ac,
+                                    handle_t *handle,
                                     u32 bits_wanted,
                                     u32 min_bits,
                                     u16 *bit_off,
@@ -79,20 +77,20 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                                     u64 *bg_blkno);
 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
                                         int nr);
-static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_set_bits(handle_t *handle,
                                             struct inode *alloc_inode,
                                             struct ocfs2_group_desc *bg,
                                             struct buffer_head *group_bh,
                                             unsigned int bit_off,
                                             unsigned int num_bits);
-static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_clear_bits(handle_t *handle,
                                               struct inode *alloc_inode,
                                               struct ocfs2_group_desc *bg,
                                               struct buffer_head *group_bh,
                                               unsigned int bit_off,
                                               unsigned int num_bits);
 
-static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
+static int ocfs2_relink_block_group(handle_t *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *fe_bh,
                                    struct buffer_head *bg_bh,
@@ -100,7 +98,7 @@ static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
                                    u16 chain);
 static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
                                                     u32 wanted);
-static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
+static int ocfs2_free_suballoc_bits(handle_t *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *alloc_bh,
                                    unsigned int start_bit,
@@ -120,8 +118,16 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode,
 
 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
 {
-       if (ac->ac_inode)
-               iput(ac->ac_inode);
+       struct inode *inode = ac->ac_inode;
+
+       if (inode) {
+               if (ac->ac_which != OCFS2_AC_USE_LOCAL)
+                       ocfs2_meta_unlock(inode, 1);
+
+               mutex_unlock(&inode->i_mutex);
+
+               iput(inode);
+       }
        if (ac->ac_bh)
                brelse(ac->ac_bh);
        kfree(ac);
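Note: ocfs2_free_alloc_context() now drops the allocator inode's cluster lock and i_mutex and puts the inode, so whoever frees the context releases everything the reserve step acquired. A rough userspace sketch of that "the context owns its locks" idea, with hypothetical names (alloc_ctx, ctx_reserve, ctx_free):

#include <pthread.h>
#include <stdlib.h>

struct alloc_ctx {
        pthread_mutex_t *lock;          /* held for the lifetime of the context */
};

/* Stand-in for the reserve step: take the lock, hand ownership to the context. */
static struct alloc_ctx *ctx_reserve(pthread_mutex_t *lock)
{
        struct alloc_ctx *ctx = calloc(1, sizeof(*ctx));

        if (!ctx)
                return NULL;
        pthread_mutex_lock(lock);
        ctx->lock = lock;
        return ctx;
}

/* Stand-in for ocfs2_free_alloc_context(): freeing releases what was taken. */
static void ctx_free(struct alloc_ctx *ctx)
{
        if (!ctx)
                return;
        if (ctx->lock)
                pthread_mutex_unlock(ctx->lock);
        free(ctx);
}

int main(void)
{
        pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;
        struct alloc_ctx *ac = ctx_reserve(&bitmap_lock);

        /* ... allocate under the lock ... */
        ctx_free(ac);
        return 0;
}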
@@ -190,7 +196,7 @@ static int ocfs2_check_group_descriptor(struct super_block *sb,
        return 0;
 }
 
-static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
+static int ocfs2_block_group_fill(handle_t *handle,
                                  struct inode *alloc_inode,
                                  struct buffer_head *bg_bh,
                                  u64 group_blkno,
@@ -273,7 +279,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
        struct ocfs2_chain_list *cl;
        struct ocfs2_alloc_context *ac = NULL;
-       struct ocfs2_journal_handle *handle = NULL;
+       handle_t *handle = NULL;
        u32 bit_off, num_bits;
        u16 alloc_rec;
        u64 bg_blkno;
@@ -284,16 +290,8 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
 
        mlog_entry_void();
 
-       handle = ocfs2_alloc_handle(osb);
-       if (!handle) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
        cl = &fe->id2.i_chain;
        status = ocfs2_reserve_clusters(osb,
-                                       handle,
                                        le16_to_cpu(cl->cl_cpg),
                                        &ac);
        if (status < 0) {
@@ -304,7 +302,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
 
        credits = ocfs2_calc_group_alloc_credits(osb->sb,
                                                 le16_to_cpu(cl->cl_cpg));
-       handle = ocfs2_start_trans(osb, handle, credits);
+       handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
@@ -389,7 +387,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
        status = 0;
 bail:
        if (handle)
-               ocfs2_commit_trans(handle);
+               ocfs2_commit_trans(osb, handle);
 
        if (ac)
                ocfs2_free_alloc_context(ac);
@@ -402,27 +400,38 @@ bail:
 }
 
 static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
-                                      struct ocfs2_alloc_context *ac)
+                                      struct ocfs2_alloc_context *ac,
+                                      int type,
+                                      u32 slot)
 {
        int status;
        u32 bits_wanted = ac->ac_bits_wanted;
-       struct inode *alloc_inode = ac->ac_inode;
+       struct inode *alloc_inode;
        struct buffer_head *bh = NULL;
-       struct ocfs2_journal_handle *handle = ac->ac_handle;
        struct ocfs2_dinode *fe;
        u32 free_bits;
 
        mlog_entry_void();
 
-       BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);
+       alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
+       if (!alloc_inode) {
+               mlog_errno(-EINVAL);
+               return -EINVAL;
+       }
 
-       ocfs2_handle_add_inode(handle, alloc_inode);
-       status = ocfs2_meta_lock(alloc_inode, handle, &bh, 1);
+       mutex_lock(&alloc_inode->i_mutex);
+
+       status = ocfs2_meta_lock(alloc_inode, &bh, 1);
        if (status < 0) {
+               mutex_unlock(&alloc_inode->i_mutex);
+               iput(alloc_inode);
+
                mlog_errno(status);
-               goto bail;
+               return status;
        }
 
+       ac->ac_inode = alloc_inode;
+
        fe = (struct ocfs2_dinode *) bh->b_data;
        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
@@ -473,12 +482,11 @@ bail:
 }
 
 int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
-                              struct ocfs2_journal_handle *handle,
                               struct ocfs2_dinode *fe,
                               struct ocfs2_alloc_context **ac)
 {
        int status;
-       struct inode *alloc_inode = NULL;
+       u32 slot;
 
        *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
        if (!(*ac)) {
@@ -488,28 +496,18 @@ int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
        }
 
        (*ac)->ac_bits_wanted = ocfs2_extend_meta_needed(fe);
-       (*ac)->ac_handle = handle;
        (*ac)->ac_which = OCFS2_AC_USE_META;
 
 #ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
-       alloc_inode = ocfs2_get_system_file_inode(osb,
-                                                 EXTENT_ALLOC_SYSTEM_INODE,
-                                                 0);
+       slot = 0;
 #else
-       alloc_inode = ocfs2_get_system_file_inode(osb,
-                                                 EXTENT_ALLOC_SYSTEM_INODE,
-                                                 osb->slot_num);
+       slot = osb->slot_num;
 #endif
-       if (!alloc_inode) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
 
-       (*ac)->ac_inode = igrab(alloc_inode);
        (*ac)->ac_group_search = ocfs2_block_group_search;
 
-       status = ocfs2_reserve_suballoc_bits(osb, (*ac));
+       status = ocfs2_reserve_suballoc_bits(osb, (*ac),
+                                            EXTENT_ALLOC_SYSTEM_INODE, slot);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -523,19 +521,14 @@ bail:
                *ac = NULL;
        }
 
-       if (alloc_inode)
-               iput(alloc_inode);
-
        mlog_exit(status);
        return status;
 }
 
 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
-                           struct ocfs2_journal_handle *handle,
                            struct ocfs2_alloc_context **ac)
 {
        int status;
-       struct inode *alloc_inode = NULL;
 
        *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
        if (!(*ac)) {
@@ -545,22 +538,13 @@ int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
        }
 
        (*ac)->ac_bits_wanted = 1;
-       (*ac)->ac_handle = handle;
        (*ac)->ac_which = OCFS2_AC_USE_INODE;
 
-       alloc_inode = ocfs2_get_system_file_inode(osb,
-                                                 INODE_ALLOC_SYSTEM_INODE,
-                                                 osb->slot_num);
-       if (!alloc_inode) {
-               status = -ENOMEM;
-               mlog_errno(status);
-               goto bail;
-       }
-
-       (*ac)->ac_inode = igrab(alloc_inode);
        (*ac)->ac_group_search = ocfs2_block_group_search;
 
-       status = ocfs2_reserve_suballoc_bits(osb, *ac);
+       status = ocfs2_reserve_suballoc_bits(osb, *ac,
+                                            INODE_ALLOC_SYSTEM_INODE,
+                                            osb->slot_num);
        if (status < 0) {
                if (status != -ENOSPC)
                        mlog_errno(status);
@@ -574,9 +558,6 @@ bail:
                *ac = NULL;
        }
 
-       if (alloc_inode)
-               iput(alloc_inode);
-
        mlog_exit(status);
        return status;
 }
@@ -588,20 +569,17 @@ int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
 {
        int status;
 
-       ac->ac_inode = ocfs2_get_system_file_inode(osb,
-                                                  GLOBAL_BITMAP_SYSTEM_INODE,
-                                                  OCFS2_INVALID_SLOT);
-       if (!ac->ac_inode) {
-               status = -EINVAL;
-               mlog(ML_ERROR, "Could not get bitmap inode!\n");
-               goto bail;
-       }
        ac->ac_which = OCFS2_AC_USE_MAIN;
        ac->ac_group_search = ocfs2_cluster_group_search;
 
-       status = ocfs2_reserve_suballoc_bits(osb, ac);
-       if (status < 0 && status != -ENOSPC)
+       status = ocfs2_reserve_suballoc_bits(osb, ac,
+                                            GLOBAL_BITMAP_SYSTEM_INODE,
+                                            OCFS2_INVALID_SLOT);
+       if (status < 0 && status != -ENOSPC) {
                mlog_errno(status);
+               goto bail;
+       }
+
 bail:
        return status;
 }
@@ -610,7 +588,6 @@ bail:
  * use so we figure it out for them, but unfortunately this clutters
  * things a bit. */
 int ocfs2_reserve_clusters(struct ocfs2_super *osb,
-                          struct ocfs2_journal_handle *handle,
                           u32 bits_wanted,
                           struct ocfs2_alloc_context **ac)
 {
@@ -618,8 +595,6 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb,
 
        mlog_entry_void();
 
-       BUG_ON(!handle);
-
        *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
        if (!(*ac)) {
                status = -ENOMEM;
@@ -628,12 +603,10 @@ int ocfs2_reserve_clusters(struct ocfs2_super *osb,
        }
 
        (*ac)->ac_bits_wanted = bits_wanted;
-       (*ac)->ac_handle = handle;
 
        status = -ENOSPC;
        if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
                status = ocfs2_reserve_local_alloc_bits(osb,
-                                                       handle,
                                                        bits_wanted,
                                                        *ac);
                if ((status < 0) && (status != -ENOSPC)) {
@@ -774,7 +747,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
        return status;
 }
 
-static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_set_bits(handle_t *handle,
                                             struct inode *alloc_inode,
                                             struct ocfs2_group_desc *bg,
                                             struct buffer_head *group_bh,
@@ -845,7 +818,7 @@ static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
        return best;
 }
 
-static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
+static int ocfs2_relink_block_group(handle_t *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *fe_bh,
                                    struct buffer_head *bg_bh,
@@ -1025,7 +998,7 @@ static int ocfs2_block_group_search(struct inode *inode,
 }
 
 static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
-                                      struct ocfs2_journal_handle *handle,
+                                      handle_t *handle,
                                       struct buffer_head *di_bh,
                                       u32 num_bits,
                                       u16 chain)
@@ -1055,6 +1028,7 @@ out:
 }
 
 static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
+                                 handle_t *handle,
                                  u32 bits_wanted,
                                  u32 min_bits,
                                  u16 *bit_off,
@@ -1067,7 +1041,6 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
        struct buffer_head *group_bh = NULL;
        struct ocfs2_group_desc *gd;
        struct inode *alloc_inode = ac->ac_inode;
-       struct ocfs2_journal_handle *handle = ac->ac_handle;
 
        ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
                               &group_bh, OCFS2_BH_CACHED, alloc_inode);
@@ -1115,6 +1088,7 @@ out:
 }
 
 static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+                             handle_t *handle,
                              u32 bits_wanted,
                              u32 min_bits,
                              u16 *bit_off,
@@ -1126,7 +1100,6 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
        u16 chain, tmp_bits;
        u32 tmp_used;
        u64 next_group;
-       struct ocfs2_journal_handle *handle = ac->ac_handle;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
        struct buffer_head *prev_group_bh = NULL;
@@ -1272,6 +1245,7 @@ bail:
 /* will give out up to bits_wanted contiguous bits. */
 static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                                     struct ocfs2_alloc_context *ac,
+                                    handle_t *handle,
                                     u32 bits_wanted,
                                     u32 min_bits,
                                     u16 *bit_off,
@@ -1313,8 +1287,8 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                 * by jumping straight to the most recently used
                  * allocation group. This helps us maintain some
                 * contiguousness across allocations. */
-               status = ocfs2_search_one_group(ac, bits_wanted, min_bits,
-                                               bit_off, num_bits,
+               status = ocfs2_search_one_group(ac, handle, bits_wanted,
+                                               min_bits, bit_off, num_bits,
                                                hint_blkno, &bits_left);
                if (!status) {
                        /* Be careful to update *bg_blkno here as the
@@ -1336,7 +1310,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
        ac->ac_chain = victim;
        ac->ac_allow_chain_relink = 1;
 
-       status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off,
+       status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
                                    num_bits, bg_blkno, &bits_left);
        if (!status)
                goto set_hint;
@@ -1360,7 +1334,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                        continue;
 
                ac->ac_chain = i;
-               status = ocfs2_search_chain(ac, bits_wanted, min_bits,
+               status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
                                            bit_off, num_bits, bg_blkno,
                                            &bits_left);
                if (!status)
@@ -1388,7 +1362,7 @@ bail:
 }
 
 int ocfs2_claim_metadata(struct ocfs2_super *osb,
-                        struct ocfs2_journal_handle *handle,
+                        handle_t *handle,
                         struct ocfs2_alloc_context *ac,
                         u32 bits_wanted,
                         u16 *suballoc_bit_start,
@@ -1401,10 +1375,10 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
        BUG_ON(!ac);
        BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
        BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
-       BUG_ON(ac->ac_handle != handle);
 
        status = ocfs2_claim_suballoc_bits(osb,
                                           ac,
+                                          handle,
                                           bits_wanted,
                                           1,
                                           suballoc_bit_start,
@@ -1425,7 +1399,7 @@ bail:
 }
 
 int ocfs2_claim_new_inode(struct ocfs2_super *osb,
-                         struct ocfs2_journal_handle *handle,
+                         handle_t *handle,
                          struct ocfs2_alloc_context *ac,
                          u16 *suballoc_bit,
                          u64 *fe_blkno)
@@ -1440,10 +1414,10 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
        BUG_ON(ac->ac_bits_given != 0);
        BUG_ON(ac->ac_bits_wanted != 1);
        BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
-       BUG_ON(ac->ac_handle != handle);
 
        status = ocfs2_claim_suballoc_bits(osb,
                                           ac,
+                                          handle,
                                           1,
                                           1,
                                           suballoc_bit,
@@ -1528,7 +1502,7 @@ static inline void ocfs2_block_to_cluster_group(struct inode *inode,
  * of any size.
  */
 int ocfs2_claim_clusters(struct ocfs2_super *osb,
-                        struct ocfs2_journal_handle *handle,
+                        handle_t *handle,
                         struct ocfs2_alloc_context *ac,
                         u32 min_clusters,
                         u32 *cluster_start,
@@ -1546,7 +1520,6 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
 
        BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
               && ac->ac_which != OCFS2_AC_USE_MAIN);
-       BUG_ON(ac->ac_handle != handle);
 
        if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
                status = ocfs2_claim_local_alloc_bits(osb,
@@ -1572,6 +1545,7 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
 
                status = ocfs2_claim_suballoc_bits(osb,
                                                   ac,
+                                                  handle,
                                                   bits_wanted,
                                                   min_clusters,
                                                   &bg_bit_off,
@@ -1598,7 +1572,7 @@ bail:
        return status;
 }
 
-static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
+static inline int ocfs2_block_group_clear_bits(handle_t *handle,
                                               struct inode *alloc_inode,
                                               struct ocfs2_group_desc *bg,
                                               struct buffer_head *group_bh,
@@ -1653,7 +1627,7 @@ bail:
 /*
  * expects the suballoc inode to already be locked.
  */
-static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
+static int ocfs2_free_suballoc_bits(handle_t *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *alloc_bh,
                                    unsigned int start_bit,
@@ -1737,7 +1711,7 @@ static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)
        return group;
 }
 
-int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
+int ocfs2_free_dinode(handle_t *handle,
                      struct inode *inode_alloc_inode,
                      struct buffer_head *inode_alloc_bh,
                      struct ocfs2_dinode *di)
@@ -1750,7 +1724,7 @@ int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
                                        inode_alloc_bh, bit, bg_blkno, 1);
 }
 
-int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
+int ocfs2_free_extent_block(handle_t *handle,
                            struct inode *eb_alloc_inode,
                            struct buffer_head *eb_alloc_bh,
                            struct ocfs2_extent_block *eb)
@@ -1763,7 +1737,7 @@ int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
                                        bit, bg_blkno, 1);
 }
 
-int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
+int ocfs2_free_clusters(handle_t *handle,
                       struct inode *bitmap_inode,
                       struct buffer_head *bitmap_bh,
                       u64 start_blk,
index c787838d1052474d335b2754c88cf0a1d68f06c1..1a3c94cb92508c8ef7631bc6e363894ef9d75874 100644 (file)
@@ -43,7 +43,6 @@ struct ocfs2_alloc_context {
 #define OCFS2_AC_USE_INODE 3
 #define OCFS2_AC_USE_META  4
        u32    ac_which;
-       struct ocfs2_journal_handle *ac_handle;
 
        /* these are used by the chain search */
        u16    ac_chain;
@@ -60,45 +59,42 @@ static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
 }
 
 int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
-                              struct ocfs2_journal_handle *handle,
                               struct ocfs2_dinode *fe,
                               struct ocfs2_alloc_context **ac);
 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
-                           struct ocfs2_journal_handle *handle,
                            struct ocfs2_alloc_context **ac);
 int ocfs2_reserve_clusters(struct ocfs2_super *osb,
-                          struct ocfs2_journal_handle *handle,
                           u32 bits_wanted,
                           struct ocfs2_alloc_context **ac);
 
 int ocfs2_claim_metadata(struct ocfs2_super *osb,
-                        struct ocfs2_journal_handle *handle,
+                        handle_t *handle,
                         struct ocfs2_alloc_context *ac,
                         u32 bits_wanted,
                         u16 *suballoc_bit_start,
                         u32 *num_bits,
                         u64 *blkno_start);
 int ocfs2_claim_new_inode(struct ocfs2_super *osb,
-                         struct ocfs2_journal_handle *handle,
+                         handle_t *handle,
                          struct ocfs2_alloc_context *ac,
                          u16 *suballoc_bit,
                          u64 *fe_blkno);
 int ocfs2_claim_clusters(struct ocfs2_super *osb,
-                        struct ocfs2_journal_handle *handle,
+                        handle_t *handle,
                         struct ocfs2_alloc_context *ac,
                         u32 min_clusters,
                         u32 *cluster_start,
                         u32 *num_clusters);
 
-int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
+int ocfs2_free_dinode(handle_t *handle,
                      struct inode *inode_alloc_inode,
                      struct buffer_head *inode_alloc_bh,
                      struct ocfs2_dinode *di);
-int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
+int ocfs2_free_extent_block(handle_t *handle,
                            struct inode *eb_alloc_inode,
                            struct buffer_head *eb_alloc_bh,
                            struct ocfs2_extent_block *eb);
-int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
+int ocfs2_free_clusters(handle_t *handle,
                        struct inode *bitmap_inode,
                        struct buffer_head *bitmap_bh,
                        u64 start_blk,
index 76b46ebbb10c1b36f17b11879f329b77433ec825..4bf39540e652fb85b99e62045bf3e84646e5f72d 100644 (file)
@@ -68,9 +68,7 @@
 
 #include "buffer_head_io.h"
 
-static kmem_cache_t *ocfs2_inode_cachep = NULL;
-
-kmem_cache_t *ocfs2_lock_cache = NULL;
+static struct kmem_cache *ocfs2_inode_cachep = NULL;
 
 /* OCFS2 needs to schedule several different types of work which
  * require cluster locking, disk I/O, recovery waits, etc. Since these
@@ -141,6 +139,7 @@ enum {
        Opt_hb_local,
        Opt_data_ordered,
        Opt_data_writeback,
+       Opt_atime_quantum,
        Opt_err,
 };
 
@@ -154,6 +153,7 @@ static match_table_t tokens = {
        {Opt_hb_local, OCFS2_HB_LOCAL},
        {Opt_data_ordered, "data=ordered"},
        {Opt_data_writeback, "data=writeback"},
+       {Opt_atime_quantum, "atime_quantum=%u"},
        {Opt_err, NULL}
 };
 
@@ -303,7 +303,7 @@ static struct inode *ocfs2_alloc_inode(struct super_block *sb)
 {
        struct ocfs2_inode_info *oi;
 
-       oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS);
+       oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS);
        if (!oi)
                return NULL;
 
@@ -707,6 +707,7 @@ static int ocfs2_parse_options(struct super_block *sb,
        while ((p = strsep(&options, ",")) != NULL) {
                int token, option;
                substring_t args[MAX_OPT_ARGS];
+               struct ocfs2_super * osb = OCFS2_SB(sb);
 
                if (!*p)
                        continue;
@@ -747,6 +748,16 @@ static int ocfs2_parse_options(struct super_block *sb,
                case Opt_data_writeback:
                        *mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
                        break;
+               case Opt_atime_quantum:
+                       if (match_int(&args[0], &option)) {
+                               status = 0;
+                               goto bail;
+                       }
+                       if (option >= 0)
+                               osb->s_atime_quantum = option;
+                       else
+                               osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+                       break;
                default:
                        mlog(ML_ERROR,
                             "Unrecognized mount option \"%s\" "
@@ -867,7 +878,7 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
                goto bail;
        }
 
-       status = ocfs2_meta_lock(inode, NULL, &bh, 0);
+       status = ocfs2_meta_lock(inode, &bh, 0);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
@@ -903,7 +914,7 @@ bail:
 }
 
 static void ocfs2_inode_init_once(void *data,
-                                 kmem_cache_t *cachep,
+                                 struct kmem_cache *cachep,
                                  unsigned long flags)
 {
        struct ocfs2_inode_info *oi = data;
@@ -914,9 +925,7 @@ static void ocfs2_inode_init_once(void *data,
                oi->ip_open_count = 0;
                spin_lock_init(&oi->ip_lock);
                ocfs2_extent_map_init(&oi->vfs_inode);
-               INIT_LIST_HEAD(&oi->ip_handle_list);
                INIT_LIST_HEAD(&oi->ip_io_markers);
-               oi->ip_handle = NULL;
                oi->ip_created_trans = 0;
                oi->ip_last_trans = 0;
                oi->ip_dir_start_lookup = 0;
@@ -948,14 +957,6 @@ static int ocfs2_initialize_mem_caches(void)
        if (!ocfs2_inode_cachep)
                return -ENOMEM;
 
-       ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
-                                            sizeof(struct ocfs2_journal_lock),
-                                            0,
-                                            SLAB_HWCACHE_ALIGN,
-                                            NULL, NULL);
-       if (!ocfs2_lock_cache)
-               return -ENOMEM;
-
        return 0;
 }
 
@@ -963,11 +964,8 @@ static void ocfs2_free_mem_caches(void)
 {
        if (ocfs2_inode_cachep)
                kmem_cache_destroy(ocfs2_inode_cachep);
-       if (ocfs2_lock_cache)
-               kmem_cache_destroy(ocfs2_lock_cache);
 
        ocfs2_inode_cachep = NULL;
-       ocfs2_lock_cache = NULL;
 }
 
 static int ocfs2_get_sector(struct super_block *sb,
@@ -1280,6 +1278,8 @@ static int ocfs2_initialize_super(struct super_block *sb,
        init_waitqueue_head(&osb->checkpoint_event);
        atomic_set(&osb->needs_checkpoint, 0);
 
+       osb->s_atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
+
        osb->node_num = O2NM_INVALID_NODE_NUM;
        osb->slot_num = OCFS2_INVALID_SLOT;
 
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        spin_lock_init(&journal->j_lock);
        journal->j_trans_id = (unsigned long) 1;
        INIT_LIST_HEAD(&journal->j_la_cleanups);
-       INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+       INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
        journal->j_state = OCFS2_JOURNAL_FREE;
 
        /* get some pseudo constants for clustersize bits */
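Note: the INIT_WORK() and delayed_work hunks adapt to the newer workqueue interface, where the callback receives a pointer to the work item itself and recovers its containing object with container_of() instead of being handed a separate data pointer. A standalone illustration of that idiom; work_item, journal_ctx and recovery_fn are invented for the sketch, not the real ocfs2 types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
        void (*func)(struct work_item *);
};

struct journal_ctx {
        int trans_id;
        struct work_item recovery_work;  /* embedded, like j_recovery_work */
};

static void recovery_fn(struct work_item *work)
{
        /* Recover the owning object from the embedded work item. */
        struct journal_ctx *j = container_of(work, struct journal_ctx, recovery_work);

        printf("recovering journal, trans_id=%d\n", j->trans_id);
}

int main(void)
{
        struct journal_ctx j = { .trans_id = 1, .recovery_work = { recovery_fn } };

        j.recovery_work.func(&j.recovery_work);
        return 0;
}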
@@ -1674,7 +1674,7 @@ void __ocfs2_error(struct super_block *sb,
        va_list args;
 
        va_start(args, fmt);
-       vsprintf(error_buf, fmt, args);
+       vsnprintf(error_buf, sizeof(error_buf), fmt, args);
        va_end(args);
 
        /* Not using mlog here because we want to show the actual
@@ -1695,7 +1695,7 @@ void __ocfs2_abort(struct super_block* sb,
        va_list args;
 
        va_start(args, fmt);
-       vsprintf(error_buf, fmt, args);
+       vsnprintf(error_buf, sizeof(error_buf), fmt, args);
        va_end(args);
 
        printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
index c0f68aa6c17515c3353547ab1dd78ee169eb4b6a..957d6878b03ef4bfa307d91d7f55f6fd0e0c885b 100644 (file)
@@ -126,6 +126,10 @@ static int ocfs2_readlink(struct dentry *dentry,
                goto out;
        }
 
+       /*
+        * Without vfsmount we can't update atime now,
+        * but we will eventually update atime here.
+        */
        ret = vfs_readlink(dentry, buffer, buflen, link);
 
        brelse(bh);
index 9707ed7a3206d5055587e5cea8555b17b8e61d8b..39814b900fc0dda863895768dda78e1ab64afa45 100644 (file)
@@ -69,7 +69,7 @@ struct ocfs2_meta_cache_item {
        sector_t        c_block;
 };
 
-static kmem_cache_t *ocfs2_uptodate_cachep = NULL;
+static struct kmem_cache *ocfs2_uptodate_cachep = NULL;
 
 void ocfs2_metadata_cache_init(struct inode *inode)
 {
index 592a6402e8511e945c316aa6637a39e1f0698d44..26f44e0074ecfb0799691ca6b1cd21cf4273fd4a 100644 (file)
@@ -330,13 +330,13 @@ out:
        return 0;
 }
 
-static kmem_cache_t *op_inode_cachep;
+static struct kmem_cache *op_inode_cachep;
 
 static struct inode *openprom_alloc_inode(struct super_block *sb)
 {
        struct op_inode_info *oi;
 
-       oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL);
+       oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL);
        if (!oi)
                return NULL;
 
@@ -415,7 +415,7 @@ static struct file_system_type openprom_fs_type = {
        .kill_sb        = kill_anon_super,
 };
 
-static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags)
+static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags)
 {
        struct op_inode_info *oi = (struct op_inode_info *) data;
 
index 3068528890a6255191d813195fb636363584f2e5..9917a8c360f2da0537baea01559b468825476521 100644 (file)
@@ -43,6 +43,7 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
                        if (warn_no_part)
                                printk("Dev %s: unable to read RDB block %d\n",
                                       bdevname(bdev, b), blk);
+                       res = -1;
                        goto rdb_done;
                }
                if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
@@ -79,6 +80,7 @@ amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
                        if (warn_no_part)
                                printk("Dev %s: unable to read partition block %d\n",
                                       bdevname(bdev, b), blk);
+                       res = -1;
                        goto rdb_done;
                }
                pb  = (struct PartitionBlock *)data;
index 192a6adfdefddb504616436969d24732203ff1f8..1f3572d5b755127ed2043a7329815687dff06f8e 100644 (file)
@@ -88,7 +88,7 @@ int atari_partition(struct parsed_partitions *state, struct block_device *bdev)
                        if (!xrs) {
                                printk (" block %ld read failed\n", partsect);
                                put_dev_sector(sect);
-                               return 0;
+                               return -1;
                        }
 
                        /* ++roman: sanity check: bit 0 of flg field must be set */
index 6fb4b6150d7701cd57085f80a0f9222f31be53ed..1901137f4eca3d66e46e30bec858f7c35ff154f8 100644 (file)
@@ -153,7 +153,7 @@ static struct parsed_partitions *
 check_partition(struct gendisk *hd, struct block_device *bdev)
 {
        struct parsed_partitions *state;
-       int i, res;
+       int i, res, err;
 
        state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
        if (!state)
@@ -165,19 +165,30 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
                sprintf(state->name, "p");
 
        state->limit = hd->minors;
-       i = res = 0;
+       i = res = err = 0;
        while (!res && check_part[i]) {
                memset(&state->parts, 0, sizeof(state->parts));
                res = check_part[i++](state, bdev);
+               if (res < 0) {
+                       /* We have hit an I/O error which we don't report now.
+                        * But record it, and let the others do their job.
+                        */
+                       err = res;
+                       res = 0;
+               }
+
        }
        if (res > 0)
                return state;
+       /* The partition is unrecognized, so report I/O errors if there were any. */
+       if (err)
+               res = err;
        if (!res)
                printk(" unknown partition table\n");
        else if (warn_no_part)
                printk(" unable to read partition table\n");
        kfree(state);
-       return NULL;
+       return ERR_PTR(res);
 }
 
 /*
@@ -494,6 +505,8 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
                disk->fops->revalidate_disk(disk);
        if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
                return 0;
+       if (IS_ERR(state))      /* I/O error reading the partition table */
+               return PTR_ERR(state);
        for (p = 1; p < state->limit; p++) {
                sector_t size = state->parts[p].size;
                sector_t from = state->parts[p].from;
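Note: check_partition() now distinguishes "no partition table recognized" from "the device could not be read" by returning an errno encoded in the pointer, which rescan_partitions() unpacks with IS_ERR()/PTR_ERR(). A userspace sketch of that three-way return convention; err_ptr/is_err/ptr_err and probe_table() are local stand-ins for the kernel helpers:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

static int table;  /* pretend result object */

static void *probe_table(int simulate)
{
        if (simulate < 0)
                return err_ptr(-EIO);   /* read failed: propagate the error */
        if (simulate == 0)
                return NULL;            /* readable, but no table recognized */
        return &table;                  /* success */
}

int main(void)
{
        void *state = probe_table(-1);

        if (!state)
                printf("unknown partition table\n");
        else if (is_err(state))
                printf("unable to read partition table (%ld)\n", ptr_err(state));
        else
                printf("partition table found\n");
        return 0;
}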
index d352a7381fed88ff20f5f1c7b9b6365a2264e436..9f7ad4244f63d64ed723939c568fc88de53ae7c3 100644 (file)
@@ -43,7 +43,7 @@ cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
 int
 ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
 {
-       int blocksize, offset, size;
+       int blocksize, offset, size, res;
        loff_t i_size;
        dasd_information_t *info;
        struct hd_geometry *geo;
@@ -56,15 +56,16 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
        unsigned char *data;
        Sector sect;
 
+       res = 0;
        blocksize = bdev_hardsect_size(bdev);
        if (blocksize <= 0)
-               return 0;
+               goto out_exit;
        i_size = i_size_read(bdev->bd_inode);
        if (i_size == 0)
-               return 0;
+               goto out_exit;
 
        if ((info = kmalloc(sizeof(dasd_information_t), GFP_KERNEL)) == NULL)
-               goto out_noinfo;
+               goto out_exit;
        if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
                goto out_nogeo;
        if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL)
@@ -72,7 +73,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
 
        if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
            ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
-               goto out_noioctl;
+               goto out_freeall;
 
        /*
         * Get volume label, extract name and type.
@@ -92,6 +93,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
        EBCASC(type, 4);
        EBCASC(name, 6);
 
+       res = 1;
+
        /*
         * Three different types: CMS1, VOL1 and LNX1/unlabeled
         */
@@ -156,6 +159,9 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
                        counter++;
                        blk++;
                }
+               if (!data)
+               /* Are we not supposed to report this? */
+                       goto out_readerr;
        } else {
                /*
                 * Old style LNX1 or unlabeled disk
@@ -171,18 +177,17 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
        }
 
        printk("\n");
-       kfree(label);
-       kfree(geo);
-       kfree(info);
-       return 1;
+       goto out_freeall;
+
 
 out_readerr:
-out_noioctl:
+       res = -1;
+out_freeall:
        kfree(label);
 out_nolab:
        kfree(geo);
 out_nogeo:
        kfree(info);
-out_noinfo:
-       return 0;
+out_exit:
+       return res;
 }
index b1626f269a3445e38f34c1bc338734e0893bbd32..ae36b89b1a37040a0ff46b241c02f1022b62918f 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -830,7 +830,14 @@ void free_pipe_info(struct inode *inode)
 static struct vfsmount *pipe_mnt __read_mostly;
 static int pipefs_delete_dentry(struct dentry *dentry)
 {
-       return 1;
+       /*
+        * At creation time, we pretended this dentry was hashed
+        * (by clearing the DCACHE_UNHASHED bit in d_flags).
+        * At delete time, we restore the truth: not hashed
+        * (so that dput() can proceed correctly).
+        */
+       dentry->d_flags |= DCACHE_UNHASHED;
+       return 0;
 }
 
 static struct dentry_operations pipefs_dentry_operations = {
@@ -891,17 +898,22 @@ struct file *create_write_pipe(void)
        if (!inode)
                goto err_file;
 
-       sprintf(name, "[%lu]", inode->i_ino);
+       this.len = sprintf(name, "[%lu]", inode->i_ino);
        this.name = name;
-       this.len = strlen(name);
-       this.hash = inode->i_ino; /* will go */
+       this.hash = 0;
        err = -ENOMEM;
        dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
        if (!dentry)
                goto err_inode;
 
        dentry->d_op = &pipefs_dentry_operations;
-       d_add(dentry, inode);
+       /*
+        * We don't want to publish this dentry into the global dentry hash table.
+        * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
+        * This permits a working /proc/$pid/fd/XXX on pipes.
+        */
+       dentry->d_flags &= ~DCACHE_UNHASHED;
+       d_instantiate(dentry, inode);
        f->f_vfsmnt = mntget(pipe_mnt);
        f->f_dentry = dentry;
        f->f_mapping = inode->i_mapping;
index 7431d7ba2d097981e1e87a191dc0ab81a2684537..f6c7762725727b49af2e7b4afbdff56a2440f822 100644 (file)
@@ -8,8 +8,9 @@ proc-y                  := nommu.o task_nommu.o
 proc-$(CONFIG_MMU)     := mmu.o task_mmu.o
 
 proc-y       += inode.o root.o base.o generic.o array.o \
-               kmsg.o proc_tty.o proc_misc.o
+               proc_tty.o proc_misc.o
 
 proc-$(CONFIG_PROC_KCORE)      += kcore.o
 proc-$(CONFIG_PROC_VMCORE)     += vmcore.o
 proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o
+proc-$(CONFIG_PRINTK)  += kmsg.o
index 795319c54f7283430f2ec28ed346d39af6291e34..b859fc749c07115d2e2bbff251b347fa89e2f9c1 100644 (file)
@@ -683,8 +683,6 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
        char buffer[PROC_NUMBUF], *end;
        int oom_adjust;
 
-       if (!capable(CAP_SYS_RESOURCE))
-               return -EPERM;
        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
@@ -699,6 +697,10 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
        task = get_proc_task(file->f_dentry->d_inode);
        if (!task)
                return -ESRCH;
+       if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) {
+               put_task_struct(task);
+               return -EACCES;
+       }
        task->oomkilladj = oom_adjust;
        put_task_struct(task);
        if (end - buffer == 0)
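Note: the oom_adjust_write() change moves the privilege test so that any writer may make a task more OOM-killable, while lowering the value below its current setting still requires CAP_SYS_RESOURCE. A compact sketch of that asymmetric permission rule; has_cap_sys_resource() and set_oom_adjust() are stand-ins for capable() and the real proc handler:

#include <errno.h>
#include <stdio.h>

static int has_cap_sys_resource(void) { return 0; }  /* pretend unprivileged */

static int set_oom_adjust(int *current_adj, int new_adj)
{
        if (new_adj < *current_adj && !has_cap_sys_resource())
                return -EACCES;         /* lowering needs CAP_SYS_RESOURCE */
        *current_adj = new_adj;
        return 0;
}

int main(void)
{
        int adj = 0;

        printf("raise: %d\n", set_oom_adjust(&adj, 5));    /* allowed */
        printf("lower: %d\n", set_oom_adjust(&adj, -10));   /* -EACCES here */
        return 0;
}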
@@ -1883,8 +1885,9 @@ out:
        return;
 }
 
-struct dentry *proc_pid_instantiate(struct inode *dir,
-       struct dentry * dentry, struct task_struct *task, void *ptr)
+static struct dentry *proc_pid_instantiate(struct inode *dir,
+                                          struct dentry * dentry,
+                                          struct task_struct *task, void *ptr)
 {
        struct dentry *error = ERR_PTR(-ENOENT);
        struct inode *inode;
index 49dfb2ab783e6f921fdd344f1b6c39ee29cd1935..e26945ba685b5c80f0abdca78f4631db7d64f992 100644 (file)
@@ -81,14 +81,14 @@ static void proc_read_inode(struct inode * inode)
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
 }
 
-static kmem_cache_t * proc_inode_cachep;
+static struct kmem_cache * proc_inode_cachep;
 
 static struct inode *proc_alloc_inode(struct super_block *sb)
 {
        struct proc_inode *ei;
        struct inode *inode;
 
-       ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL);
+       ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        ei->pid = NULL;
@@ -105,7 +105,7 @@ static void proc_destroy_inode(struct inode *inode)
        kmem_cache_free(proc_inode_cachep, PROC_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct proc_inode *ei = (struct proc_inode *) foo;
 
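
The kmem_cache_t -> struct kmem_cache and SLAB_KERNEL -> GFP_KERNEL conversion seen here recurs throughout this series (qnx4, reiserfs, romfs, smbfs, sysv, udf, ufs below). A minimal sketch of the per-filesystem inode cache pattern after the conversion, with illustrative names:

    struct foo_inode_info {                 /* illustrative per-fs inode */
            struct inode vfs_inode;
    };

    static struct kmem_cache *foo_inode_cachep;

    static struct inode *foo_alloc_inode(struct super_block *sb)
    {
            /* GFP_KERNEL replaces the old SLAB_KERNEL alias. */
            struct foo_inode_info *ei =
                    kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);

            if (!ei)
                    return NULL;
            return &ei->vfs_inode;
    }
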
index 1294eda4acaeefc5739bd4ad267a7fca2dd679dd..1be73082edd388bd443f0be5c559cdf756eccbbb 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
+#define CORE_STR "CORE"
 
 static int open_kcore(struct inode * inode, struct file * filp)
 {
@@ -82,10 +83,11 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
        }
        *elf_buflen =   sizeof(struct elfhdr) + 
                        (*nphdr + 2)*sizeof(struct elf_phdr) + 
-                       3 * (sizeof(struct elf_note) + 4) +
-                       sizeof(struct elf_prstatus) +
-                       sizeof(struct elf_prpsinfo) +
-                       sizeof(struct task_struct);
+                       3 * ((sizeof(struct elf_note)) +
+                            roundup(sizeof(CORE_STR), 4)) +
+                       roundup(sizeof(struct elf_prstatus), 4) +
+                       roundup(sizeof(struct elf_prpsinfo), 4) +
+                       roundup(sizeof(struct task_struct), 4);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return size + *elf_buflen;
 }
@@ -210,7 +212,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
        nhdr->p_offset  = offset;
 
        /* set up the process status */
-       notes[0].name = "CORE";
+       notes[0].name = CORE_STR;
        notes[0].type = NT_PRSTATUS;
        notes[0].datasz = sizeof(struct elf_prstatus);
        notes[0].data = &prstatus;
@@ -221,7 +223,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
        bufp = storenote(&notes[0], bufp);
 
        /* set up the process info */
-       notes[1].name   = "CORE";
+       notes[1].name   = CORE_STR;
        notes[1].type   = NT_PRPSINFO;
        notes[1].datasz = sizeof(struct elf_prpsinfo);
        notes[1].data   = &prpsinfo;
@@ -238,7 +240,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
        bufp = storenote(&notes[1], bufp);
 
        /* set up the task structure */
-       notes[2].name   = "CORE";
+       notes[2].name   = CORE_STR;
        notes[2].type   = NT_TASKSTRUCT;
        notes[2].datasz = sizeof(struct task_struct);
        notes[2].data   = current;
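
The sizing fix above follows the ELF note layout: both the note name (here CORE_STR) and the payload are padded to 4-byte boundaries. A hedged sketch of the per-note size computation (helper name is illustrative):

    /* Illustrative: on-disk size of one ELF note record, including the
     * 4-byte padding of both the name and the descriptor. */
    static size_t notesize(size_t namesz, size_t descsz)
    {
            return sizeof(struct elf_note) +
                   roundup(namesz, 4) +
                   roundup(descsz, 4);
    }
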
index 93c43b676e59f374813e9ae7095470ce9c23b87c..51815cece6f37cd92852d6b753049f12ce19bad6 100644 (file)
@@ -696,9 +696,11 @@ void __init proc_misc_init(void)
        proc_symlink("mounts", NULL, "self/mounts");
 
        /* And now for trickier ones */
+#ifdef CONFIG_PRINTK
        entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
        if (entry)
                entry->proc_fops = &proc_kmsg_operations;
+#endif
        create_seq_entry("devices", 0, &proc_devinfo_operations);
        create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
 #ifdef CONFIG_BLOCK
index 5a41db2a218dedccfad415885fd54727d0c2fade..c047dc654d5c4999ead8383f190b2013d6f14d11 100644 (file)
@@ -515,12 +515,12 @@ static void qnx4_read_inode(struct inode *inode)
        brelse(bh);
 }
 
-static kmem_cache_t *qnx4_inode_cachep;
+static struct kmem_cache *qnx4_inode_cachep;
 
 static struct inode *qnx4_alloc_inode(struct super_block *sb)
 {
        struct qnx4_inode_info *ei;
-       ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL);
+       ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -531,7 +531,7 @@ static void qnx4_destroy_inode(struct inode *inode)
        kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep,
+static void init_once(void *foo, struct kmem_cache * cachep,
                      unsigned long flags)
 {
        struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
index ac14318c81ba71eaf944e120c26f4b9630d16b0e..373d862c3f87da6c12650a7427a64eb5740c5481 100644 (file)
@@ -317,12 +317,11 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl
                        /* area filled with zeroes, to supply as list of zero blocknumbers
                           We allocate it outside of loop just in case loop would spin for
                           several iterations. */
-                       char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);      // We cannot insert more than MAX_ITEM_LEN bytes anyway.
+                       char *zeros = kzalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC);      // We cannot insert more than MAX_ITEM_LEN bytes anyway.
                        if (!zeros) {
                                res = -ENOMEM;
                                goto error_exit_free_blocks;
                        }
-                       memset(zeros, 0, to_paste * UNFM_P_SIZE);
                        do {
                                to_paste =
                                    min_t(__u64, hole_size,
@@ -407,6 +406,8 @@ static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handl
                                   we restart it. This will also free the path. */
                                if (journal_transaction_should_end
                                    (th, th->t_blocks_allocated)) {
+                                       inode->i_size = cpu_key_k_offset(&key) +
+                                               (to_paste << inode->i_blkbits);
                                        res =
                                            restart_transaction(th, inode,
                                                                &path);
@@ -1045,6 +1046,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
                        char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
                        memset(kaddr, 0, from);
                        kunmap_atomic(kaddr, KM_USER0);
+                       flush_dcache_page(prepared_pages[0]);
                }
                if (to != PAGE_CACHE_SIZE) {    /* Last page needs to be partially zeroed */
                        char *kaddr =
@@ -1052,6 +1054,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
                                        KM_USER0);
                        memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
                        kunmap_atomic(kaddr, KM_USER0);
+                       flush_dcache_page(prepared_pages[num_pages - 1]);
                }
 
                /* Since all blocks are new - use already calculated value */
@@ -1185,6 +1188,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
                                        memset(kaddr + block_start, 0,
                                               from - block_start);
                                        kunmap_atomic(kaddr, KM_USER0);
+                                       flush_dcache_page(prepared_pages[0]);
                                        set_buffer_uptodate(bh);
                                }
                        }
@@ -1222,6 +1226,7 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
                                                        KM_USER0);
                                        memset(kaddr + to, 0, block_end - to);
                                        kunmap_atomic(kaddr, KM_USER0);
+                                       flush_dcache_page(prepared_pages[num_pages - 1]);
                                        set_buffer_uptodate(bh);
                                }
                        }
@@ -1307,56 +1312,8 @@ static ssize_t reiserfs_file_write(struct file *file,    /* the file we are going t
                        count = MAX_NON_LFS - (unsigned long)*ppos;
        }
 
-       if (file->f_flags & O_DIRECT) { // Direct IO needs treatment
-               ssize_t result, after_file_end = 0;
-               if ((*ppos + count >= inode->i_size)
-                   || (file->f_flags & O_APPEND)) {
-                       /* If we are appending a file, we need to put this savelink in here.
-                          If we will crash while doing direct io, finish_unfinished will
-                          cut the garbage from the file end. */
-                       reiserfs_write_lock(inode->i_sb);
-                       err =
-                           journal_begin(&th, inode->i_sb,
-                                         JOURNAL_PER_BALANCE_CNT);
-                       if (err) {
-                               reiserfs_write_unlock(inode->i_sb);
-                               return err;
-                       }
-                       reiserfs_update_inode_transaction(inode);
-                       add_save_link(&th, inode, 1 /* Truncate */ );
-                       after_file_end = 1;
-                       err =
-                           journal_end(&th, inode->i_sb,
-                                       JOURNAL_PER_BALANCE_CNT);
-                       reiserfs_write_unlock(inode->i_sb);
-                       if (err)
-                               return err;
-               }
-               result = do_sync_write(file, buf, count, ppos);
-
-               if (after_file_end) {   /* Now update i_size and remove the savelink */
-                       struct reiserfs_transaction_handle th;
-                       reiserfs_write_lock(inode->i_sb);
-                       err = journal_begin(&th, inode->i_sb, 1);
-                       if (err) {
-                               reiserfs_write_unlock(inode->i_sb);
-                               return err;
-                       }
-                       reiserfs_update_inode_transaction(inode);
-                       mark_inode_dirty(inode);
-                       err = journal_end(&th, inode->i_sb, 1);
-                       if (err) {
-                               reiserfs_write_unlock(inode->i_sb);
-                               return err;
-                       }
-                       err = remove_save_link(inode, 1 /* truncate */ );
-                       reiserfs_write_unlock(inode->i_sb);
-                       if (err)
-                               return err;
-               }
-
-               return result;
-       }
+       if (file->f_flags & O_DIRECT)
+               return do_sync_write(file, buf, count, ppos);
 
        if (unlikely((ssize_t) count < 0))
                return -EINVAL;
index 9c69bcacad2286c1e7a926bf2a5000ae9d84da64..254239e6f9e39b1f1d87eaa4f2e12f209599c870 100644 (file)
@@ -216,11 +216,12 @@ static int file_capable(struct inode *inode, long block)
        BUG_ON(!th->t_trans_id);
        BUG_ON(!th->t_refcount);
 
+       pathrelse(path);
+
        /* we cannot restart while nested */
        if (th->t_refcount > 1) {
                return 0;
        }
-       pathrelse(path);
        reiserfs_update_sd(th, inode);
        err = journal_end(th, s, len);
        if (!err) {
@@ -928,15 +929,12 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
                        if (blocks_needed == 1) {
                                un = &unf_single;
                        } else {
-                               un = kmalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);      // We need to avoid scheduling.
+                               un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);      // We need to avoid scheduling.
                                if (!un) {
                                        un = &unf_single;
                                        blocks_needed = 1;
                                        max_to_insert = 0;
-                               } else
-                                       memset(un, 0,
-                                              UNFM_P_SIZE * min(blocks_needed,
-                                                                max_to_insert));
+                               }
                        }
                        if (blocks_needed <= max_to_insert) {
                                /* we are going to add target block to the file. Use allocated
index ac93174c96398a5b6cdc3a21be94026b1f375765..7280a23ef3444aa4b8370dcb99b8ee46ee246dec 100644 (file)
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
 static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
-static void flush_async_commits(void *p);
+static void flush_async_commits(struct work_struct *work);
 static void queue_log_writer(struct super_block *s);
 
 /* values for join in do_journal_begin_r */
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
        if (reiserfs_mounted_fs_count <= 1)
                commit_wq = create_workqueue("reiserfs");
 
-       INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
+       INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+       journal->j_work_sb = p_s_sb;
        return 0;
       free_and_return:
        free_journal_ram(p_s_sb);
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th,
 /*
 ** writeback the pending async commits to disk
 */
-static void flush_async_commits(void *p)
+static void flush_async_commits(struct work_struct *work)
 {
-       struct super_block *p_s_sb = p;
-       struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+       struct reiserfs_journal *journal =
+               container_of(work, struct reiserfs_journal, j_work.work);
+       struct super_block *p_s_sb = journal->j_work_sb;
        struct reiserfs_journal_list *jl;
        struct list_head *entry;
 
index 17249994110f5fcf1f8a9f07c18bfb3a35710ca2..7fb5fb036f90e2c48413541ffe58cb5006277667 100644 (file)
@@ -490,13 +490,13 @@ static void reiserfs_put_super(struct super_block *s)
        return;
 }
 
-static kmem_cache_t *reiserfs_inode_cachep;
+static struct kmem_cache *reiserfs_inode_cachep;
 
 static struct inode *reiserfs_alloc_inode(struct super_block *sb)
 {
        struct reiserfs_inode_info *ei;
        ei = (struct reiserfs_inode_info *)
-           kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL);
+           kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -507,7 +507,7 @@ static void reiserfs_destroy_inode(struct inode *inode)
        kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode));
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo;
 
@@ -1549,13 +1549,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
        struct reiserfs_sb_info *sbi;
        int errval = -EINVAL;
 
-       sbi = kmalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
+       sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
        if (!sbi) {
                errval = -ENOMEM;
                goto error;
        }
        s->s_fs_info = sbi;
-       memset(sbi, 0, sizeof(struct reiserfs_sb_info));
        /* Set default values for options: non-aggressive tails, RO on errors */
        REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
        REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO);
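
kzalloc() combines the allocation and the memset() shown being removed above. A brief illustration with a hypothetical structure:

    struct foo_info *info;

    /* One call allocates and zero-fills, replacing kmalloc() + memset(). */
    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info)
            return -ENOMEM;
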
index ddcd9e1ef2828a52e91e1d5a71754509dc5f8dbb..c5af088d4a4ca6f4de0c9f07d20f873d9b8847e6 100644 (file)
@@ -550,12 +550,12 @@ romfs_read_inode(struct inode *i)
        }
 }
 
-static kmem_cache_t * romfs_inode_cachep;
+static struct kmem_cache * romfs_inode_cachep;
 
 static struct inode *romfs_alloc_inode(struct super_block *sb)
 {
        struct romfs_inode_info *ei;
-       ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL);
+       ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -566,7 +566,7 @@ static void romfs_destroy_inode(struct inode *inode)
        kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct romfs_inode_info *ei = (struct romfs_inode_info *) foo;
 
index 555b9ac04c25be9ac5354bea2810ef79290ab92e..10690aa401c7740fbd3d7cab8548e618f89f8474 100644 (file)
@@ -26,7 +26,7 @@
  *     ERR_PTR(error).  In the end of sequence they return %NULL. ->show()
  *     returns 0 in case of success and negative number in case of error.
  */
-int seq_open(struct file *file, struct seq_operations *op)
+int seq_open(struct file *file, const struct seq_operations *op)
 {
        struct seq_file *p = file->private_data;
 
@@ -408,7 +408,7 @@ EXPORT_SYMBOL(single_open);
 
 int single_release(struct inode *inode, struct file *file)
 {
-       struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
+       const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
        int res = seq_release(inode, file);
        kfree(op);
        return res;
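
With seq_open() now taking a const pointer, the operations table can live in read-only data. A brief sketch of a typical caller (callback names are illustrative):

    static const struct seq_operations foo_seq_ops = {
            .start  = foo_seq_start,        /* illustrative callbacks */
            .next   = foo_seq_next,
            .stop   = foo_seq_stop,
            .show   = foo_seq_show,
    };

    static int foo_seq_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &foo_seq_ops);
    }
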
index 2c122ee83adbdd3d693c2c8a4fb671aeca4b85f3..4af4cd729a5a8589706e536d3860175c69b90d2d 100644 (file)
@@ -50,12 +50,12 @@ static void smb_put_super(struct super_block *);
 static int  smb_statfs(struct dentry *, struct kstatfs *);
 static int  smb_show_options(struct seq_file *, struct vfsmount *);
 
-static kmem_cache_t *smb_inode_cachep;
+static struct kmem_cache *smb_inode_cachep;
 
 static struct inode *smb_alloc_inode(struct super_block *sb)
 {
        struct smb_inode_info *ei;
-       ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL);
+       ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
@@ -66,7 +66,7 @@ static void smb_destroy_inode(struct inode *inode)
        kmem_cache_free(smb_inode_cachep, SMB_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct smb_inode_info *ei = (struct smb_inode_info *) foo;
        unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
index 0fb74697abc401694ea0a0e9aed4f8352a5213c5..a4bcae8a9aff23f0835e996427d38036afb73c47 100644 (file)
@@ -25,7 +25,7 @@
 #define ROUND_UP(x) (((x)+3) & ~3)
 
 /* cache for request structures */
-static kmem_cache_t *req_cachep;
+static struct kmem_cache *req_cachep;
 
 static int smb_request_send_req(struct smb_request *req);
 
@@ -61,7 +61,7 @@ static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
        struct smb_request *req;
        unsigned char *buf = NULL;
 
-       req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
+       req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
        VERBOSE("allocating request: %p\n", req);
        if (!req)
                goto out;
index bca07eb2003c395b8e40d33cfe3ce548065d9ff7..a0ebfc7f8a6446e361a0146fc39dbbc5883c058f 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -51,13 +51,6 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
                return inode->i_op->getattr(mnt, dentry, stat);
 
        generic_fillattr(inode, stat);
-       if (!stat->blksize) {
-               struct super_block *s = inode->i_sb;
-               unsigned blocks;
-               blocks = (stat->size+s->s_blocksize-1) >> s->s_blocksize_bits;
-               stat->blocks = (s->s_blocksize / 512) * blocks;
-               stat->blksize = s->s_blocksize;
-       }
        return 0;
 }
 
index 20551a1b8a56494ffb7ea2b0a10c01f8f7f99a78..e503f858fba84ffa6462ae042c95077721e41f96 100644 (file)
@@ -16,7 +16,7 @@
 
 struct vfsmount *sysfs_mount;
 struct super_block * sysfs_sb = NULL;
-kmem_cache_t *sysfs_dir_cachep;
+struct kmem_cache *sysfs_dir_cachep;
 
 static struct super_operations sysfs_ops = {
        .statfs         = simple_statfs,
index 6f3d6bd52887bcd541aa917ede96338d47300bfe..bd7cec295dab25dd3451ebf2154bd6784f483695 100644 (file)
@@ -1,6 +1,6 @@
 
 extern struct vfsmount * sysfs_mount;
-extern kmem_cache_t *sysfs_dir_cachep;
+extern struct kmem_cache *sysfs_dir_cachep;
 
 extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *);
 extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
diff --git a/fs/sysv/CHANGES b/fs/sysv/CHANGES
deleted file mode 100644 (file)
index 66ea6e9..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-Mon, 15 Dec 1997         Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-       *    namei.c: struct sysv_dir_inode_operations updated to use dentries.
-
-Fri, 23 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-       *    inode.c: corrected 1 track offset setting (in sb->sv_block_base).
-                     Originally it was overridden (by setting to zero)
-                     in detected_[xenix,sysv4,sysv2,coherent]. Thanks
-                     to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
-                     for identifying the problem.
-
-Tue, 27 Jan 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-        *    inode.c: added 2048-byte block support to SystemV FS.
-                     Merged detected_bs[512,1024,2048]() into one function:
-                     void detected_bs (u_char type, struct super_block *sb).
-                     Thanks to Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
-                     for the patch.
-
-Wed, 4 Feb 1998   Krzysztof G. Baranowski <kgb@manjak.knm.org.pl>
-       *    namei.c: removed static subdir(); is_subdir() from dcache.c
-                     is used instead. Cosmetic changes.
-
-Thu, 3 Dec 1998   Al Viro (viro@parcelfarce.linux.theplanet.co.uk)
-       *    namei.c (sysv_rmdir):
-                     Bugectomy: old check for victim being busy
-                     (inode->i_count) wasn't replaced (with checking
-                     dentry->d_count) and escaped Linus in the last round
-                     of changes. Shot and buried.
-
-Wed, 9 Dec 1998   AV
-       *    namei.c (do_sysv_rename):
-                      Fixed incorrect check for other owners + race.
-                      Removed checks that went to VFS.
-       *    namei.c (sysv_unlink):
-                      Removed checks that went to VFS.
-
-Thu, 10 Dec 1998   AV
-       *    namei.c (do_mknod):
-                       Removed dead code - mknod is never asked to
-                       create a symlink or directory. Incidentially,
-                       it wouldn't do it right if it would be called.
-
-Sat, 26 Dec 1998   KGB
-       *    inode.c (detect_sysv4):
-                       Added detection of expanded s_type field (0x10,
-                       0x20 and 0x30).  Forced read-only access in this case.
-
-Sun, 21 Mar 1999   AV
-       *    namei.c (sysv_link):
-                       Fixed i_count usage that resulted in dcache corruption.
-       *    inode.c:
-                       Filled ->delete_inode() method with sysv_delete_inode().
-                       sysv_put_inode() is gone, as it tried to do ->delete_
-                       _inode()'s job.
-       *    ialloc.c: (sysv_free_inode):
-                       Fixed race.
-
-Sun, 30 Apr 1999   AV
-       *    namei.c (sysv_mknod):
-                       Removed dead code (S_IFREG case is now passed to
-                       ->create() by VFS).
diff --git a/fs/sysv/ChangeLog b/fs/sysv/ChangeLog
deleted file mode 100644 (file)
index f403f8b..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-Thu Feb 14 2002  Andrew Morton  <akpm@zip.com.au>
-
-       * dir_commit_chunk(): call writeout_one_page() as well as
-         waitfor_one_page() for IS_SYNC directories, so that we
-         actually do sync the directory. (forward-port from 2.4).
-
-Thu Feb  7 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-       * super.c: switched to ->get_sb()
-       * ChangeLog: fixed dates ;-)
-
-2002-01-24  David S. Miller  <davem@redhat.com>
-
-       * inode.c: Include linux/init.h
-
-Mon Jan 21 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-       * ialloc.c (sysv_new_inode): zero SYSV_I(inode)->i_data out.
-       * i_vnode renamed to vfs_inode.  Sorry, but let's keep that
-         consistent.
-
-Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
-
-       * include/linux/sysv_fs.h (SYSV_I): Get fs-private inode data using
-               list_entry() instead of inode->u.
-       * include/linux/sysv_fs_i.h: Add 'struct inode  i_vnode' field to
-               sysv_inode_info structure.
-       * inode.c: Include <linux/slab.h>, implement alloc_inode/destroy_inode
-               sop methods, add infrastructure for per-fs inode slab cache.
-       * super.c (init_sysv_fs): Initialize inode cache, recover properly
-               in the case of failed register_filesystem for V7.
-       (exit_sysv_fs): Destroy inode cache.
-
-Sat Jan 19 2002  Christoph Hellwig  <hch@infradead.org>
-
-       * include/linux/sysv_fs.h: Include <linux/sysv_fs_i.h>, declare SYSV_I().
-       * dir.c (sysv_find_entry): Use SYSV_I() instead of ->u.sysv_i to
-               access fs-private inode data.
-       * ialloc.c (sysv_new_inode): Likewise.
-       * inode.c (sysv_read_inode): Likewise.
-       (sysv_update_inode): Likewise.
-       * itree.c (get_branch): Likewise.
-       (sysv_truncate): Likewise.
-       * symlink.c (sysv_readlink): Likewise.
-       (sysv_follow_link): Likewise.
-
-Fri Jan  4 2002  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-       * ialloc.c (sysv_free_inode): Use sb->s_id instead of bdevname().
-       * inode.c (sysv_read_inode): Likewise.
-         (sysv_update_inode): Likewise.
-         (sysv_sync_inode): Likewise.
-       * super.c (detect_sysv): Likewise.
-         (complete_read_super): Likewise.
-         (sysv_read_super): Likewise.
-         (v7_read_super): Likewise.
-
-Sun Dec 30 2001  Manfred Spraul  <manfred@colorfullife.com>
-
-       * dir.c (dir_commit_chunk): Do not set dir->i_version.
-       (sysv_readdir): Likewise.
-
-Thu Dec 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-       * itree.c (get_block): Use map_bh() to fill out bh_result.
-
-Tue Dec 25 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-       * super.c (sysv_read_super): Use sb_set_blocksize() to set blocksize.
-         (v7_read_super): Likewise.
-
-Tue Nov 27 2001  Alexander Viro  <viro@parcelfarce.linux.theplanet.co.uk>
-
-       * itree.c (get_block): Change type for iblock argument to sector_t.
-       * super.c (sysv_read_super): Set s_blocksize early.
-         (v7_read_super): Likewise.
-       * balloc.c (sysv_new_block): Use sb_bread(). instead of bread().
-         (sysv_count_free_blocks): Likewise.
-       * ialloc.c (sysv_raw_inode): Likewise.
-       * itree.c (get_branch): Likewise.
-         (free_branches): Likewise.
-       * super.c (sysv_read_super): Likewise.
-         (v7_read_super): Likewise.
-
-Sat Dec 15 2001  Christoph Hellwig  <hch@infradead.org>
-
-       * inode.c (sysv_read_inode): Mark inode as bad in case of failure.
-       * super.c (complete_read_super): Check for bad root inode.
-
-Wed Nov 21 2001  Andrew Morton  <andrewm@uow.edu.au>
-
-       * file.c (sysv_sync_file): Call fsync_inode_data_buffers.
-
-Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
-
-       * dir.c, ialloc.c, namei.c, include/linux/sysv_fs_i.h:
-       Implement per-Inode lookup offset cache.
-       Modelled after Ted's ext2 patch.
-
-Fri Oct 26 2001  Christoph Hellwig  <hch@infradead.org>
-
-       * inode.c, super.c, include/linux/sysv_fs.h,
-         include/linux/sysv_fs_sb.h:
-       Remove symlink faking.  Noone really wants to use these as
-       linux filesystems and native OSes don't support it anyway.
-
-
diff --git a/fs/sysv/INTRO b/fs/sysv/INTRO
deleted file mode 100644 (file)
index de4e4d1..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-This is the implementation of the SystemV/Coherent filesystem for Linux.
-It grew out of separate filesystem implementations
-
-    Xenix FS      Doug Evans <dje@cygnus.com>  June 1992
-    SystemV FS    Paul B. Monday <pmonday@eecs.wsu.edu> March-June 1993
-    Coherent FS   B. Haible <haible@ma2s2.mathematik.uni-karlsruhe.de> June 1993
-
-and was merged together in July 1993.
-
-These filesystems are rather similar. Here is a comparison with Minix FS:
-
-* Linux fdisk reports on partitions
-  - Minix FS     0x81 Linux/Minix
-  - Xenix FS     ??
-  - SystemV FS   ??
-  - Coherent FS  0x08 AIX bootable
-
-* Size of a block or zone (data allocation unit on disk)
-  - Minix FS     1024
-  - Xenix FS     1024 (also 512 ??)
-  - SystemV FS   1024 (also 512 and 2048)
-  - Coherent FS   512
-
-* General layout: all have one boot block, one super block and
-  separate areas for inodes and for directories/data.
-  On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
-  all the block numbers (including the super block) are offset by one track.
-
-* Byte ordering of "short" (16 bit entities) on disk:
-  - Minix FS     little endian  0 1
-  - Xenix FS     little endian  0 1
-  - SystemV FS   little endian  0 1
-  - Coherent FS  little endian  0 1
-  Of course, this affects only the file system, not the data of files on it!
-
-* Byte ordering of "long" (32 bit entities) on disk:
-  - Minix FS     little endian  0 1 2 3
-  - Xenix FS     little endian  0 1 2 3
-  - SystemV FS   little endian  0 1 2 3
-  - Coherent FS  PDP-11         2 3 0 1
-  Of course, this affects only the file system, not the data of files on it!
-
-* Inode on disk: "short", 0 means non-existent, the root dir ino is:
-  - Minix FS                            1
-  - Xenix FS, SystemV FS, Coherent FS   2
-
-* Maximum number of hard links to a file:
-  - Minix FS     250
-  - Xenix FS     ??
-  - SystemV FS   ??
-  - Coherent FS  >=10000
-
-* Free inode management:
-  - Minix FS                             a bitmap
-  - Xenix FS, SystemV FS, Coherent FS
-      There is a cache of a certain number of free inodes in the super-block.
-      When it is exhausted, new free inodes are found using a linear search.
-
-* Free block management:
-  - Minix FS                             a bitmap
-  - Xenix FS, SystemV FS, Coherent FS
-      Free blocks are organized in a "free list". Maybe a misleading term,
-      since it is not true that every free block contains a pointer to
-      the next free block. Rather, the free blocks are organized in chunks
-      of limited size, and every now and then a free block contains pointers
-      to the free blocks pertaining to the next chunk; the first of these
-      contains pointers and so on. The list terminates with a "block number"
-      0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
-
-* Super-block location:
-  - Minix FS     block 1 = bytes 1024..2047
-  - Xenix FS     block 1 = bytes 1024..2047
-  - SystemV FS   bytes 512..1023
-  - Coherent FS  block 1 = bytes 512..1023
-
-* Super-block layout:
-  - Minix FS
-                    unsigned short s_ninodes;
-                    unsigned short s_nzones;
-                    unsigned short s_imap_blocks;
-                    unsigned short s_zmap_blocks;
-                    unsigned short s_firstdatazone;
-                    unsigned short s_log_zone_size;
-                    unsigned long s_max_size;
-                    unsigned short s_magic;
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short s_firstdatazone;
-                    unsigned long  s_nzones;
-                    unsigned short s_fzone_count;
-                    unsigned long  s_fzones[NICFREE];
-                    unsigned short s_finode_count;
-                    unsigned short s_finodes[NICINOD];
-                    char           s_flock;
-                    char           s_ilock;
-                    char           s_modified;
-                    char           s_rdonly;
-                    unsigned long  s_time;
-                    short          s_dinfo[4]; -- SystemV FS only
-                    unsigned long  s_free_zones;
-                    unsigned short s_free_inodes;
-                    short          s_dinfo[4]; -- Xenix FS only
-                    unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
-                    char           s_fname[6];
-                    char           s_fpack[6];
-    then they differ considerably:
-        Xenix FS
-                    char           s_clean;
-                    char           s_fill[371];
-                    long           s_magic;
-                    long           s_type;
-        SystemV FS
-                    long           s_fill[12 or 14];
-                    long           s_state;
-                    long           s_magic;
-                    long           s_type;
-        Coherent FS
-                    unsigned long  s_unique;
-    Note that Coherent FS has no magic.
-
-* Inode layout:
-  - Minix FS
-                    unsigned short i_mode;
-                    unsigned short i_uid;
-                    unsigned long  i_size;
-                    unsigned long  i_time;
-                    unsigned char  i_gid;
-                    unsigned char  i_nlinks;
-                    unsigned short i_zone[7+1+1];
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short i_mode;
-                    unsigned short i_nlink;
-                    unsigned short i_uid;
-                    unsigned short i_gid;
-                    unsigned long  i_size;
-                    unsigned char  i_zone[3*(10+1+1+1)];
-                    unsigned long  i_atime;
-                    unsigned long  i_mtime;
-                    unsigned long  i_ctime;
-
-* Regular file data blocks are organized as
-  - Minix FS
-               7 direct blocks
-               1 indirect block (pointers to blocks)
-               1 double-indirect block (pointer to pointers to blocks)
-  - Xenix FS, SystemV FS, Coherent FS
-              10 direct blocks
-               1 indirect block (pointers to blocks)
-               1 double-indirect block (pointer to pointers to blocks)
-               1 triple-indirect block (pointer to pointers to pointers to blocks)
-
-* Inode size, inodes per block
-  - Minix FS        32   32
-  - Xenix FS        64   16
-  - SystemV FS      64   16
-  - Coherent FS     64    8
-
-* Directory entry on disk
-  - Minix FS
-                    unsigned short inode;
-                    char name[14/30];
-  - Xenix FS, SystemV FS, Coherent FS
-                    unsigned short inode;
-                    char name[14];
-
-* Dir entry size, dir entries per block
-  - Minix FS     16/32    64/32
-  - Xenix FS     16       64
-  - SystemV FS   16       64
-  - Coherent FS  16       32
-
-* How to implement symbolic links such that the host fsck doesn't scream:
-  - Minix FS     normal
-  - Xenix FS     kludge: as regular files with  chmod 1000
-  - SystemV FS   ??
-  - Coherent FS  kludge: as regular files with  chmod 1000
-
-
-Notation: We often speak of a "block" but mean a zone (the allocation unit)
-and not the disk driver's notion of "block".
-
-
-Bruno Haible  <haible@ma2s2.mathematik.uni-karlsruhe.de>
index d63c5e48b050e0865c19d086e9e6346fc59a8f47..ead9864567e39d237364868ee710632adcd4175a 100644 (file)
@@ -301,13 +301,13 @@ static void sysv_delete_inode(struct inode *inode)
        unlock_kernel();
 }
 
-static kmem_cache_t *sysv_inode_cachep;
+static struct kmem_cache *sysv_inode_cachep;
 
 static struct inode *sysv_alloc_inode(struct super_block *sb)
 {
        struct sysv_inode_info *si;
 
-       si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL);
+       si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
        if (!si)
                return NULL;
        return &si->vfs_inode;
@@ -318,7 +318,7 @@ static void sysv_destroy_inode(struct inode *inode)
        kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
 }
 
-static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
 {
        struct sysv_inode_info *si = (struct sysv_inode_info *)p;
 
index 1aea6a4f9a4ab1ce76c3d452be99313f76f7dd1d..1dbc2955f02e5493879cdfda7f9b154910941504 100644 (file)
@@ -107,12 +107,12 @@ static struct file_system_type udf_fstype = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static kmem_cache_t * udf_inode_cachep;
+static struct kmem_cache * udf_inode_cachep;
 
 static struct inode *udf_alloc_inode(struct super_block *sb)
 {
        struct udf_inode_info *ei;
-       ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL);
+       ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
 
@@ -130,7 +130,7 @@ static void udf_destroy_inode(struct inode *inode)
        kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct udf_inode_info *ei = (struct udf_inode_info *) foo;
 
@@ -1709,7 +1709,7 @@ void udf_error(struct super_block *sb, const char *function,
                sb->s_dirt = 1;
        }
        va_start(args, fmt);
-       vsprintf(error_buf, fmt, args);
+       vsnprintf(error_buf, sizeof(error_buf), fmt, args);
        va_end(args);
        printk (KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
                sb->s_id, function, error_buf);
@@ -1721,7 +1721,7 @@ void udf_warning(struct super_block *sb, const char *function,
        va_list args;
 
        va_start (args, fmt);
-       vsprintf(error_buf, fmt, args);
+       vsnprintf(error_buf, sizeof(error_buf), fmt, args);
        va_end(args);
        printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
                sb->s_id, function, error_buf);
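
vsnprintf() bounds the formatted output to the destination buffer, unlike vsprintf(); a minimal illustration of the pattern applied here and in the ufs hunks below (buffer size is illustrative):

    static char error_buf[1024];            /* illustrative buffer */

    static void report(const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            /* Never writes past error_buf, including the terminating NUL. */
            vsnprintf(error_buf, sizeof(error_buf), fmt, args);
            va_end(args);
    }
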
index ec79e3091d1bc7f647905edc7c410c94dcb10a3a..8a8e9382ec099a2b41d176c6dc391a316af7b80d 100644 (file)
@@ -224,7 +224,7 @@ void ufs_error (struct super_block * sb, const char * function,
                sb->s_flags |= MS_RDONLY;
        }
        va_start (args, fmt);
-       vsprintf (error_buf, fmt, args);
+       vsnprintf (error_buf, sizeof(error_buf), fmt, args);
        va_end (args);
        switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
        case UFS_MOUNT_ONERROR_PANIC:
@@ -255,7 +255,7 @@ void ufs_panic (struct super_block * sb, const char * function,
                sb->s_dirt = 1;
        }
        va_start (args, fmt);
-       vsprintf (error_buf, fmt, args);
+       vsnprintf (error_buf, sizeof(error_buf), fmt, args);
        va_end (args);
        sb->s_flags |= MS_RDONLY;
        printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
@@ -268,7 +268,7 @@ void ufs_warning (struct super_block * sb, const char * function,
        va_list args;
 
        va_start (args, fmt);
-       vsprintf (error_buf, fmt, args);
+       vsnprintf (error_buf, sizeof(error_buf), fmt, args);
        va_end (args);
        printk (KERN_WARNING "UFS-fs warning (device %s): %s: %s\n",
                sb->s_id, function, error_buf);
@@ -1204,12 +1204,12 @@ static int ufs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
-static kmem_cache_t * ufs_inode_cachep;
+static struct kmem_cache * ufs_inode_cachep;
 
 static struct inode *ufs_alloc_inode(struct super_block *sb)
 {
        struct ufs_inode_info *ei;
-       ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL);
+       ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        ei->vfs_inode.i_version = 1;
@@ -1221,7 +1221,7 @@ static void ufs_destroy_inode(struct inode *inode)
        kmem_cache_free(ufs_inode_cachep, UFS_I(inode));
 }
 
-static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct ufs_inode_info *ei = (struct ufs_inode_info *) foo;
 
index 28fce6c239b56f8d960a7bf7a820c12ad1b2b3e4..7dd12bb1d62bdf9a9f2223e0d5840097831d4960 100644 (file)
@@ -299,7 +299,7 @@ static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
 
 #define ubh_get_addr16(ubh,begin) \
        (((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
-       ((begin) & (uspi->fsize>>1) - 1)))
+       ((begin) & ((uspi->fsize>>1) - 1)))
 
 #define ubh_get_addr32(ubh,begin) \
        (((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
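
The added parenthesis rebalances the ubh_get_addr16() macro and makes the mask explicit: the intended offset is the index masked with (fsize/2 - 1), which only works because the fragment size is a power of two. A standalone sketch of that computation (names are illustrative):

    /* Illustrative only: offset of a 16-bit word inside one fragment.
     * fragsize must be a power of two, so masking with (fragsize/2 - 1)
     * equals taking the index modulo the words per fragment. */
    static inline unsigned int word16_offset(unsigned int index,
                                             unsigned int fragsize)
    {
            return index & ((fragsize >> 1) - 1);
    }
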
index 09360cf1e1f277cb5b35233aa4ef0d81edcaa38d..8e6b56fc1cad4b52504dd9c4055a3fda3a6d595d 100644 (file)
@@ -149,9 +149,10 @@ xfs_destroy_ioend(
  */
 STATIC void
 xfs_end_bio_delalloc(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
        xfs_destroy_ioend(ioend);
 }
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc(
  */
 STATIC void
 xfs_end_bio_written(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
 
        xfs_destroy_ioend(ioend);
 }
@@ -176,9 +178,10 @@ xfs_end_bio_written(
  */
 STATIC void
 xfs_end_bio_unwritten(
-       void                    *data)
+       struct work_struct      *work)
 {
-       xfs_ioend_t             *ioend = data;
+       xfs_ioend_t             *ioend =
+               container_of(work, xfs_ioend_t, io_work);
        bhv_vnode_t             *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
@@ -220,11 +223,11 @@ xfs_alloc_ioend(
        ioend->io_size = 0;
 
        if (type == IOMAP_UNWRITTEN)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
        else if (type == IOMAP_DELAY)
-               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
        else
-               INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written);
 
        return ioend;
 }
index d3382843698e869cf4367707a34c069352dfe0ae..4fb01ffdfd1a06163263186b967a2e04f7f90743 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 #include <linux/migrate.h>
 #include <linux/backing-dev.h>
+#include <linux/freezer.h>
 
 STATIC kmem_zone_t *xfs_buf_zone;
 STATIC kmem_shaker_t xfs_buf_shake;
@@ -994,9 +995,10 @@ xfs_buf_wait_unpin(
 
 STATIC void
 xfs_buf_iodone_work(
-       void                    *v)
+       struct work_struct      *work)
 {
-       xfs_buf_t               *bp = (xfs_buf_t *)v;
+       xfs_buf_t               *bp =
+               container_of(work, xfs_buf_t, b_iodone_work);
 
        if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
@@ -1017,10 +1019,10 @@ xfs_buf_ioend(
 
        if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
                if (schedule) {
-                       INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+                       INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
                        queue_work(xfslogd_workqueue, &bp->b_iodone_work);
                } else {
-                       xfs_buf_iodone_work(bp);
+                       xfs_buf_iodone_work(&bp->b_iodone_work);
                }
        } else {
                up(&bp->b_iodonesema);
@@ -1825,11 +1827,11 @@ xfs_buf_init(void)
        if (!xfs_buf_zone)
                goto out_free_trace_buf;
 
-       xfslogd_workqueue = create_workqueue("xfslogd");
+       xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
        if (!xfslogd_workqueue)
                goto out_free_buf_zone;
 
-       xfsdatad_workqueue = create_workqueue("xfsdatad");
+       xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
        if (!xfsdatad_workqueue)
                goto out_destroy_xfslogd_workqueue;
 
index de05abbbe7fd5b4d220b3baee7230006631cdd33..b93265b7c79ccd8f4238f4fd1a6c651d370ee067 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/mempool.h>
 #include <linux/writeback.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 STATIC struct quotactl_ops xfs_quotactl_operations;
 STATIC struct super_operations xfs_super_operations;
index 47faf27913a576ccf930d8cbcad1f8a3fc8f2215..7f1e92930b6258b75e931daac2e25f657441dd27 100644 (file)
@@ -64,7 +64,7 @@
 /* Host-dependent types and defines */
 
 #define ACPI_MACHINE_WIDTH          BITS_PER_LONG
-#define acpi_cache_t                        kmem_cache_t
+#define acpi_cache_t                        struct kmem_cache
 #define acpi_spinlock                   spinlock_t *
 #define ACPI_EXPORT_SYMBOL(symbol)  EXPORT_SYMBOL(symbol);
 #define strtoul                     simple_strtoul
index b9ff4d8cb33a95dd5c50db2d77c88a110c415d70..57e09f5e342490488e0a86e424fc499df326794b 100644 (file)
@@ -51,7 +51,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 #define dma_alloc_noncoherent(d, s, h, f)      dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h)       dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(dev)                 (1)
+#define dma_is_consistent(d, h)                        (1)
 
 int dma_set_mask(struct device *dev, u64 mask);
 
@@ -60,7 +60,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 #define dma_sync_single_range(dev, addr, off, size, dir)  do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)         do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)      do { } while (0)
-#define dma_cache_sync(va, size, dir)                    do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)               do { } while (0)
 
 #define dma_get_cache_alignment()                        L1_CACHE_BYTES
 
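
These are the no-op definitions for a cache-coherent architecture; the tree-wide change is that dma_cache_sync() and dma_is_consistent() now take the device as their first argument. A hedged sketch of a caller after the conversion (function and buffer are illustrative):

    /* Illustrative caller: the device is now passed explicitly so the
     * architecture code can apply per-device coherency rules. */
    static void foo_sync_for_device(struct device *dev, void *buf, size_t len)
    {
            dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
    }
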
index 2cabbd465c0c4d1b3437dafea1bf7030b080c217..84313d14e78065284ea619bb9b20a90f0377bf76 100644 (file)
 
 #define NR_SYSCALLS                    447
 
-#if defined(__GNUC__)
-
-#define _syscall_return(type)                                          \
-       return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
-
-#define _syscall_clobbers                                              \
-       "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",                 \
-       "$22", "$23", "$24", "$25", "$27", "$28"                        \
-
-#define _syscall0(type, name)                                          \
-type name(void)                                                                \
-{                                                                      \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_19 __asm__("$19");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               __asm__("callsys # %0 %1 %2"                            \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0)                                    \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-}
-
-#define _syscall1(type,name,type1,arg1)                                        \
-type name(type1 arg1)                                                  \
-{                                                                      \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_16 __asm__("$16");                    \
-               register long _sc_19 __asm__("$19");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               _sc_16 = (long) (arg1);                                 \
-               __asm__("callsys # %0 %1 %2 %3"                         \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0), "r"(_sc_16)                       \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)                     \
-type name(type1 arg1,type2 arg2)                                       \
-{                                                                      \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_16 __asm__("$16");                    \
-               register long _sc_17 __asm__("$17");                    \
-               register long _sc_19 __asm__("$19");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               _sc_16 = (long) (arg1);                                 \
-               _sc_17 = (long) (arg2);                                 \
-               __asm__("callsys # %0 %1 %2 %3 %4"                      \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17)          \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)          \
-type name(type1 arg1,type2 arg2,type3 arg3)                            \
-{                                                                      \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_16 __asm__("$16");                    \
-               register long _sc_17 __asm__("$17");                    \
-               register long _sc_18 __asm__("$18");                    \
-               register long _sc_19 __asm__("$19");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               _sc_16 = (long) (arg1);                                 \
-               _sc_17 = (long) (arg2);                                 \
-               _sc_18 = (long) (arg3);                                 \
-               __asm__("callsys # %0 %1 %2 %3 %4 %5"                   \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),         \
-                         "r"(_sc_18)                                   \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)              \
-{                                                                       \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_16 __asm__("$16");                    \
-               register long _sc_17 __asm__("$17");                    \
-               register long _sc_18 __asm__("$18");                    \
-               register long _sc_19 __asm__("$19");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               _sc_16 = (long) (arg1);                                 \
-               _sc_17 = (long) (arg2);                                 \
-               _sc_18 = (long) (arg3);                                 \
-               _sc_19 = (long) (arg4);                                 \
-               __asm__("callsys # %0 %1 %2 %3 %4 %5 %6"                \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),         \
-                         "r"(_sc_18), "1"(_sc_19)                      \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5)                                                    \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)     \
-{                                                                      \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_16 __asm__("$16");                    \
-               register long _sc_17 __asm__("$17");                    \
-               register long _sc_18 __asm__("$18");                    \
-               register long _sc_19 __asm__("$19");                    \
-               register long _sc_20 __asm__("$20");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               _sc_16 = (long) (arg1);                                 \
-               _sc_17 = (long) (arg2);                                 \
-               _sc_18 = (long) (arg3);                                 \
-               _sc_19 = (long) (arg4);                                 \
-               _sc_20 = (long) (arg5);                                 \
-               __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7"             \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),         \
-                         "r"(_sc_18), "1"(_sc_19), "r"(_sc_20)         \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5,type6,arg6)                                         \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
-{                                                                      \
-       long _sc_ret, _sc_err;                                          \
-       {                                                               \
-               register long _sc_0 __asm__("$0");                      \
-               register long _sc_16 __asm__("$16");                    \
-               register long _sc_17 __asm__("$17");                    \
-               register long _sc_18 __asm__("$18");                    \
-               register long _sc_19 __asm__("$19");                    \
-               register long _sc_20 __asm__("$20");                    \
-               register long _sc_21 __asm__("$21");                    \
-                                                                       \
-               _sc_0 = __NR_##name;                                    \
-               _sc_16 = (long) (arg1);                                 \
-               _sc_17 = (long) (arg2);                                 \
-               _sc_18 = (long) (arg3);                                 \
-               _sc_19 = (long) (arg4);                                 \
-               _sc_20 = (long) (arg5);                                 \
-               _sc_21 = (long) (arg6);                                 \
-               __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8"          \
-                       : "=r"(_sc_0), "=r"(_sc_19)                     \
-                       : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17),         \
-                         "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
-                       : _syscall_clobbers);                           \
-               _sc_ret = _sc_0, _sc_err = _sc_19;                      \
-       }                                                               \
-       _syscall_return(type);                                          \
-}
-
-#endif /* __GNUC__ */
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
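
The legacy _syscallN() stub-generator macros are removed from the exported alpha <asm/unistd.h> above, and the same removal appears in the arm, arm26, frv and other unistd.h headers further down in this diff. Code that still generated its own stubs is expected to switch to the libc syscall(2) wrapper; a minimal userspace sketch, assuming glibc headers:

#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

int main(void)
{
        /* Invoke a system call by number rather than generating a stub
         * with _syscall0(); the kernel headers no longer provide the
         * macros. */
        long pid = syscall(SYS_getpid);

        printf("pid = %ld\n", pid);
        return 0;
}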
index 805ae3575e44cf76048a2203fc77940c66e16c0c..345a649ec8389fbaf303bfab8297ddfa6a1676ac 100644 (file)
@@ -24,7 +24,7 @@ struct omap_irda_config {
        /* Very specific to the needs of some platforms (h3,h4)
         * having calls which can sleep in irda_set_speed.
         */
-       struct work_struct gpio_expa;
+       struct delayed_work gpio_expa;
        int rx_channel;
        int tx_channel;
        unsigned long dest_start;
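
struct work_struct no longer embeds its own timer after the workqueue rework in this merge window; deferred work uses struct delayed_work, and the handler now receives a struct work_struct pointer. A hedged sketch of how a user of such a gpio_expa-style field adapts (the names below are illustrative, not taken from the OMAP code):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_irda_state {                          /* hypothetical driver state */
        struct delayed_work gpio_expa;
};

static void my_gpio_expa_handler(struct work_struct *work)
{
        struct my_irda_state *st =
                container_of(work, struct my_irda_state, gpio_expa.work);

        /* ... poke the GPIO expander; this handler may sleep ... */
        (void)st;
}

static void my_irda_setup(struct my_irda_state *st)
{
        INIT_DELAYED_WORK(&st->gpio_expa, my_gpio_expa_handler);
        schedule_delayed_work(&st->gpio_expa, 0);
}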
index 666617711c81d259745ba2b0dc12d935c4d4fb0a..9bc46b486afba7a5970e93b5c938a3be33c4684e 100644 (file)
@@ -48,7 +48,7 @@ static inline int dma_get_cache_alignment(void)
        return 32;
 }
 
-static inline int dma_is_consistent(dma_addr_t handle)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 {
        return !!arch_is_coherent();
 }
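
dma_is_consistent() and dma_cache_sync() (the latter appears further down) now take the struct device as their first argument, matching the rest of the DMA API so coherency can be decided per device. A hedged caller-side sketch of the updated signatures; the buffer, length and handle are illustrative:

#include <linux/dma-mapping.h>

static void my_sync_for_device(struct device *dev, void *buf, size_t len,
                               dma_addr_t handle)
{
        /* If this device does not see coherent memory, write the
         * CPU-side buffer back before the device reads it. */
        if (!dma_is_consistent(dev, handle))
                dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
}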
index aa4b5782f0c9723cf1d604ca7756a09bea8e6829..e5407392afcad0413c0df46030f51e2c93c4fd5b 100644 (file)
 #ifndef __ASMARM_SETUP_H
 #define __ASMARM_SETUP_H
 
+#include <asm/types.h>
+
 #define COMMAND_LINE_SIZE 1024
 
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE      0x00000000
 
 struct tag_header {
-       u32 size;
-       u32 tag;
+       __u32 size;
+       __u32 tag;
 };
 
 /* The list must start with an ATAG_CORE node */
 #define ATAG_CORE      0x54410001
 
 struct tag_core {
-       u32 flags;              /* bit 0 = read-only */
-       u32 pagesize;
-       u32 rootdev;
+       __u32 flags;            /* bit 0 = read-only */
+       __u32 pagesize;
+       __u32 rootdev;
 };
 
 /* it is allowed to have multiple ATAG_MEM nodes */
 #define ATAG_MEM       0x54410002
 
 struct tag_mem32 {
-       u32     size;
-       u32     start;  /* physical start address */
+       __u32   size;
+       __u32   start;  /* physical start address */
 };
 
 /* VGA text type displays */
 #define ATAG_VIDEOTEXT 0x54410003
 
 struct tag_videotext {
-       u8              x;
-       u8              y;
-       u16             video_page;
-       u8              video_mode;
-       u8              video_cols;
-       u16             video_ega_bx;
-       u8              video_lines;
-       u8              video_isvga;
-       u16             video_points;
+       __u8            x;
+       __u8            y;
+       __u16           video_page;
+       __u8            video_mode;
+       __u8            video_cols;
+       __u16           video_ega_bx;
+       __u8            video_lines;
+       __u8            video_isvga;
+       __u16           video_points;
 };
 
 /* describes how the ramdisk will be used in kernel */
 #define ATAG_RAMDISK   0x54410004
 
 struct tag_ramdisk {
-       u32 flags;      /* bit 0 = load, bit 1 = prompt */
-       u32 size;       /* decompressed ramdisk size in _kilo_ bytes */
-       u32 start;      /* starting block of floppy-based RAM disk image */
+       __u32 flags;    /* bit 0 = load, bit 1 = prompt */
+       __u32 size;     /* decompressed ramdisk size in _kilo_ bytes */
+       __u32 start;    /* starting block of floppy-based RAM disk image */
 };
 
 /* describes where the compressed ramdisk image lives (virtual address) */
@@ -76,23 +78,23 @@ struct tag_ramdisk {
 #define ATAG_INITRD2   0x54420005
 
 struct tag_initrd {
-       u32 start;      /* physical start address */
-       u32 size;       /* size of compressed ramdisk image in bytes */
+       __u32 start;    /* physical start address */
+       __u32 size;     /* size of compressed ramdisk image in bytes */
 };
 
 /* board serial number. "64 bits should be enough for everybody" */
 #define ATAG_SERIAL    0x54410006
 
 struct tag_serialnr {
-       u32 low;
-       u32 high;
+       __u32 low;
+       __u32 high;
 };
 
 /* board revision */
 #define ATAG_REVISION  0x54410007
 
 struct tag_revision {
-       u32 rev;
+       __u32 rev;
 };
 
 /* initial values for vesafb-type framebuffers. see struct screen_info
@@ -101,20 +103,20 @@ struct tag_revision {
 #define ATAG_VIDEOLFB  0x54410008
 
 struct tag_videolfb {
-       u16             lfb_width;
-       u16             lfb_height;
-       u16             lfb_depth;
-       u16             lfb_linelength;
-       u32             lfb_base;
-       u32             lfb_size;
-       u8              red_size;
-       u8              red_pos;
-       u8              green_size;
-       u8              green_pos;
-       u8              blue_size;
-       u8              blue_pos;
-       u8              rsvd_size;
-       u8              rsvd_pos;
+       __u16           lfb_width;
+       __u16           lfb_height;
+       __u16           lfb_depth;
+       __u16           lfb_linelength;
+       __u32           lfb_base;
+       __u32           lfb_size;
+       __u8            red_size;
+       __u8            red_pos;
+       __u8            green_size;
+       __u8            green_pos;
+       __u8            blue_size;
+       __u8            blue_pos;
+       __u8            rsvd_size;
+       __u8            rsvd_pos;
 };
 
 /* command line: \0 terminated string */
@@ -128,17 +130,17 @@ struct tag_cmdline {
 #define ATAG_ACORN     0x41000101
 
 struct tag_acorn {
-       u32 memc_control_reg;
-       u32 vram_pages;
-       u8 sounddefault;
-       u8 adfsdrives;
+       __u32 memc_control_reg;
+       __u32 vram_pages;
+       __u8 sounddefault;
+       __u8 adfsdrives;
 };
 
 /* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */
 #define ATAG_MEMCLK    0x41000402
 
 struct tag_memclk {
-       u32 fmemclk;
+       __u32 fmemclk;
 };
 
 struct tag {
@@ -167,24 +169,26 @@ struct tag {
 };
 
 struct tagtable {
-       u32 tag;
+       __u32 tag;
        int (*parse)(const struct tag *);
 };
 
-#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
-#define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
-
 #define tag_member_present(tag,member)                         \
        ((unsigned long)(&((struct tag *)0L)->member + 1)       \
                <= (tag)->hdr.size * 4)
 
-#define tag_next(t)    ((struct tag *)((u32 *)(t) + (t)->hdr.size))
+#define tag_next(t)    ((struct tag *)((__u32 *)(t) + (t)->hdr.size))
 #define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
 
 #define for_each_tag(t,base)           \
        for (t = base; t->hdr.size; t = tag_next(t))
 
+#ifdef __KERNEL__
+
+#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
+#define __tagtable(tag, fn) \
+static struct tagtable __tagtable_##fn __tag = { tag, fn }
+
 /*
  * Memory map description
  */
@@ -217,4 +221,6 @@ struct early_params {
 static struct early_params __early_##fn __attribute_used__     \
 __attribute__((__section__(".early_param.init"))) = { name, fn }
 
+#endif  /*  __KERNEL__  */
+
 #endif
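
With setup.h now exported to userspace (the asm-arm Kbuild hunk further down adds it to unifdef-y), the ATAG structures switch from the kernel-only u32/u16/u8 types to the exported __u32/__u16/__u8 variants, and the kernel-internal __tagtable()/early_params machinery moves under #ifdef __KERNEL__. A hedged userspace sketch that walks an ATAG list through the exported header; the union member names assume the usual arm layout, which is not fully shown in this hunk:

#include <asm/setup.h>
#include <stdio.h>

static void dump_atags(struct tag *base)
{
        struct tag *t;

        for_each_tag(t, base) {
                printf("tag 0x%08x, %u words\n", t->hdr.tag, t->hdr.size);
                if (t->hdr.tag == ATAG_MEM)
                        printf("  mem: start 0x%08x size 0x%08x\n",
                               t->u.mem.start, t->u.mem.size);
        }
}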
index 14a87eec5a2dd28662173173316bf1a9ffa29ac6..d44c629d84249607d5689c6a2bc2dbf611f8e8fc 100644 (file)
 #endif
 
 #ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#if defined(__thumb__) || defined(__ARM_EABI__)
-#define __SYS_REG(name) register long __sysreg __asm__("r7") = __NR_##name;
-#define __SYS_REG_LIST(regs...) "r" (__sysreg) , ##regs
-#define __syscall(name) "swi\t0"
-#else
-#define __SYS_REG(name)
-#define __SYS_REG_LIST(regs...) regs
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-#endif
-
-#define __syscall_return(type, res)                                    \
-do {                                                                   \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) {      \
-               errno = -(res);                                         \
-               res = -1;                                               \
-       }                                                               \
-       return (type) (res);                                            \
-} while (0)
-
-#define _syscall0(type,name)                                           \
-type name(void) {                                                      \
-  __SYS_REG(name)                                                      \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST()                                              \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-#define _syscall1(type,name,type1,arg1)                                \
-type name(type1 arg1) {                                                \
-  __SYS_REG(name)                                                      \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST( "0" (__r0) )                                  \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)                     \
-type name(type1 arg1,type2 arg2) {                                     \
-  __SYS_REG(name)                                                      \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST( "0" (__r0), "r" (__r1) )                      \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)          \
-type name(type1 arg1,type2 arg2,type3 arg3) {                          \
-  __SYS_REG(name)                                                      \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2) )          \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {            \
-  __SYS_REG(name)                                                      \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __r3 __asm__("r3") = (long)arg4;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) ) \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-  
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)    \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) {        \
-  __SYS_REG(name)                                                      \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __r3 __asm__("r3") = (long)arg4;                       \
-  register long __r4 __asm__("r4") = (long)arg5;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2),           \
-                         "r" (__r3), "r" (__r4) )                      \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) {    \
-  __SYS_REG(name)                                                      \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __r3 __asm__("r3") = (long)arg4;                       \
-  register long __r4 __asm__("r4") = (long)arg5;                       \
-  register long __r5 __asm__("r5") = (long)arg6;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2),           \
-                         "r" (__r3), "r" (__r4), "r" (__r5) )          \
-       : "memory" );                                                   \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
index 6437167b1ffe7f2988b3d00d54084f66067640d1..7725af3ddb4df972b69d71b9487f587b8e2fc1c0 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/tlbflush.h>
 #include <linux/slab.h>
 
-extern kmem_cache_t *pte_cache;
+extern struct kmem_cache *pte_cache;
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){
        return kmem_cache_alloc(pte_cache, GFP_KERNEL);
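
The opaque kmem_cache_t typedef is on its way out in favour of plain struct kmem_cache. A hedged sketch of how a cache like pte_cache could be declared and created against the slab API of this era; the cache name, sizing and the init hook are illustrative, not the arm26 code:

#include <linux/init.h>
#include <linux/slab.h>
#include <asm/pgtable.h>

struct kmem_cache *pte_cache;

static int __init my_pte_cache_init(void)       /* hypothetical init hook */
{
        pte_cache = kmem_cache_create("pte-cache",
                                      PTRS_PER_PTE * sizeof(pte_t), 0,
                                      SLAB_PANIC, NULL, NULL);
        return 0;
}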
index 6348931be65d9333d19a427a2f6981684e2dde54..1a867b4e8d531e4f86b81feb4680e6ab15de7636 100644 (file)
@@ -16,6 +16,8 @@
 
 #define COMMAND_LINE_SIZE 1024
 
+#ifdef __KERNEL__
+
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE      0x00000000
 
@@ -202,4 +204,6 @@ struct meminfo {
 
 extern struct meminfo meminfo;
 
+#endif  /*  __KERNEL__  */
+
 #endif
index 25a5eead85beaa4a26530f9d331519edc6270ffe..4c3b919177e5491feac582824afa877148718fce 100644 (file)
 #define __ARM_NR_usr26                 (__ARM_NR_BASE+3)
 
 #ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-
-#define __syscall_return(type, res)                                    \
-do {                                                                   \
-       if ((unsigned long)(res) >= (unsigned long)-MAX_ERRNO) {        \
-               errno = -(res);                                         \
-               res = -1;                                               \
-       }                                                               \
-       return (type) (res);                                            \
-} while (0)
-
-#define _syscall0(type,name)                                           \
-type name(void) {                                                      \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       :                                                               \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-#define _syscall1(type,name,type1,arg1)                                \
-type name(type1 arg1) {                                                \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : "r" (__r0)                                                    \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)                     \
-type name(type1 arg1,type2 arg2) {                                     \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : "r" (__r0),"r" (__r1)                                         \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)          \
-type name(type1 arg1,type2 arg2,type3 arg3) {                          \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : "r" (__r0),"r" (__r1),"r" (__r2)                              \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {            \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __r3 __asm__("r3") = (long)arg4;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3)                   \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-  
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)    \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) {        \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __r3 __asm__("r3") = (long)arg4;                       \
-  register long __r4 __asm__("r4") = (long)arg5;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3),"r" (__r4)        \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) {    \
-  register long __r0 __asm__("r0") = (long)arg1;                       \
-  register long __r1 __asm__("r1") = (long)arg2;                       \
-  register long __r2 __asm__("r2") = (long)arg3;                       \
-  register long __r3 __asm__("r3") = (long)arg4;                       \
-  register long __r4 __asm__("r4") = (long)arg5;                       \
-  register long __r5 __asm__("r5") = (long)arg6;                       \
-  register long __res_r0 __asm__("r0");                                        \
-  long __res;                                                          \
-  __asm__ __volatile__ (                                               \
-  __syscall(name)                                                      \
-       : "=r" (__res_r0)                                               \
-       : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3), "r" (__r4),"r" (__r5)            \
-       : "lr");                                                        \
-  __res = __res_r0;                                                    \
-  __syscall_return(type,__res);                                                \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 4c40cb41cdf8399a188e2da74bb379f25f113dbd..0580b5d62bba1385073f2b7a7d0218fc78a58f92 100644 (file)
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -307,7 +308,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return 1;
 }
index 10193da4113b8d92a0c02b5114339371517e82e4..0a5224245e44088bb69fb797d348f2de81e5f639 100644 (file)
@@ -13,6 +13,8 @@
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 /* Magic number indicating that a tag table is present */
 #define ATAG_MAGIC     0xa2a25441
 
@@ -138,4 +140,6 @@ void chip_enable_sdram(void);
 
 #endif /* !__ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASM_AVR32_SETUP_H__ */
index 3f47db9675afd546d78ec371f1bdc61ddb3120ce..2bff153a32ed6d557edc376ed0bdb65e62e2e939 100644 (file)
@@ -57,11 +57,6 @@ typedef unsigned long long u64;
 
 typedef u32 dma_addr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index b73f5396e5a6b6f634726d90515594341f8d219a..be85f6de25d36dcfac1242ad926d16764fe02d78 100644 (file)
@@ -10,7 +10,7 @@
  * number.  They differ in that the first function also inverts all bits
  * in the input.
  */
-extern inline unsigned long cris_swapnwbrlz(unsigned long w)
+static inline unsigned long cris_swapnwbrlz(unsigned long w)
 {
        /* Let's just say we return the result in the same register as the
           input.  Saying we clobber the input but can return the result
@@ -26,7 +26,7 @@ extern inline unsigned long cris_swapnwbrlz(unsigned long w)
        return res;
 }
 
-extern inline unsigned long cris_swapwbrlz(unsigned long w)
+static inline unsigned long cris_swapwbrlz(unsigned long w)
 {
        unsigned res;
        __asm__ ("swapwbr %0 \n\t"
@@ -40,7 +40,7 @@ extern inline unsigned long cris_swapwbrlz(unsigned long w)
  * ffz = Find First Zero in word. Undefined if no zero exists,
  * so code should check against ~0UL first..
  */
-extern inline unsigned long ffz(unsigned long w)
+static inline unsigned long ffz(unsigned long w)
 {
        return cris_swapnwbrlz(w);
 }
@@ -51,7 +51,7 @@ extern inline unsigned long ffz(unsigned long w)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-extern inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
        return cris_swapnwbrlz(~word);
 }
@@ -65,7 +65,7 @@ extern inline unsigned long __ffs(unsigned long word)
  * differs in spirit from the above ffz (man ffs).
  */
 
-extern inline unsigned long kernel_ffs(unsigned long w)
+static inline unsigned long kernel_ffs(unsigned long w)
 {
        return w ? cris_swapwbrlz (w) + 1 : 0;
 }
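
The cris bitops helpers (and several more headers below) switch from "extern inline" to "static inline". "extern inline" relies on gnu89 inline semantics: if the compiler declines to inline, every includer references an out-of-line symbol that no object file provides, and the link fails. "static inline" lets each translation unit emit its own copy when one is needed. A minimal sketch of the failure mode, with hypothetical names:

/* some_header.h, old style (problematic):
 *
 *     extern inline int twice(int x) { return 2 * x; }
 *
 * Compiled at -O0, or under C99 inline rules, a caller in foo.c emits
 * a call to an external 'twice' that nothing defines, so the final
 * link fails with an undefined reference.  The fixed form is: */
static inline int twice(int x)
{
        return 2 * x;
}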
index cbf1a98f012975d67ca8b3b5efd04b896ccc38b8..662cea70152dc2ce2247f067625bd5ce00108bbe 100644 (file)
@@ -156,10 +156,10 @@ dma_get_cache_alignment(void)
        return (1 << INTERNODE_CACHE_SHIFT);
 }
 
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
 }
index dbd0f30b85b6aa7653da1e85edd9165d41c9d3a4..a8e1e6cb7cd02f816823b22261808e53bbb290bd 100644 (file)
 /*
  * These two _must_ execute atomically wrt each other.
  */
-extern inline void wake_one_more(struct semaphore * sem)
+static inline void wake_one_more(struct semaphore * sem)
 {
        atomic_inc(&sem->waking);
 }
 
-extern inline int waking_non_zero(struct semaphore *sem)
+static inline int waking_non_zero(struct semaphore *sem)
 {
        unsigned long flags;
        int ret = 0;
@@ -40,7 +40,7 @@ extern inline int waking_non_zero(struct semaphore *sem)
        return ret;
 }
 
-extern inline int waking_non_zero_interruptible(struct semaphore *sem,
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
                                                struct task_struct *tsk)
 {
        int ret = 0;
@@ -59,7 +59,7 @@ extern inline int waking_non_zero_interruptible(struct semaphore *sem,
        return ret;
 }
 
-extern inline int waking_non_zero_trylock(struct semaphore *sem)
+static inline int waking_non_zero_trylock(struct semaphore *sem)
 {
         int ret = 1;
        unsigned long flags;
index e9fc1d47797e295664a0fa5f025b80bdce7fb50b..bcb2df68496e684d582f71d6a119f5398ee31c2f 100644 (file)
@@ -172,10 +172,10 @@ int dma_get_cache_alignment(void)
        return 1 << L1_CACHE_SHIFT;
 }
 
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
 {
        flush_write_buffers();
index 0f390f41f81680a70c12fe26cdae314644466344..ff4d6cdeb1522811bd4131da61d68575ff55502b 100644 (file)
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
        unsigned long paddr;
 
-       inc_preempt_count();
+       pagefault_disable();
        paddr = page_to_phys(page);
 
        switch (type) {
@@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
        default:
                BUG();
        }
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
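
The open-coded inc_preempt_count()/dec_preempt_count() pairs around the atomic kmap here are replaced with pagefault_disable()/pagefault_enable(), which wrap the same preempt-count manipulation but make the "no page faults allowed" intent explicit; the futex hunk below gets the same treatment. A hedged sketch of the generic pattern:

#include <linux/uaccess.h>

/* Copy from userspace where sleeping on a fault is not allowed: with
 * pagefaults disabled, a missing page makes the copy fail instead of
 * blocking. */
static unsigned long copy_nofault(void *dst, const void __user *src,
                                  unsigned long len)
{
        unsigned long not_copied;

        pagefault_disable();
        not_copied = __copy_from_user_inatomic(dst, src, len);
        pagefault_enable();

        return not_copied;      /* number of bytes left uncopied */
}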
index 168381ebb41a51d680c01ce2b6081dfc008ca523..365653b1726c18892a3a1ea5efb760b50f64ddaa 100644 (file)
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN         64      /* max length of hostname */
-#define COMMAND_LINE_SIZE      512
 
 #endif /* _ASM_PARAM_H */
index 0d293b9a585706806f6a05f6978b9cef0f6dcd5e..afd787ceede6687b131cd073c0ae315a31108d07 100644 (file)
 #ifndef _ASM_SETUP_H
 #define _ASM_SETUP_H
 
+#define COMMAND_LINE_SIZE       512
+
+#ifdef __KERNEL__
+
 #include <linux/init.h>
 
 #ifndef __ASSEMBLY__
@@ -22,4 +26,6 @@ extern unsigned long __initdata num_mappedpages;
 
 #endif /* !__ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* _ASM_SETUP_H */
index 725e854928cf3cf60f79863fd2db64011ad0747c..584c0417ae4de9ed74a0e167e6afd7cc30fc9738 100644 (file)
 #ifdef __KERNEL__
 
 #define NR_syscalls 310
-#include <linux/err.h>
-
-/*
- * process the return value of a syscall, consigning it to one of two possible fates
- * - user-visible error numbers are in the range -1 - -4095: see <asm-frv/errno.h>
- */
-#undef __syscall_return
-#define __syscall_return(type, res)                                    \
-do {                                                                   \
-        unsigned long __sr2 = (res);                                   \
-       if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \
-               errno = (-__sr2);                                       \
-               __sr2 = ~0UL;                                           \
-       }                                                               \
-       return (type) __sr2;                                            \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#undef _syscall0
-#define _syscall0(type,name)                                           \
-type name(void)                                                                \
-{                                                                      \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
-       register unsigned long __sc0 __asm__ ("gr8");                   \
-       __asm__ __volatile__ ("tira gr0,#0"                             \
-                             : "=r" (__sc0)                            \
-                             : "r" (__scnum));                         \
-       __syscall_return(type, __sc0);                                  \
-}
-
-#undef _syscall1
-#define _syscall1(type,name,type1,arg1)                                                \
-type name(type1 arg1)                                                          \
-{                                                                              \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);         \
-       register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;    \
-       __asm__ __volatile__ ("tira gr0,#0"                                     \
-                             : "+r" (__sc0)                                    \
-                             : "r" (__scnum));                                 \
-       __syscall_return(type, __sc0);                                          \
-}
-
-#undef _syscall2
-#define _syscall2(type,name,type1,arg1,type2,arg2)                             \
-type name(type1 arg1,type2 arg2)                                               \
-{                                                                              \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);         \
-       register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;    \
-       register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;    \
-       __asm__ __volatile__ ("tira gr0,#0"                                     \
-                             : "+r" (__sc0)                                    \
-                             : "r" (__scnum), "r" (__sc1));                    \
-       __syscall_return(type, __sc0);                                          \
-}
-
-#undef _syscall3
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)                  \
-type name(type1 arg1,type2 arg2,type3 arg3)                                    \
-{                                                                              \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);         \
-       register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;    \
-       register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;    \
-       register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;   \
-       __asm__ __volatile__ ("tira gr0,#0"                                     \
-                             : "+r" (__sc0)                                    \
-                             : "r" (__scnum), "r" (__sc1), "r" (__sc2));       \
-       __syscall_return(type, __sc0);                                          \
-}
-
-#undef _syscall4
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)               \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4)                             \
-{                                                                                      \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);                 \
-       register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;            \
-       register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;            \
-       register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;           \
-       register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;           \
-       __asm__ __volatile__ ("tira gr0,#0"                                             \
-                             : "+r" (__sc0)                                            \
-                             : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3));  \
-       __syscall_return(type, __sc0);                                                  \
-}
-
-#undef _syscall5
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5)    \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)                 \
-{                                                                                      \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);                 \
-       register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;            \
-       register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;            \
-       register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;           \
-       register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;           \
-       register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5;           \
-       __asm__ __volatile__ ("tira gr0,#0"                                             \
-                             : "+r" (__sc0)                                            \
-                             : "r" (__scnum), "r" (__sc1), "r" (__sc2),                \
-                             "r" (__sc3), "r" (__sc4));                                \
-       __syscall_return(type, __sc0);                                                  \
-}
-
-#undef _syscall6
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6)              \
-{                                                                                               \
-       register unsigned long __scnum __asm__ ("gr7") = (__NR_##name);                          \
-       register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1;                     \
-       register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2;                     \
-       register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3;                    \
-       register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4;                    \
-       register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5;                    \
-       register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6;                    \
-       __asm__ __volatile__ ("tira gr0,#0"                                                      \
-                             : "+r" (__sc0)                                                     \
-                             : "r" (__scnum), "r" (__sc1), "r" (__sc2),                         \
-                             "r" (__sc3), "r" (__sc4), "r" (__sc5));                            \
-       __syscall_return(type, __sc0);                                                           \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 /* #define __ARCH_WANT_OLD_READDIR */
index 3c06be38170116d61436039cfb494df7d0fc88a8..fa14f8cd30c501a6b7e0c8e8e391210a9c62a759 100644 (file)
@@ -1,4 +1,3 @@
-header-y += atomic.h
 header-y += errno-base.h
 header-y += errno.h
 header-y += fcntl.h
index a84c3d88a18912af529d794c3de75a66ad675049..a37e95fe58d64734f0323ace02bf062ad155f8cd 100644 (file)
@@ -14,6 +14,7 @@ unifdef-y += posix_types.h
 unifdef-y += ptrace.h
 unifdef-y += resource.h
 unifdef-y += sembuf.h
+unifdef-y += setup.h
 unifdef-y += shmbuf.h
 unifdef-y += sigcontext.h
 unifdef-y += siginfo.h
index 42a95d9a0641b4059a96b464d7d0a0e38cbd1b33..b7e4a0467cb1283ed82b9cd74bf2238178376a87 100644 (file)
@@ -66,7 +66,7 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
        atomic64_sub(i, v);
 }
 
-#else
+#else  /*  BITS_PER_LONG == 64  */
 
 typedef atomic_t atomic_long_t;
 
@@ -113,5 +113,6 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
        atomic_sub(i, v);
 }
 
-#endif
-#endif
+#endif  /*  BITS_PER_LONG == 64  */
+
+#endif  /*  _ASM_GENERIC_ATOMIC_H  */
index b541e48cc545f6c467bc5ca691731a8535df98bd..783ab9944d701234cfd80eeaf34da6ab90af6f04 100644 (file)
@@ -266,7 +266,7 @@ dma_error(dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -295,7 +295,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        /* could define this in terms of the dma_cache ... operations,
index df893c160318a6ac84ae076bc7ac5b98c33722ac..f422df0956a27025deb4c6cdbbc5a47646f74c9e 100644 (file)
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index e60d6f21fa62f25bee576f58b36fea2ead229a7b..4d4c62d11059e84718c8c104c59eba270728fb58 100644 (file)
@@ -11,8 +11,8 @@
 
 #define RODATA                                                         \
        . = ALIGN(4096);                                                \
-       __start_rodata = .;                                             \
        .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
+               VMLINUX_SYMBOL(__start_rodata) = .;                     \
                *(.rodata) *(.rodata.*)                                 \
                *(__vermagic)           /* Kernel version magic */      \
        }                                                               \
                *(__ksymtab_strings)                                    \
        }                                                               \
                                                                        \
+       EH_FRAME                                                        \
+                                                                       \
        /* Built-in module parameters. */                               \
        __param : AT(ADDR(__param) - LOAD_OFFSET) {                     \
                VMLINUX_SYMBOL(__start___param) = .;                    \
                *(__param)                                              \
                VMLINUX_SYMBOL(__stop___param) = .;                     \
+               VMLINUX_SYMBOL(__end_rodata) = .;                       \
        }                                                               \
                                                                        \
-       /* Unwind data binary search table */                           \
-       EH_FRAME_HDR                                                    \
-                                                                       \
-       __end_rodata = .;                                               \
        . = ALIGN(4096);
 
 #define SECURITY_INIT                                                  \
                VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
 #ifdef CONFIG_STACK_UNWIND
-               /* Unwind data binary search table */
-#define EH_FRAME_HDR                                                   \
+#define EH_FRAME                                                       \
+               /* Unwind data binary search table */                   \
+               . = ALIGN(8);                                           \
                .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \
                        VMLINUX_SYMBOL(__start_unwind_hdr) = .;         \
                        *(.eh_frame_hdr)                                \
                        VMLINUX_SYMBOL(__end_unwind_hdr) = .;           \
+               }                                                       \
+               /* Unwind data */                                       \
+               . = ALIGN(8);                                           \
+               .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {         \
+                       VMLINUX_SYMBOL(__start_unwind) = .;             \
+                       *(.eh_frame)                                    \
+                       VMLINUX_SYMBOL(__end_unwind) = .;               \
                }
 #else
-#define EH_FRAME_HDR
+#define EH_FRAME
 #endif
 
                /* DWARF debug sections.
index cbccbbdd640fab6218f23011e26679a3acb29dee..743beba70f82a71606d6c786e8e7d8257295fcc8 100644 (file)
@@ -9,7 +9,7 @@
  * Delay routines, using a pre-computed "loops_per_second" value.
  */
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
        __asm__ __volatile__ ("1:\n\t"
                              "dec.l #1,%0\n\t"
@@ -27,7 +27,7 @@ extern __inline__ void __delay(unsigned long loops)
 
 extern unsigned long loops_per_jiffy;
 
-extern __inline__ void udelay(unsigned long usecs)
+static inline void udelay(unsigned long usecs)
 {
        usecs *= 4295;          /* 2**32 / 1000000 */
        usecs /= (loops_per_jiffy*HZ);
index 855721a5dcc973d0661e6e182301d958cdaf6606..5c165f7bee0edb4123bc015d4092ac71f4cf4071 100644 (file)
@@ -9,7 +9,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
        // mm->context = virt_to_phys(mm->pgd);
@@ -23,7 +23,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 {
 }
 
-extern inline void activate_mm(struct mm_struct *prev_mm,
+static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
 {
 }
index 5edad5b70fd56dcf45c79a7b852b73d005d8ed4a..0c771b05fdd5db25bdb6210aa28fb0457f25c950 100644 (file)
 #define pcibios_assign_all_busses()    0
 #define pcibios_scan_all_fns(a, b)     0
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
        /* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
 {
        /* We don't do dynamic PCI IRQ allocation */
 }
index bbdffbeeedef9cbed6d0e763745469dbf2fb76ca..9a2c5c9fd700346ef514f37c57edba3f68a0dd57 100644 (file)
@@ -47,12 +47,12 @@ static inline void flush_tlb_range(struct mm_struct *mm,
        BUG();
 }
 
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
 {
        BUG();
 }
 
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
 {
        BUG();
index da2402b8654059149e2ee9d13e777997b720731d..2a8b1b2be782fa1df9923dd8b3a5bf96dcc3c407 100644 (file)
@@ -55,12 +55,6 @@ typedef unsigned long long u64;
 
 typedef u32 dma_addr_t;
 
-#define HAVE_SECTOR_T
-typedef u64 sector_t;
-
-#define HAVE_BLKCNT_T
-typedef u64 blkcnt_t;
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
index 747788d629ae1dc192d87356bb15a986dca95e75..7ddd414f8d16b37dd9068ddbd85ad2467eb7a6e1 100644 (file)
 #ifdef __KERNEL__
 
 #define NR_syscalls 289
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-       /* avoid using res which is declared to be in register d0; \
-          errno might expand to a function call and clobber it.  */ \
-               int __err = -(res); \
-               errno = __err; \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name)                          \
-type name(void)                                                \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name)             \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
-
-#define _syscall1(type, name, atype, a)                        \
-type name(atype a)                                     \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  register long _a __asm__("er1");                     \
-  _a = (long)a;                                                \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name),            \
-                         "g" (_a)                      \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
-
-#define _syscall2(type, name, atype, a, btype, b)      \
-type name(atype a, btype b)                            \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  register long _a __asm__("er1");                     \
-  register long _b __asm__("er2");                     \
-  _a = (long)a;                                                \
-  _b = (long)b;                                                \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name),            \
-                         "g" (_a),                     \
-                         "g" (_b)                      \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)    \
-type name(atype a, btype b, ctype c)                   \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  register long _a __asm__("er1");                     \
-  register long _b __asm__("er2");                     \
-  register long _c __asm__("er3");                     \
-  _a = (long)a;                                                \
-  _b = (long)b;                                                \
-  _c = (long)c;                                                \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name),            \
-                         "g" (_a),                     \
-                         "g" (_b),                     \
-                         "g" (_c)                      \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
-
-#define _syscall4(type, name, atype, a, btype, b,      \
-                  ctype, c, dtype, d)                  \
-type name(atype a, btype b, ctype c, dtype d)          \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  register long _a __asm__("er1");                     \
-  register long _b __asm__("er2");                     \
-  register long _c __asm__("er3");                     \
-  register long _d __asm__("er4");                     \
-  _a = (long)a;                                                \
-  _b = (long)b;                                                \
-  _c = (long)c;                                                \
-  _d = (long)d;                                                \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name),            \
-                         "g" (_a),                     \
-                         "g" (_b),                     \
-                         "g" (_c),                     \
-                         "g" (_d)                      \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
-
-#define _syscall5(type, name, atype, a, btype, b,      \
-                  ctype, c, dtype, d, etype, e)                \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  register long _a __asm__("er1");                     \
-  register long _b __asm__("er2");                     \
-  register long _c __asm__("er3");                     \
-  register long _d __asm__("er4");                     \
-  register long _e __asm__("er5");                     \
-  _a = (long)a;                                        \
-  _b = (long)b;                                        \
-  _c = (long)c;                                        \
-  _d = (long)d;                                        \
-  _e = (long)e;                                        \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name),            \
-                         "g" (_a),                     \
-                         "g" (_b),                     \
-                         "g" (_c),                     \
-                         "g" (_d),                     \
-                         "g" (_e)                      \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
-
-#define _syscall6(type, name, atype, a, btype, b,      \
-                  ctype, c, dtype, d, etype, e, ftype, f)      \
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f)        \
-{                                                      \
-  register long __res __asm__("er0");                  \
-  register long _a __asm__("er1");                     \
-  register long _b __asm__("er2");                     \
-  register long _c __asm__("er3");                     \
-  register long _d __asm__("er4");                     \
-  register long _e __asm__("er5");                     \
-  register long _f __asm__("er6");                     \
-  _a = (long)a;                                        \
-  _b = (long)b;                                        \
-  _c = (long)c;                                        \
-  _d = (long)d;                                        \
-  _e = (long)e;                                        \
-  _f = (long)f;                                        \
-  __asm__ __volatile__ ("mov.l %1,er0\n\t"             \
-                        "trapa #0\n\t"                 \
-                       : "=r" (__res)                  \
-                       : "g" (__NR_##name),            \
-                         "g" (_a),                     \
-                         "g" (_b),                     \
-                         "g" (_c),                     \
-                         "g" (_d),                     \
-                         "g" (_e)                      \
-                         "g" (_f)                      \
-                       : "cc", "memory");              \
-  __syscall_return(type, __res);                       \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 147e4ac1ebf0e44880c91f7fa4d27e296259c93b..5ae93afc67e1f019d6ce6d902ccac733a18a79fb 100644 (file)
@@ -7,5 +7,4 @@ header-y += ptrace-abi.h
 header-y += ucontext.h
 
 unifdef-y += mtrr.h
-unifdef-y += setup.h
 unifdef-y += vm86.h
index b01a7ec409ced2397bc8529051930dd4e3413b67..b8fa9557c532ffdebcfa6324a2b3d722514e336f 100644 (file)
@@ -4,7 +4,7 @@
 #ifdef __KERNEL__
 
 #include <asm/types.h>
-
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 struct alt_instr {
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _I386_ALTERNATIVE_H */
index b9529578fc37e4549d89520266a9b9e1605906ce..41a44319905fa884d58f53384d00376c60d51fb8 100644 (file)
@@ -37,18 +37,27 @@ extern void generic_apic_probe(void);
 /*
  * Basic functions accessing APICs.
  */
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define apic_write native_apic_write
+#define apic_write_atomic native_apic_write_atomic
+#define apic_read native_apic_read
+#endif
 
-static __inline void apic_write(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write(unsigned long reg,
+                                               unsigned long v)
 {
        *((volatile unsigned long *)(APIC_BASE+reg)) = v;
 }
 
-static __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write_atomic(unsigned long reg,
+                                                      unsigned long v)
 {
        xchg((volatile unsigned long *)(APIC_BASE+reg), v);
 }
 
-static __inline unsigned long apic_read(unsigned long reg)
+static __inline fastcall unsigned long native_apic_read(unsigned long reg)
 {
        return *((volatile unsigned long *)(APIC_BASE+reg));
 }
index 51a166242522ab9ab9fe5d998986a3ff8637e24e..c57441bb290502c449e4217684711a3b11c34a3f 100644 (file)
@@ -14,7 +14,7 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i) { (i) }
 
@@ -187,9 +187,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
        /* Modern 486+ processor */
        __i = i;
        __asm__ __volatile__(
-               LOCK_PREFIX "xaddl %0, %1;"
-               :"=r"(i)
-               :"m"(v->counter), "0"(i));
+               LOCK_PREFIX "xaddl %0, %1"
+               :"+r" (i), "+m" (v->counter)
+               : : "memory");
        return i + __i;
 
 #ifdef CONFIG_M386
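A minimal sketch (plain GCC inline asm, not the kernel's atomic_t plumbing) of the xaddl pattern used in the hunk above. The new "+r"/"+m" constraints mark both operands as read-write, so the compiler knows the instruction also updates the counter in memory; the old constraints listed v->counter only as an input.

static inline int demo_atomic_add_return(int delta, int *counter)
{
	int i = delta;

	/* XADD leaves the old memory value in %0 and stores old + %0. */
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (i), "+m" (*counter)
		     : : "memory");
	return i + delta;		/* old value + delta = new value */
}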
index 96b228e6e79cd1c086df76d6c00f7646a06b98f9..8ce79a6fa8919a7bf7557f39aa60fb6e71d1af39 100644 (file)
@@ -12,4 +12,8 @@
 #define EXTENDED_VGA   0xfffe          /* 80x50 mode */
 #define ASK_VGA                0xfffd          /* ask for it at bootup */
 
-#endif
+/* Physical address where kernel should be loaded. */
+#define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \
+                               & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
+#endif /* _LINUX_BOOT_H */
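The LOAD_PHYSICAL_ADDR expression above is the usual round-up-to-a-power-of-two-alignment idiom. A standalone check with an assumed CONFIG_PHYSICAL_ALIGN of 4 MB (illustration only, not kernel code):

#include <stdio.h>

#define DEMO_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

int main(void)
{
	printf("%#lx\n", DEMO_ALIGN_UP(0x100000UL, 0x400000UL));	/* 0x400000 */
	printf("%#lx\n", DEMO_ALIGN_UP(0x100000UL, 0x100000UL));	/* 0x100000 */
	return 0;
}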
index 592ffeeda45e7745c4554201ce8a78da1a80df38..38f1aebbbdb5f5587df51976e13fd1f352fd36eb 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/msr.h>
+#include <asm/paravirt.h>
 
 static int __init no_halt(char *s)
 {
@@ -91,6 +92,9 @@ static void __init check_fpu(void)
 
 static void __init check_hlt(void)
 {
+       if (paravirt_enabled())
+               return;
+
        printk(KERN_INFO "Checking 'hlt' instruction... ");
        if (!boot_cpu_data.hlt_works_ok) {
                printk("disabled\n");
index b1bc7b1b64b0e304d13f907e4d5cd607417bdfee..9d914e1e4aad446e649dd8678d856768e637a38d 100644 (file)
@@ -13,6 +13,9 @@ struct i386_cpu {
 extern int arch_register_cpu(int num);
 #ifdef CONFIG_HOTPLUG_CPU
 extern void arch_unregister_cpu(int);
+extern int enable_cpu_hotplug;
+#else
+#define enable_cpu_hotplug     0
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
index d314ebb3d59e0efad1f30f1b9546657787bf4758..3f92b94e0d75893e2e82e8de090cbdaff80a905f 100644 (file)
@@ -31,7 +31,7 @@
 #define X86_FEATURE_PSE36      (0*32+17) /* 36-bit PSEs */
 #define X86_FEATURE_PN         (0*32+18) /* Processor serial number */
 #define X86_FEATURE_CLFLSH     (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES       (0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS         (0*32+21) /* Debug Store */
 #define X86_FEATURE_ACPI       (0*32+22) /* ACPI via MSR */
 #define X86_FEATURE_MMX                (0*32+23) /* Multimedia Extensions */
 #define X86_FEATURE_FXSR       (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore) */
@@ -73,6 +73,8 @@
 #define X86_FEATURE_UP         (3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS       (3*32+12)  /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                (3*32+13)  /* Branch Trace Store */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3       (4*32+ 0) /* Streaming SIMD Extensions-3 */
 #define cpu_has_phe_enabled    boot_cpu_has(X86_FEATURE_PHE_EN)
 #define cpu_has_pmm            boot_cpu_has(X86_FEATURE_PMM)
 #define cpu_has_pmm_enabled    boot_cpu_has(X86_FEATURE_PMM_EN)
+#define cpu_has_ds             boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs           boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_bts            boot_cpu_has(X86_FEATURE_BTS)
 
 #endif /* __ASM_I386_CPUFEATURE_H */
 
index 3cbbecd790161babf7fa1df4a7c549d4c2107276..5252ee0f6d7a9ce0fd4825ce4381051334b55aea 100644 (file)
@@ -1,13 +1,14 @@
 #ifndef _I386_CURRENT_H
 #define _I386_CURRENT_H
 
-#include <linux/thread_info.h>
+#include <asm/pda.h>
+#include <linux/compiler.h>
 
 struct task_struct;
 
-static __always_inline struct task_struct * get_current(void)
+static __always_inline struct task_struct *get_current(void)
 {
-       return current_thread_info()->task;
+       return read_pda(pcurrent);
 }
  
 #define current get_current()
index b1c7650dc7b9e1a841552b832a0d8a2badb6e7fb..32d6678d0bbf2b1815019f1eaec21b60c2f17fee 100644 (file)
@@ -7,6 +7,7 @@
  * Delay routines calling functions in arch/i386/lib/delay.c
  */
  
+/* Undefined functions to get compile-time errors */
 extern void __bad_udelay(void);
 extern void __bad_ndelay(void);
 
@@ -15,13 +16,23 @@ extern void __ndelay(unsigned long nsecs);
 extern void __const_udelay(unsigned long usecs);
 extern void __delay(unsigned long loops);
 
+#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
+#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
+
+#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
+
+#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
 #define udelay(n) (__builtin_constant_p(n) ? \
        ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
        __udelay(n))
-       
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
 #define ndelay(n) (__builtin_constant_p(n) ? \
        ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
        __ndelay(n))
+#endif
 
 void use_tsc_delay(void);
 
index 5874ef119ffdc0cf42886585c2c8aed229cfb91e..f398cc456448187bff38c53ad81a314d2d063212 100644 (file)
@@ -4,8 +4,6 @@
 #include <asm/ldt.h>
 #include <asm/segment.h>
 
-#define CPU_16BIT_STACK_SIZE 1024
-
 #ifndef __ASSEMBLY__
 
 #include <linux/preempt.h>
@@ -16,8 +14,6 @@
 
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 
-DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-
 struct Xgt_desc_struct {
        unsigned short size;
        unsigned long address __attribute__((packed));
@@ -33,11 +29,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
        return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
 }
 
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
 extern struct desc_struct idt_table[];
 extern void set_intr_gate(unsigned int irq, void * addr);
 
@@ -64,8 +55,10 @@ static inline void pack_gate(__u32 *a, __u32 *b,
 #define DESCTYPE_DPL3  0x60    /* DPL-3 */
 #define DESCTYPE_S     0x10    /* !system */
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
 
 #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
 #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
@@ -88,6 +81,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
 #undef C
 }
 
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
 static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
 {
        __u32 *lp = (__u32 *)((char *)dt + entry*8);
@@ -95,9 +92,25 @@ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entr
        *(lp+1) = entry_b;
 }
 
-#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define set_ldt native_set_ldt
+#endif /* CONFIG_PARAVIRT */
+
+static inline fastcall void native_set_ldt(const void *addr,
+                                          unsigned int entries)
+{
+       if (likely(entries == 0))
+               __asm__ __volatile__("lldt %w0"::"q" (0));
+       else {
+               unsigned cpu = smp_processor_id();
+               __u32 a, b;
+
+               pack_descriptor(&a, &b, (unsigned long)addr,
+                               entries * sizeof(struct desc_struct) - 1,
+                               DESCTYPE_LDT, 0);
+               write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+               __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+       }
+}
 
 static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
 {
@@ -115,14 +128,6 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
        write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
 }
 
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
-{
-       __u32 a, b;
-       pack_descriptor(&a, &b, (unsigned long)addr,
-                       entries * sizeof(struct desc_struct) - 1,
-                       DESCTYPE_LDT, 0);
-       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
-}
 
 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
 
@@ -153,35 +158,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri
 
 static inline void clear_LDT(void)
 {
-       int cpu = get_cpu();
-
-       set_ldt_desc(cpu, &default_ldt[0], 5);
-       load_LDT_desc();
-       put_cpu();
+       set_ldt(NULL, 0);
 }
 
 /*
  * load one particular LDT into the current CPU
  */
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock(mm_context_t *pc)
 {
-       void *segments = pc->ldt;
-       int count = pc->size;
-
-       if (likely(!count)) {
-               segments = &default_ldt[0];
-               count = 5;
-       }
-               
-       set_ldt_desc(cpu, segments, count);
-       load_LDT_desc();
+       set_ldt(pc->ldt, pc->size);
 }
 
 static inline void load_LDT(mm_context_t *pc)
 {
-       int cpu = get_cpu();
-       load_LDT_nolock(pc, cpu);
-       put_cpu();
+       preempt_disable();
+       load_LDT_nolock(pc);
+       preempt_enable();
 }
 
 static inline unsigned long get_desc_base(unsigned long *desc)
@@ -193,6 +185,29 @@ static inline unsigned long get_desc_base(unsigned long *desc)
        return base;
 }
 
+#else /* __ASSEMBLY__ */
+
+/*
+ * GET_DESC_BASE reads the descriptor base of the specified segment.
+ *
+ * Args:
+ *    idx - descriptor index
+ *    gdt - GDT pointer
+ *    base - 32bit register to which the base will be written
+ *    lo_w - lo word of the "base" register
+ *    lo_b - lo byte of the "base" register
+ *    hi_b - hi byte of the low word of the "base" register
+ *
+ * Example:
+ *    GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+ *    Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
+ */
+#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
+       movb idx*8+4(gdt), lo_b; \
+       movb idx*8+7(gdt), hi_b; \
+       shll $16, base; \
+       movw idx*8+2(gdt), lo_w;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
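For reference, the GET_DESC_BASE sequence above reassembles a base address that the GDT stores split across the descriptor: bits 0-15 in bytes 2-3, bits 16-23 in byte 4, bits 24-31 in byte 7. A C illustration of the same reassembly (not kernel code):

#include <stdint.h>

static uint32_t demo_desc_base(const uint8_t d[8])	/* one 8-byte descriptor */
{
	return (uint32_t)d[2] | ((uint32_t)d[3] << 8) |
	       ((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
}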
index 81999a3ebe7c4c712ac91da21fc963e24c638270..183eebeebbdce7a3d1794e79c67862cf93e8f745 100644 (file)
@@ -156,10 +156,10 @@ dma_get_cache_alignment(void)
        return (1 << INTERNODE_CACHE_SHIFT);
 }
 
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        flush_write_buffers();
index f7514fb6e8e48d5deef18e316823bf95815922b2..395077aba583000b5a22fe55f6dad22b04938c48 100644 (file)
@@ -38,6 +38,11 @@ extern struct e820map e820;
 
 extern int e820_all_mapped(unsigned long start, unsigned long end,
                           unsigned type);
+extern void find_max_pfn(void);
+extern void register_bootmem_low_pages(unsigned long max_low_pfn);
+extern void register_memory(void);
+extern void limit_regions(unsigned long long size);
+extern void print_memory_map(char *who);
 
 #endif/*!__ASSEMBLY__*/
 
index 3a05436f31c0e8252e01389f251d7d5225711583..45d21a0c95bf8127368f7fe273facaabf2f6a9ee 100644 (file)
@@ -91,7 +91,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
        pr_reg[7] = regs->xds;                          \
        pr_reg[8] = regs->xes;                          \
        savesegment(fs,pr_reg[9]);                      \
-       savesegment(gs,pr_reg[10]);                     \
+       pr_reg[10] = regs->xgs;                         \
        pr_reg[11] = regs->orig_eax;                    \
        pr_reg[12] = regs->eip;                         \
        pr_reg[13] = regs->xcs;                         \
index 946d97cfea23841ccbf919fe7cba1df0fb7abe45..438ef0ec7101c2f4b2fcf4fb711f99e81494712e 100644 (file)
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        if (op == FUTEX_OP_SET)
                __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                }
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index 8ffbb0f0745760f7747d850faf89d884c4cd93eb..fd2be593b06e76b20380d17214dc0c17736bd140 100644 (file)
@@ -122,6 +122,6 @@ struct genapic {
        APICFUNC(phys_pkg_id) \
        }
 
-extern struct genapic *genapic;
+extern struct genapic *genapic, apic_default;
 
 #endif
index bc1d6edae1edfe73b9d77c318fea69b9d0951328..434936c732d67f94f083bbec7198051243feba66 100644 (file)
@@ -76,7 +76,9 @@ static inline void __save_init_fpu( struct task_struct *tsk )
 
 #define __unlazy_fpu( tsk ) do { \
        if (task_thread_info(tsk)->status & TS_USEDFPU) \
-               save_init_fpu( tsk ); \
+               save_init_fpu( tsk );                   \
+       else                                            \
+               tsk->fpu_counter = 0;                   \
 } while (0)
 
 #define __clear_fpu( tsk )                                     \
@@ -118,6 +120,7 @@ static inline void save_init_fpu( struct task_struct *tsk )
 extern unsigned short get_fpu_cwd( struct task_struct *tsk );
 extern unsigned short get_fpu_swd( struct task_struct *tsk );
 extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
+extern asmlinkage void math_state_restore(void);
 
 /*
  * Signal frame handlers...
index 68df0dc3ab8ff3e52379a0e6a0afda589c9346b1..86ff5e83be2f6d45d1a209fb30be7eea2192a3df 100644 (file)
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void)
 
 #endif /* __KERNEL__ */
 
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#if defined(CONFIG_PARAVIRT)
+#include <asm/paravirt.h>
 #else
+
 #define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#endif
 
 static inline void slow_down_io(void) {
        __asm__ __volatile__(
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) {
                : : );
 }
 
+#endif
+
 #ifdef CONFIG_X86_NUMAQ
 extern void *xquad_portio;    /* Where the IO area was mapped */
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
index 331726b411282646928f286590963bcd0c865c7b..11761cdaae19a00c0faa83ce057ac68029c0b466 100644 (file)
@@ -37,8 +37,13 @@ static __inline__ int irq_canonicalize(int irq)
 extern int irqbalance_disable(char *str);
 #endif
 
+extern void quirk_intel_irqbalance(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(cpumask_t map);
 #endif
 
+void init_IRQ(void);
+void __init native_init_IRQ(void);
+
 #endif /* _ASM_IRQ_H */
index 3dd9c0b702704abfe23c0b8085f0fd84dcd3f5d5..a1b3f7f594a26e87d0dcc9902bd45bd4af1eaaf7 100644 (file)
@@ -1 +1,27 @@
-#include <asm-generic/irq_regs.h>
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the PDA.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_I386_IRQ_REGS_H
+#define _ASM_I386_IRQ_REGS_H
+
+#include <asm/pda.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+       return read_pda(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+       struct pt_regs *old_regs;
+
+       old_regs = read_pda(irq_regs);
+       write_pda(irq_regs, new_regs);
+
+       return old_regs;
+}
+
+#endif /* _ASM_I386_IRQ_REGS_H */
index e1bdb97c07faca1952b7734fcd4069f75a8b933f..17b18cf4fe9dc6bc5fb31bca36b55c144cd954e6 100644 (file)
@@ -10,6 +10,9 @@
 #ifndef _ASM_IRQFLAGS_H
 #define _ASM_IRQFLAGS_H
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #ifndef __ASSEMBLY__
 
 static inline unsigned long __raw_local_save_flags(void)
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void)
        return flags;
 }
 
-#define raw_local_save_flags(flags) \
-               do { (flags) = __raw_local_save_flags(); } while (0)
-
 static inline void raw_local_irq_restore(unsigned long flags)
 {
        __asm__ __volatile__(
@@ -66,18 +66,6 @@ static inline void halt(void)
        __asm__ __volatile__("hlt": : :"memory");
 }
 
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-       return !(flags & (1 << 9));
-}
-
-static inline int raw_irqs_disabled(void)
-{
-       unsigned long flags = __raw_local_save_flags();
-
-       return raw_irqs_disabled_flags(flags);
-}
-
 /*
  * For spinlocks, etc:
  */
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void)
        return flags;
 }
 
+#else
+#define DISABLE_INTERRUPTS(clobbers)   cli
+#define ENABLE_INTERRUPTS(clobbers)    sti
+#define ENABLE_INTERRUPTS_SYSEXIT      sti; sysexit
+#define INTERRUPT_RETURN               iret
+#define GET_CR0_INTO_EAX               movl %cr0, %eax
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+#define raw_local_save_flags(flags) \
+               do { (flags) = __raw_local_save_flags(); } while (0)
+
 #define raw_local_irq_save(flags) \
                do { (flags) = __raw_local_irq_save(); } while (0)
 
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+       return !(flags & (1 << 9));
+}
+
+static inline int raw_irqs_disabled(void)
+{
+       unsigned long flags = __raw_local_save_flags();
+
+       return raw_irqs_disabled_flags(flags);
+}
 #endif /* __ASSEMBLY__ */
 
 /*
index fb42099e7bd4eb043ceaaea57d12e5391c40dd7e..605e3ccb991bde9af67cda27af76fe8fc12b4fb9 100644 (file)
@@ -2,4 +2,6 @@
 
 /* no action for generic */
 
+#ifndef ARCH_SETUP
 #define ARCH_SETUP
+#endif
index 697673b555ce3335f4f84e9653e00e6e4685a999..a4b0aa3320e681f802cb7b297766ec58f26d7f9c 100644 (file)
@@ -21,6 +21,7 @@ struct info {
        long ___eax;
        long ___ds;
        long ___es;
+       long ___fs;
        long ___orig_eax;
        long ___eip;
        long ___cs;
index 62b7bf1840942013a0a258a039a03e90c8458a66..68ff102d6f5e9e19a92641ce43e40caff64a3e92 100644 (file)
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev,
                 * load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context, cpu);
+                       load_LDT_nolock(&next->context);
        }
 #ifdef CONFIG_SMP
        else {
@@ -56,14 +56,14 @@ static inline void switch_mm(struct mm_struct *prev,
                         * tlb flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context, cpu);
+                       load_LDT_nolock(&next->context);
                }
        }
 #endif
 }
 
-#define deactivate_mm(tsk, mm) \
-       asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+#define deactivate_mm(tsk, mm)                 \
+       asm("movl %0,%%fs": :"r" (0));
 
 #define activate_mm(prev, next) \
        switch_mm((prev),(next),NULL)
index 61b07332200683de0a2a04d4072847416bd4ebf6..3503ad66945ec4a488cbd192635e5cc7c7db1b4f 100644 (file)
@@ -120,13 +120,26 @@ static inline int pfn_valid(int pfn)
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
        __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(ignore, x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_node(ignore, x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages_node(ignore, x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-
+#define alloc_bootmem_node(pgdat, x)                                   \
+({                                                                     \
+       struct pglist_data  __attribute__ ((unused))                    \
+                               *__alloc_bootmem_node__pgdat = (pgdat); \
+       __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,        \
+                                               __pa(MAX_DMA_ADDRESS)); \
+})
+#define alloc_bootmem_pages_node(pgdat, x)                             \
+({                                                                     \
+       struct pglist_data  __attribute__ ((unused))                    \
+                               *__alloc_bootmem_node__pgdat = (pgdat); \
+       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,              \
+                                               __pa(MAX_DMA_ADDRESS)); \
+})
+#define alloc_bootmem_low_pages_node(pgdat, x)                         \
+({                                                                     \
+       struct pglist_data  __attribute__ ((unused))                    \
+                               *__alloc_bootmem_node__pgdat = (pgdat); \
+       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);          \
+})
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* _ASM_MMZONE_H_ */
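The statement-expression wrappers above rely on a small trick: assigning the pgdat argument to an unused, correctly typed local evaluates and type-checks it, even though this !CONFIG_NEED_MULTIPLE_NODES build always falls back to NODE_DATA(0). A self-contained sketch of the same pattern, with hypothetical names:

struct demo_pgdat { int node_id; };
extern void *demo_alloc(unsigned long size);

#define demo_alloc_on_node(pgdat, size)					\
({									\
	struct demo_pgdat __attribute__ ((unused))			\
				*__demo_check_pgdat = (pgdat);		\
	demo_alloc(size);	/* node argument checked, then ignored */ \
})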
index 424661d25bd320702f08c619c5404c6324647a08..02f8f541cbe021d0b795d9e260fe945e110e9e1a 100644 (file)
@@ -20,6 +20,8 @@ struct mod_arch_specific
 #define MODULE_PROC_FAMILY "586TSC "
 #elif defined CONFIG_M586MMX
 #define MODULE_PROC_FAMILY "586MMX "
+#elif defined CONFIG_MCORE2
+#define MODULE_PROC_FAMILY "CORE2 "
 #elif defined CONFIG_M686
 #define MODULE_PROC_FAMILY "686 "
 #elif defined CONFIG_MPENTIUMII
@@ -60,18 +62,12 @@ struct mod_arch_specific
 #error unknown processor family
 #endif
 
-#ifdef CONFIG_REGPARM
-#define MODULE_REGPARM "REGPARM "
-#else
-#define MODULE_REGPARM ""
-#endif
-
 #ifdef CONFIG_4KSTACKS
 #define MODULE_STACKSIZE "4KSTACKS "
 #else
 #define MODULE_STACKSIZE ""
 #endif
 
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
 
 #endif /* _ASM_I386_MODULE_H */
index 76feedf85a8a8ce41e8bd5611d3942661817b9dc..13bafb16e7afd516d7efc47cd164b5461ecc2eaa 100644 (file)
@@ -97,7 +97,6 @@ struct mpc_config_bus
 #define BUSTYPE_TC     "TC"
 #define BUSTYPE_VME    "VME"
 #define BUSTYPE_XPRESS "XPRESS"
-#define BUSTYPE_NEC98  "NEC98"
 
 struct mpc_config_ioapic
 {
@@ -182,7 +181,6 @@ enum mp_bustype {
        MP_BUS_EISA,
        MP_BUS_PCI,
        MP_BUS_MCA,
-       MP_BUS_NEC98
 };
 #endif
 
index 62b76cd96957da8ddfca0a0d1a1508b373be8ab5..5679d4993072650d3e1a77b53656b0f544904c58 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef __ASM_MSR_H
 #define __ASM_MSR_H
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
 /*
  * Access to machine-specific registers (available on 586 and better only)
  * Note: the rd* operations modify the parameters directly (without using
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
      __asm__ __volatile__("rdpmc" \
                          : "=a" (low), "=d" (high) \
                          : "c" (counter))
+#endif /* !CONFIG_PARAVIRT */
 
 /* symbolic names for some interesting MSRs */
 /* Intel defined MSRs. */
@@ -141,6 +146,10 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 #define MSR_IA32_MC0_ADDR              0x402
 #define MSR_IA32_MC0_MISC              0x403
 
+#define MSR_IA32_PEBS_ENABLE           0x3f1
+#define MSR_IA32_DS_AREA               0x600
+#define MSR_IA32_PERF_CAPABILITIES     0x345
+
 /* Pentium IV performance counter MSRs */
 #define MSR_P4_BPU_PERFCTR0            0x300
 #define MSR_P4_BPU_PERFCTR1            0x301
@@ -284,4 +293,13 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 #define MSR_TMTA_LRTI_READOUT          0x80868018
 #define MSR_TMTA_LRTI_VOLT_MHZ         0x8086801a
 
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0       0x309
+#define MSR_CORE_PERF_FIXED_CTR1       0x30a
+#define MSR_CORE_PERF_FIXED_CTR2       0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL   0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS    0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL      0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL  0x390
+
 #endif /* __ASM_MSR_H */
index 269d315719ca422d478e7d18cd441cf71c44438d..b04333ea6f31e9bd338b9842d1afeec1ea9f7ba4 100644 (file)
@@ -5,6 +5,9 @@
 #define ASM_NMI_H
 
 #include <linux/pm.h>
+#include <asm/irq.h>
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
 
 /**
  * do_nmi_callback
@@ -42,4 +45,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
                        void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+#endif
+
 #endif /* ASM_NMI_H */
index f5bf544c729a373aa4ec1832aa9fa3cd55a13800..fd3f64ace24872f1e0843049434c95fff4cf2516 100644 (file)
@@ -52,6 +52,7 @@ typedef struct { unsigned long long pgprot; } pgprot_t;
 #define pte_val(x)     ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pmd(x) ((pmd_t) { (x) } )
 #define HPAGE_SHIFT    21
+#include <asm-generic/pgtable-nopud.h>
 #else
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgd; } pgd_t;
@@ -59,6 +60,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define boot_pte_t pte_t /* or would you rather have a typedef */
 #define pte_val(x)     ((x).pte_low)
 #define HPAGE_SHIFT    22
+#include <asm-generic/pgtable-nopmd.h>
 #endif
 #define PTE_MASK       PAGE_MASK
 
@@ -112,18 +114,18 @@ extern int page_is_ram(unsigned long pagenr);
 
 #ifdef __ASSEMBLY__
 #define __PAGE_OFFSET          CONFIG_PAGE_OFFSET
-#define __PHYSICAL_START       CONFIG_PHYSICAL_START
 #else
 #define __PAGE_OFFSET          ((unsigned long)CONFIG_PAGE_OFFSET)
-#define __PHYSICAL_START       ((unsigned long)CONFIG_PHYSICAL_START)
 #endif
-#define __KERNEL_START         (__PAGE_OFFSET + __PHYSICAL_START)
 
 
 #define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
 #define VMALLOC_RESERVE                ((unsigned long)__VMALLOC_RESERVE)
 #define MAXMEM                 (-__PAGE_OFFSET-__VMALLOC_RESERVE)
 #define __pa(x)                        ((unsigned long)(x)-PAGE_OFFSET)
+/* __pa_symbol should be used for C visible symbols.
+   This seems to be the official gcc blessed way to do such arithmetic. */
+#define __pa_symbol(x)          __pa(RELOC_HIDE((unsigned long)(x),0))
 #define __va(x)                        ((void *)((unsigned long)(x)+PAGE_OFFSET))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 #ifdef CONFIG_FLATMEM
index 745dc5bd0fbc159e835fe530c074682ca8223efe..21b32466fcdcbbe42882192a0acee77eeab71ae6 100644 (file)
@@ -18,6 +18,5 @@
 #endif
 
 #define MAXHOSTNAMELEN 64      /* max length of hostname */
-#define COMMAND_LINE_SIZE 256
 
 #endif
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644 (file)
index 0000000..9f06265
--- /dev/null
@@ -0,0 +1,505 @@
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_STI_SYSEXIT 6
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0x0
+#define CLBR_EAX 0x1
+#define CLBR_ECX 0x2
+#define CLBR_EDX 0x4
+#define CLBR_ANY 0x7
+
+#ifndef __ASSEMBLY__
+struct thread_struct;
+struct Xgt_desc_struct;
+struct tss_struct;
+struct mm_struct;
+struct paravirt_ops
+{
+       unsigned int kernel_rpl;
+       int paravirt_enabled;
+       const char *name;
+
+       /*
+        * Patch may replace one of the defined code sequences with arbitrary
+        * code, subject to the same register constraints.  This generally
+        * means the code is not free to clobber any registers other than EAX.
+        * The patch function should return the number of bytes of code
+        * generated, as we nop pad the rest in generic code.
+        */
+       unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
+       void (*arch_setup)(void);
+       char *(*memory_setup)(void);
+       void (*init_IRQ)(void);
+
+       void (*banner)(void);
+
+       unsigned long (*get_wallclock)(void);
+       int (*set_wallclock)(unsigned long);
+       void (*time_init)(void);
+
+       /* All the function pointers here are declared as "fastcall"
+          so that we get a specific register-based calling
+          convention.  This makes it easier to implement inline
+          assembler replacements. */
+
+       void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+                     unsigned int *ecx, unsigned int *edx);
+
+       unsigned long (fastcall *get_debugreg)(int regno);
+       void (fastcall *set_debugreg)(int regno, unsigned long value);
+
+       void (fastcall *clts)(void);
+
+       unsigned long (fastcall *read_cr0)(void);
+       void (fastcall *write_cr0)(unsigned long);
+
+       unsigned long (fastcall *read_cr2)(void);
+       void (fastcall *write_cr2)(unsigned long);
+
+       unsigned long (fastcall *read_cr3)(void);
+       void (fastcall *write_cr3)(unsigned long);
+
+       unsigned long (fastcall *read_cr4_safe)(void);
+       unsigned long (fastcall *read_cr4)(void);
+       void (fastcall *write_cr4)(unsigned long);
+
+       unsigned long (fastcall *save_fl)(void);
+       void (fastcall *restore_fl)(unsigned long);
+       void (fastcall *irq_disable)(void);
+       void (fastcall *irq_enable)(void);
+       void (fastcall *safe_halt)(void);
+       void (fastcall *halt)(void);
+       void (fastcall *wbinvd)(void);
+
+       /* err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+       u64 (fastcall *read_msr)(unsigned int msr, int *err);
+       int (fastcall *write_msr)(unsigned int msr, u64 val);
+
+       u64 (fastcall *read_tsc)(void);
+       u64 (fastcall *read_pmc)(void);
+
+       void (fastcall *load_tr_desc)(void);
+       void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
+       void (fastcall *load_idt)(const struct Xgt_desc_struct *);
+       void (fastcall *store_gdt)(struct Xgt_desc_struct *);
+       void (fastcall *store_idt)(struct Xgt_desc_struct *);
+       void (fastcall *set_ldt)(const void *desc, unsigned entries);
+       unsigned long (fastcall *store_tr)(void);
+       void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
+       void (fastcall *write_ldt_entry)(void *dt, int entrynum,
+                                        u32 low, u32 high);
+       void (fastcall *write_gdt_entry)(void *dt, int entrynum,
+                                        u32 low, u32 high);
+       void (fastcall *write_idt_entry)(void *dt, int entrynum,
+                                        u32 low, u32 high);
+       void (fastcall *load_esp0)(struct tss_struct *tss,
+                                  struct thread_struct *thread);
+
+       void (fastcall *set_iopl_mask)(unsigned mask);
+
+       void (fastcall *io_delay)(void);
+       void (*const_udelay)(unsigned long loops);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       void (fastcall *apic_write)(unsigned long reg, unsigned long v);
+       void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v);
+       unsigned long (fastcall *apic_read)(unsigned long reg);
+#endif
+
+       void (fastcall *flush_tlb_user)(void);
+       void (fastcall *flush_tlb_kernel)(void);
+       void (fastcall *flush_tlb_single)(u32 addr);
+
+       void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
+       void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
+       void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+       void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+       void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+#ifdef CONFIG_X86_PAE
+       void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval);
+       void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+       void (fastcall *set_pud)(pud_t *pudp, pud_t pudval);
+       void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+       void (fastcall *pmd_clear)(pmd_t *pmdp);
+#endif
+
+       /* These two are jmp to, not actually called. */
+       void (fastcall *irq_enable_sysexit)(void);
+       void (fastcall *iret)(void);
+};
+
+/* Mark a paravirt probe function. */
+#define paravirt_probe(fn)                                             \
+ static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
+               __attribute__((__section__(".paravirtprobe"))) = fn
+
+extern struct paravirt_ops paravirt_ops;
+
+#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+
+static inline void load_esp0(struct tss_struct *tss,
+                            struct thread_struct *thread)
+{
+       paravirt_ops.load_esp0(tss, thread);
+}
+
+#define ARCH_SETUP                     paravirt_ops.arch_setup();
+static inline unsigned long get_wallclock(void)
+{
+       return paravirt_ops.get_wallclock();
+}
+
+static inline int set_wallclock(unsigned long nowtime)
+{
+       return paravirt_ops.set_wallclock(nowtime);
+}
+
+static inline void do_time_init(void)
+{
+       return paravirt_ops.time_init();
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+                          unsigned int *ecx, unsigned int *edx)
+{
+       paravirt_ops.cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
+#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+
+#define clts() paravirt_ops.clts()
+
+#define read_cr0() paravirt_ops.read_cr0()
+#define write_cr0(x) paravirt_ops.write_cr0(x)
+
+#define read_cr2() paravirt_ops.read_cr2()
+#define write_cr2(x) paravirt_ops.write_cr2(x)
+
+#define read_cr3() paravirt_ops.read_cr3()
+#define write_cr3(x) paravirt_ops.write_cr3(x)
+
+#define read_cr4() paravirt_ops.read_cr4()
+#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
+#define write_cr4(x) paravirt_ops.write_cr4(x)
+
+static inline void raw_safe_halt(void)
+{
+       paravirt_ops.safe_halt();
+}
+
+static inline void halt(void)
+{
+       paravirt_ops.safe_halt();
+}
+#define wbinvd() paravirt_ops.wbinvd()
+
+#define get_kernel_rpl()  (paravirt_ops.kernel_rpl)
+
+#define rdmsr(msr,val1,val2) do {                              \
+       int _err;                                               \
+       u64 _l = paravirt_ops.read_msr(msr,&_err);              \
+       val1 = (u32)_l;                                         \
+       val2 = _l >> 32;                                        \
+} while(0)
+
+#define wrmsr(msr,val1,val2) do {                              \
+       u64 _l = ((u64)(val2) << 32) | (val1);                  \
+       paravirt_ops.write_msr((msr), _l);                      \
+} while(0)
+
+#define rdmsrl(msr,val) do {                                   \
+       int _err;                                               \
+       val = paravirt_ops.read_msr((msr),&_err);               \
+} while(0)
+
+#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+#define wrmsr_safe(msr,a,b) ({                                 \
+       u64 _l = ((u64)(b) << 32) | (a);                        \
+       paravirt_ops.write_msr((msr),_l);                       \
+})
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({                                 \
+       int _err;                                               \
+       u64 _l = paravirt_ops.read_msr(msr,&_err);              \
+       (*a) = (u32)_l;                                         \
+       (*b) = _l >> 32;                                        \
+       _err; })
+
+#define rdtsc(low,high) do {                                   \
+       u64 _l = paravirt_ops.read_tsc();                       \
+       low = (u32)_l;                                          \
+       high = _l >> 32;                                        \
+} while(0)
+
+#define rdtscl(low) do {                                       \
+       u64 _l = paravirt_ops.read_tsc();                       \
+       low = (int)_l;                                          \
+} while(0)
+
+#define rdtscll(val) (val = paravirt_ops.read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) do {                           \
+       u64 _l = paravirt_ops.read_pmc();                       \
+       low = (u32)_l;                                          \
+       high = _l >> 32;                                        \
+} while(0)
+
+#define load_TR_desc() (paravirt_ops.load_tr_desc())
+#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
+#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
+#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
+#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
+#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
+#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
+#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
+#define write_ldt_entry(dt, entry, low, high)                          \
+       (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
+#define write_gdt_entry(dt, entry, low, high)                          \
+       (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
+#define write_idt_entry(dt, entry, low, high)                          \
+       (paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
+#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void) {
+       paravirt_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+       paravirt_ops.io_delay();
+       paravirt_ops.io_delay();
+       paravirt_ops.io_delay();
+#endif
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Basic functions accessing APICs.
+ */
+static inline void apic_write(unsigned long reg, unsigned long v)
+{
+       paravirt_ops.apic_write(reg,v);
+}
+
+static inline void apic_write_atomic(unsigned long reg, unsigned long v)
+{
+       paravirt_ops.apic_write_atomic(reg,v);
+}
+
+static inline unsigned long apic_read(unsigned long reg)
+{
+       return paravirt_ops.apic_read(reg);
+}
+#endif
+
+
+#define __flush_tlb() paravirt_ops.flush_tlb_user()
+#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
+#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       paravirt_ops.set_pte(ptep, pteval);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+       paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+       paravirt_ops.set_pmd(pmdp, pmdval);
+}
+
+static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+       paravirt_ops.pte_update(mm, addr, ptep);
+}
+
+static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+       paravirt_ops.pte_update_defer(mm, addr, ptep);
+}
+
+#ifdef CONFIG_X86_PAE
+static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+       paravirt_ops.set_pte_atomic(ptep, pteval);
+}
+
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+       paravirt_ops.set_pte_present(mm, addr, ptep, pte);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pudval)
+{
+       paravirt_ops.set_pud(pudp, pudval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+       paravirt_ops.pte_clear(mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       paravirt_ops.pmd_clear(pmdp);
+}
+#endif
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+       u8 *instr;              /* original instructions */
+       u8 instrtype;           /* type of this instruction */
+       u8 len;                 /* length of original instruction */
+       u16 clobbers;           /* what registers you may clobber */
+};
+
+#define paravirt_alt(insn_string, typenum, clobber)    \
+       "771:\n\t" insn_string "\n" "772:\n"            \
+       ".pushsection .parainstructions,\"a\"\n"        \
+       "  .long 771b\n"                                \
+       "  .byte " __stringify(typenum) "\n"            \
+       "  .byte 772b-771b\n"                           \
+       "  .short " __stringify(clobber) "\n"           \
+       ".popsection"
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+       unsigned long f;
+
+       __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+                                          "call *%1;"
+                                          "popl %%edx; popl %%ecx",
+                                         PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+                            : "=a"(f): "m"(paravirt_ops.save_fl)
+                            : "memory", "cc");
+       return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+       __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+                                          "call *%1;"
+                                          "popl %%edx; popl %%ecx",
+                                         PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
+                            : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
+                            : "memory", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+       __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+                                          "call *%0;"
+                                          "popl %%edx; popl %%ecx",
+                                         PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+                            : : "m" (paravirt_ops.irq_disable)
+                            : "memory", "eax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+       __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+                                          "call *%0;"
+                                          "popl %%edx; popl %%ecx",
+                                         PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+                            : : "m" (paravirt_ops.irq_enable)
+                            : "memory", "eax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+       unsigned long f;
+
+       __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+                                          "call *%1; pushl %%eax;"
+                                          "call *%2; popl %%eax;"
+                                          "popl %%edx; popl %%ecx",
+                                         PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+                                         CLBR_NONE)
+                            : "=a"(f)
+                            : "m" (paravirt_ops.save_fl),
+                              "m" (paravirt_ops.irq_disable)
+                            : "memory", "cc");
+       return f;
+}
+
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"            \
+                    "call *paravirt_ops+%c[irq_disable];"              \
+                    "popl %%edx; popl %%ecx",                          \
+                    PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;"            \
+                    "call *paravirt_ops+%c[irq_enable];"               \
+                    "popl %%edx; popl %%ecx",                          \
+                    PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+#define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS \
+       ,                                                               \
+       [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \
+       [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
+#else  /* __ASSEMBLY__ */
+
+#define PARA_PATCH(ptype, clobbers, ops)       \
+771:;                                          \
+       ops;                                    \
+772:;                                          \
+       .pushsection .parainstructions,"a";     \
+        .long 771b;                            \
+        .byte ptype;                           \
+        .byte 772b-771b;                       \
+        .short clobbers;                       \
+       .popsection
+
+#define INTERRUPT_RETURN                               \
+       PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \
+       jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers)                   \
+       PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers,      \
+       pushl %ecx; pushl %edx;                         \
+       call *paravirt_ops+PARAVIRT_irq_disable;        \
+       popl %edx; popl %ecx)                           \
+
+#define ENABLE_INTERRUPTS(clobbers)                    \
+       PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers,       \
+       pushl %ecx; pushl %edx;                         \
+       call *%cs:paravirt_ops+PARAVIRT_irq_enable;     \
+       popl %edx; popl %ecx)
+
+#define ENABLE_INTERRUPTS_SYSEXIT                      \
+       PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY,      \
+       jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+
+#define GET_CR0_INTO_EAX                       \
+       call *paravirt_ops+PARAVIRT_read_cr0
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+#endif /* __ASM_PARAVIRT_H */
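A minimal sketch, outside the kernel, of the indirection the paravirt_ops structure above is built on: privileged operations go through a table of function pointers that a backend (native hardware by default, or a hypervisor) fills in during early boot. All demo_* names here are hypothetical.

struct demo_ops {
	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long flags);
};

static unsigned long demo_native_save_fl(void)
{
	unsigned long f;

	asm volatile("pushfl; popl %0" : "=r" (f));	/* i386 */
	return f;
}

static void demo_native_restore_fl(unsigned long f)
{
	asm volatile("pushl %0; popfl" : : "r" (f) : "memory", "cc");
}

/* A hypervisor backend would overwrite these pointers before they are used. */
static struct demo_ops demo_ops = {
	.save_fl	= demo_native_save_fl,
	.restore_fl	= demo_native_restore_fl,
};

static inline unsigned long demo_local_save_flags(void)
{
	return demo_ops.save_fl();	/* every caller goes through the table */
}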
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
new file mode 100644 (file)
index 0000000..2ba2736
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+   Per-processor Data Areas
+   Jeremy Fitzhardinge <jeremy@goop.org> 2006
+   Based on asm-x86_64/pda.h by Andi Kleen.
+ */
+#ifndef _I386_PDA_H
+#define _I386_PDA_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct i386_pda
+{
+       struct i386_pda *_pda;          /* pointer to self */
+
+       int cpu_number;
+       struct task_struct *pcurrent;   /* current process */
+       struct pt_regs *irq_regs;
+};
+
+extern struct i386_pda *_cpu_pda[];
+
+#define cpu_pda(i)     (_cpu_pda[i])
+
+#define pda_offset(field) offsetof(struct i386_pda, field)
+
+extern void __bad_pda_field(void);
+
+/* This variable is never instantiated.  It is only used as a stand-in
+   for the real per-cpu PDA memory, so that gcc can understand what
+   memory operations the inline asms() below are performing.  This
+   eliminates the need to make the asms volatile or have memory
+   clobbers, so gcc can readily analyse them. */
+extern struct i386_pda _proxy_pda;
+
+#define pda_to_op(op,field,val)                                                \
+       do {                                                            \
+               typedef typeof(_proxy_pda.field) T__;                   \
+               if (0) { T__ tmp__; tmp__ = (val); }                    \
+               switch (sizeof(_proxy_pda.field)) {                     \
+               case 1:                                                 \
+                       asm(op "b %1,%%gs:%c2"                          \
+                           : "+m" (_proxy_pda.field)                   \
+                           :"ri" ((T__)val),                           \
+                            "i"(pda_offset(field)));                   \
+                       break;                                          \
+               case 2:                                                 \
+                       asm(op "w %1,%%gs:%c2"                          \
+                           : "+m" (_proxy_pda.field)                   \
+                           :"ri" ((T__)val),                           \
+                            "i"(pda_offset(field)));                   \
+                       break;                                          \
+               case 4:                                                 \
+                       asm(op "l %1,%%gs:%c2"                          \
+                           : "+m" (_proxy_pda.field)                   \
+                           :"ri" ((T__)val),                           \
+                            "i"(pda_offset(field)));                   \
+                       break;                                          \
+               default: __bad_pda_field();                             \
+               }                                                       \
+       } while (0)
+
+#define pda_from_op(op,field)                                          \
+       ({                                                              \
+               typeof(_proxy_pda.field) ret__;                         \
+               switch (sizeof(_proxy_pda.field)) {                     \
+               case 1:                                                 \
+                       asm(op "b %%gs:%c1,%0"                          \
+                           : "=r" (ret__)                              \
+                           : "i" (pda_offset(field)),                  \
+                             "m" (_proxy_pda.field));                  \
+                       break;                                          \
+               case 2:                                                 \
+                       asm(op "w %%gs:%c1,%0"                          \
+                           : "=r" (ret__)                              \
+                           : "i" (pda_offset(field)),                  \
+                             "m" (_proxy_pda.field));                  \
+                       break;                                          \
+               case 4:                                                 \
+                       asm(op "l %%gs:%c1,%0"                          \
+                           : "=r" (ret__)                              \
+                           : "i" (pda_offset(field)),                  \
+                             "m" (_proxy_pda.field));                  \
+                       break;                                          \
+               default: __bad_pda_field();                             \
+               }                                                       \
+               ret__; })
+
+/* Return a pointer to a pda field */
+#define pda_addr(field)                                                        \
+       ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \
+                                     pda_offset(field)))
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
+
+#endif /* _I386_PDA_H */
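The pda_to_op()/pda_from_op() pair above dispatches on sizeof(field) to pick the operand width and leans on the never-instantiated _proxy_pda so gcc can see which memory the %gs-relative asm touches. A minimal user-space sketch of the same width-dispatch idea, with a plain pointer standing in for %gs; struct demo_pda, demo_base and demo_read() are illustrative names, not part of this patch:

        #include <stddef.h>
        #include <stdio.h>

        struct demo_pda {
                unsigned char  b;
                unsigned short w;
                unsigned int   l;
        };

        static struct demo_pda demo_instance = { 1, 2, 3 };
        static struct demo_pda *demo_base = &demo_instance;

        /* Like pda_from_op(): the switch collapses at compile time to one
         * access of the right width; the real header calls __bad_pda_field()
         * (an undefined symbol, hence a link error) for unsupported sizes. */
        #define demo_read(field)                                        \
                ({                                                      \
                        typeof(demo_base->field) ret__;                 \
                        switch (sizeof(demo_base->field)) {             \
                        case 1: case 2: case 4:                         \
                                ret__ = *(typeof(demo_base->field) *)   \
                                        ((char *)demo_base +            \
                                         offsetof(struct demo_pda, field)); \
                                break;                                  \
                        default:        /* __bad_pda_field() upstream */ \
                                ret__ = 0;                              \
                                break;                                  \
                        }                                               \
                        ret__;                                          \
                })

        int main(void)
        {
                printf("%u %u %u\n", (unsigned)demo_read(b),
                       (unsigned)demo_read(w), demo_read(l));
                return 0;
        }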
index 5764afa4b6a4b4a690a595cad8df504a8b43cf8c..510ae1d3486c7c289b80e765af719aa80c166c94 100644 (file)
@@ -1,6 +1,31 @@
 #ifndef __ARCH_I386_PERCPU__
 #define __ARCH_I386_PERCPU__
 
+#ifndef __ASSEMBLY__
 #include <asm-generic/percpu.h>
+#else
+
+/*
+ * PER_CPU finds an address of a per-cpu variable.
+ *
+ * Args:
+ *    var - variable name
+ *    cpu - 32bit register containing the current CPU number
+ *
+ * The resulting address is stored in the "cpu" argument.
+ *
+ * Example:
+ *    PER_CPU(cpu_gdt_descr, %ebx)
+ */
+#ifdef CONFIG_SMP
+#define PER_CPU(var, cpu) \
+       movl __per_cpu_offset(,cpu,4), cpu;     \
+       addl $per_cpu__/**/var, cpu;
+#else /* ! SMP */
+#define PER_CPU(var, cpu) \
+       movl $per_cpu__/**/var, cpu;
+#endif /* SMP */
+
+#endif /* !__ASSEMBLY__ */
 
 #endif /* __ARCH_I386_PERCPU__ */
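For reference, the example call from the comment above, PER_CPU(cpu_gdt_descr, %ebx), expands as follows (derived directly from the macro text; nothing here is new code in the patch):

        /* CONFIG_SMP: index the per-cpu offset table by CPU number,
         * then add the variable's link-time address. */
        movl __per_cpu_offset(,%ebx,4), %ebx
        addl $per_cpu__cpu_gdt_descr, %ebx

        /* !CONFIG_SMP: only one copy exists, so the address is a constant. */
        movl $per_cpu__cpu_gdt_descr, %ebx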
index 8d8d3b9ecdb02d0437b0e0393fc8e753cd439178..38c3fcc0676d9810c1f1ca4d168bdb519c6e0438 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _I386_PGTABLE_2LEVEL_H
 #define _I386_PGTABLE_2LEVEL_H
 
-#include <asm-generic/pgtable-nopmd.h>
-
 #define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
 #define pgd_ERROR(e) \
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
+#ifndef CONFIG_PARAVIRT
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
 
 #define pte_clear(mm,addr,xp)  do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
 
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
+#define raw_ptep_get_and_clear(xp)     __pte(xchg(&(xp)->pte_low, 0))
 
 #define pte_page(x)            pfn_to_page(pte_pfn(x))
 #define pte_none(x)            (!(x).pte_low)
index c2d701ea35beb0650ecd0cd8360f857a1e90ca31..7a2318f3830316ccb5d542ff05b0fdbb009f00b2 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _I386_PGTABLE_3LEVEL_H
 #define _I386_PGTABLE_3LEVEL_H
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -44,6 +42,7 @@ static inline int pte_exec_kernel(pte_t pte)
        return pte_x(pte);
 }
 
+#ifndef CONFIG_PARAVIRT
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte.  In places where this is
@@ -80,25 +79,6 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
 #define set_pud(pudptr,pudval) \
                (*(pudptr) = (pudval))
 
-/*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
- * We do not let the generic code free and clear pgd entries due to
- * this erratum.
- */
-static inline void pud_clear (pud_t * pud) { }
-
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
-
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
-                       pmd_index(address))
-
 /*
  * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
  * entry, so clear the bottom half first and enforce ordering with a compiler
@@ -118,9 +98,28 @@ static inline void pmd_clear(pmd_t *pmd)
        smp_wmb();
        *(tmp + 1) = 0;
 }
+#endif
+
+/*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+ * this erratum.
+ */
+static inline void pud_clear (pud_t * pud) { }
+
+#define pud_page(pud) \
+((struct page *) __va(pud_val(pud) & PAGE_MASK))
+
+#define pud_page_vaddr(pud) \
+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+                       pmd_index(address))
 
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
 {
        pte_t res;
 
index 7d398f493ddeedfc2d5e826d595028767ae64b76..e6a4723f0eb1f088affdc67cd42a8f5d7c02ce21 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/processor.h>
 #include <asm/fixmap.h>
 #include <linux/threads.h>
+#include <asm/paravirt.h>
 
 #ifndef _I386_BITOPS_H
 #include <asm/bitops.h>
@@ -34,14 +35,14 @@ struct vm_area_struct;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
+extern struct kmem_cache *pgd_cache;
+extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
 
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
 
@@ -246,6 +247,7 @@ static inline pte_t pte_mkhuge(pte_t pte)   { (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+#ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
  * has not been done using the set_pte / clear_pte interfaces.  It is used by
@@ -261,7 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte)   { (pte).pte_low |= _PAGE_PSE; return p
  */
 #define pte_update(mm, addr, ptep)             do { } while (0)
 #define pte_update_defer(mm, addr, ptep)       do { } while (0)
-
+#endif
 
 /*
  * We only update the dirty/accessed state if we set
@@ -275,7 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte)   { (pte).pte_low |= _PAGE_PSE; return p
 do {                                                                   \
        if (dirty) {                                                    \
                (ptep)->pte_low = (entry).pte_low;                      \
-               pte_update_defer((vma)->vm_mm, (addr), (ptep));         \
+               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
                flush_tlb_page(vma, address);                           \
        }                                                               \
 } while (0)
@@ -305,7 +307,7 @@ do {                                                                        \
        __dirty = pte_dirty(*(ptep));                                   \
        if (__dirty) {                                                  \
                clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);           \
-               pte_update_defer((vma)->vm_mm, (addr), (ptep));         \
+               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
                flush_tlb_page(vma, address);                           \
        }                                                               \
        __dirty;                                                        \
@@ -318,12 +320,20 @@ do {                                                                      \
        __young = pte_young(*(ptep));                                   \
        if (__young) {                                                  \
                clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);        \
-               pte_update_defer((vma)->vm_mm, (addr), (ptep));         \
+               pte_update_defer((vma)->vm_mm, (address), (ptep));      \
                flush_tlb_page(vma, address);                           \
        }                                                               \
        __young;                                                        \
 })
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+       pte_t pte = raw_ptep_get_and_clear(ptep);
+       pte_update(mm, addr, ptep);
+       return pte;
+}
+
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
index e0ddca94d50c8f42c71e05e1796086fdf84ad0ef..a52d65440429b99520ff6fa355ccd95d92611619 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/threads.h>
 #include <asm/percpu.h>
 #include <linux/cpumask.h>
+#include <linux/init.h>
 
 /* flag for disabling the tsc */
 extern int tsc_disable;
@@ -72,6 +73,7 @@ struct cpuinfo_x86 {
 #endif
        unsigned char x86_max_cores;    /* cpuid returned max cores value */
        unsigned char apicid;
+       unsigned short x86_clflush_size;
 #ifdef CONFIG_SMP
        unsigned char booted_cores;     /* number of cores as seen by OS */
        __u8 phys_proc_id;              /* Physical processor id. */
@@ -111,6 +113,8 @@ extern struct cpuinfo_x86 cpu_data[];
 extern int cpu_llc_id[NR_CPUS];
 extern char ignore_fpu_irq;
 
+void __init cpu_detect(struct cpuinfo_x86 *c);
+
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
@@ -143,8 +147,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
 #define X86_EFLAGS_ID  0x00200000 /* CPUID detection flag */
 
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
-                          unsigned int *ecx, unsigned int *edx)
+static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+                                        unsigned int *ecx, unsigned int *edx)
 {
        /* ecx is often an input as well as an output. */
        __asm__("cpuid"
@@ -155,59 +159,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                : "0" (*eax), "2" (*ecx));
 }
 
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-{
-       *eax = op;
-       *ecx = 0;
-       __cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-                              int *edx)
-{
-       *eax = op;
-       *ecx = count;
-       __cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       cpuid(op, &eax, &ebx, &ecx, &edx);
-       return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       cpuid(op, &eax, &ebx, &ecx, &edx);
-       return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       cpuid(op, &eax, &ebx, &ecx, &edx);
-       return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       cpuid(op, &eax, &ebx, &ecx, &edx);
-       return edx;
-}
-
 #define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
@@ -473,6 +424,7 @@ struct thread_struct {
        .vm86_info = NULL,                                              \
        .sysenter_cs = __KERNEL_CS,                                     \
        .io_bitmap_ptr = NULL,                                          \
+       .gs = __KERNEL_PDA,                                             \
 }
 
 /*
@@ -489,18 +441,9 @@ struct thread_struct {
        .io_bitmap      = { [ 0 ... IO_BITMAP_LONGS] = ~0 },            \
 }
 
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-       tss->esp0 = thread->esp0;
-       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
-       if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-               tss->ss1 = thread->sysenter_cs;
-               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-       }
-}
-
 #define start_thread(regs, new_eip, new_esp) do {              \
-       __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));       \
+       __asm__("movl %0,%%fs": :"r" (0));                      \
+       regs->xgs = 0;                                          \
        set_fs(USER_DS);                                        \
        regs->xds = __USER_DS;                                  \
        regs->xes = __USER_DS;                                  \
@@ -510,33 +453,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
        regs->esp = new_esp;                                    \
 } while (0)
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register)                            \
-               __asm__("movl %%db" #register ", %0"            \
-                       :"=r" (var))
-#define set_debugreg(value, register)                  \
-               __asm__("movl %0,%%db" #register                \
-                       : /* no output */                       \
-                       :"r" (value))
-
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void set_iopl_mask(unsigned mask)
-{
-       unsigned int reg;
-       __asm__ __volatile__ ("pushfl;"
-                             "popl %0;"
-                             "andl %1, %0;"
-                             "orl %2, %0;"
-                             "pushl %0;"
-                             "popfl"
-                               : "=&r" (reg)
-                               : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-}
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -628,6 +544,105 @@ static inline void rep_nop(void)
 
 #define cpu_relax()    rep_nop()
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+       tss->esp0 = thread->esp0;
+       /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+       if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+               tss->ss1 = thread->sysenter_cs;
+               wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+       }
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register)                            \
+               __asm__("movl %%db" #register ", %0"            \
+                       :"=r" (var))
+#define set_debugreg(value, register)                  \
+               __asm__("movl %0,%%db" #register                \
+                       : /* no output */                       \
+                       :"r" (value))
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static fastcall inline void native_set_iopl_mask(unsigned mask)
+{
+       unsigned int reg;
+       __asm__ __volatile__ ("pushfl;"
+                             "popl %0;"
+                             "andl %1, %0;"
+                             "orl %2, %0;"
+                             "pushl %0;"
+                             "popfl"
+                               : "=&r" (reg)
+                               : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+       *eax = op;
+       *ecx = 0;
+       __cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+                              int *edx)
+{
+       *eax = op;
+       *ecx = count;
+       __cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       cpuid(op, &eax, &ebx, &ecx, &edx);
+       return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       cpuid(op, &eax, &ebx, &ecx, &edx);
+       return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       cpuid(op, &eax, &ebx, &ecx, &edx);
+       return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+       unsigned int eax, ebx, ecx, edx;
+
+       cpuid(op, &eax, &ebx, &ecx, &edx);
+       return edx;
+}
+
 /* generic versions from gas */
 #define GENERIC_NOP1   ".byte 0x90\n"
 #define GENERIC_NOP2           ".byte 0x89,0xf6\n"
@@ -727,4 +742,7 @@ extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+extern int init_gdt(int cpu, struct task_struct *idle);
+extern void secondary_cpu_init(void);
+
 #endif /* __ASM_I386_PROCESSOR_H */
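The cpuid helpers moved into the !CONFIG_PARAVIRT branch above are plain inline asm, so the same pattern can be exercised from user space. A small standalone sketch (demo_cpuid is an illustrative name; leaf 0 returns the highest standard leaf in EAX and the vendor string in EBX/EDX/ECX):

        #include <stdio.h>
        #include <string.h>

        static inline void demo_cpuid(unsigned int op, unsigned int *eax,
                                      unsigned int *ebx, unsigned int *ecx,
                                      unsigned int *edx)
        {
                *eax = op;
                *ecx = 0;       /* clear ecx, as the kernel helper does */
                __asm__("cpuid"
                        : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                        : "0" (*eax), "2" (*ecx));
        }

        int main(void)
        {
                unsigned int a, b, c, d;
                char vendor[13];

                demo_cpuid(0, &a, &b, &c, &d);
                memcpy(vendor + 0, &b, 4);
                memcpy(vendor + 4, &d, 4);
                memcpy(vendor + 8, &c, 4);
                vendor[12] = '\0';
                printf("max standard leaf %u, vendor \"%s\"\n", a, vendor);
                return 0;
        }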
index d505f501077a67bc77b01729a724b61eb6e26b7a..bdbc894339b470b55fa6ed7848602bd8e5f4a071 100644 (file)
@@ -16,6 +16,8 @@ struct pt_regs {
        long eax;
        int  xds;
        int  xes;
+       /* int  xfs; */
+       int  xgs;
        long orig_eax;
        long eip;
        int  xcs;
index bc598d6388e30ad44db8ae857402d7af4a2db3aa..041906f3c6df73375abda597ba38e63922ffb542 100644 (file)
@@ -75,8 +75,8 @@ struct rw_semaphore {
 
 
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-       __RWSEM_DEP_MAP_INIT(name) }
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
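With the initializer fix above, a static DECLARE_RWSEM(foo) expands roughly to the following, so the embedded spinlock now carries its own lockdep name instead of the generic SPIN_LOCK_UNLOCKED initializer (a sketch of the expansion, not literal preprocessor output):

        struct rw_semaphore foo = {
                RWSEM_UNLOCKED_VALUE,
                __SPIN_LOCK_UNLOCKED(foo.wait_lock),
                LIST_HEAD_INIT(foo.wait_list)
                /* plus the lockdep map when __RWSEM_DEP_MAP_INIT() is non-empty */
        };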
index b7ab59685ba7f1784769f7247ba839c048e5f72a..3c796af337762629038d39bc4ecc2031c7fff2a6 100644 (file)
@@ -39,7 +39,7 @@
  *  25 - APM BIOS support 
  *
  *  26 - ESPFIX small SS
- *  27 - unused
+ *  27 - PDA                           [ per-cpu private data area ]
  *  28 - unused
  *  29 - unused
  *  30 - unused
@@ -74,6 +74,9 @@
 #define GDT_ENTRY_ESPFIX_SS            (GDT_ENTRY_KERNEL_BASE + 14)
 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
 
+#define GDT_ENTRY_PDA                  (GDT_ENTRY_KERNEL_BASE + 15)
+#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
+
 #define GDT_ENTRY_DOUBLEFAULT_TSS      31
 
 /*
 #define SEGMENT_LDT            0x4
 #define SEGMENT_GDT            0x0
 
+#ifndef CONFIG_PARAVIRT
 #define get_kernel_rpl()  0
 #endif
+#endif
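Assuming GDT_ENTRY_KERNEL_BASE is 12 (its usual i386 value, not shown in this hunk), the new PDA descriptor works out as below, matching the "27 - PDA" note in the layout comment:

        GDT_ENTRY_PDA = 12 + 15 = 27
        __KERNEL_PDA  = 27 * 8  = 216 = 0xd8   /* RPL 0, GDT selector */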
index 2734909eff840f82b676b8ed8cb7ab208ea6a473..67659dbaf120a8dd4ec816bcde1734736fd20026 100644 (file)
@@ -6,6 +6,8 @@
 #ifndef _i386_SETUP_H
 #define _i386_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 #include <linux/pfn.h>
 
  */
 #define MAXMEM_PFN     PFN_DOWN(MAXMEM)
 #define MAX_NONPAE_PFN (1 << 20)
-#endif
 
 #define PARAM_SIZE 4096
-#define COMMAND_LINE_SIZE 256
 
 #define OLD_CL_MAGIC_ADDR      0x90020
 #define OLD_CL_MAGIC           0xA33F
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE];
 struct e820entry;
 
 char * __init machine_specific_memory_setup(void);
+char *memory_setup(void);
 
 int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
 int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
@@ -78,4 +79,6 @@ void __init add_memory_region(unsigned long long start,
 
 #endif /* __ASSEMBLY__ */
 
+#endif  /*  __KERNEL__  */
+
 #endif /* _i386_SETUP_H */
index bd59c1508e7127bd592bdd09d9d69e6a528ba3e9..64fe624c02caa5ed80838428a5366aa385924888 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <asm/pda.h>
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -56,7 +57,7 @@ extern void cpu_uninit(void);
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (read_pda(cpu_number))
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
index c18b71fae6b38f6ec0a524509deb4a229de002ad..d3bcebed60ca6ef4895b7a22ea139023f4a6d49f 100644 (file)
@@ -7,8 +7,14 @@
 #include <asm/processor.h>
 #include <linux/compiler.h>
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define CLI_STRING     "cli"
 #define STI_STRING     "sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
+#endif /* CONFIG_PARAVIRT */
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -53,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 {
        asm volatile(
                "\n1:\t"
-               LOCK_PREFIX " ; decb %0\n\t"
+               LOCK_PREFIX " ; decb %[slock]\n\t"
                "jns 5f\n"
                "2:\t"
-               "testl $0x200, %1\n\t"
+               "testl $0x200, %[flags]\n\t"
                "jz 4f\n\t"
                STI_STRING "\n"
                "3:\t"
                "rep;nop\n\t"
-               "cmpb $0, %0\n\t"
+               "cmpb $0, %[slock]\n\t"
                "jle 3b\n\t"
                CLI_STRING "\n\t"
                "jmp 1b\n"
                "4:\t"
                "rep;nop\n\t"
-               "cmpb $0, %0\n\t"
+               "cmpb $0, %[slock]\n\t"
                "jg 1b\n\t"
                "jmp 4b\n"
                "5:\n\t"
-               : "+m" (lock->slock) : "r" (flags) : "memory");
+               : [slock] "+m" (lock->slock)
+               : [flags] "r" (flags)
+                 CLI_STI_INPUT_ARGS
+               : "memory" CLI_STI_CLOBBERS);
 }
 #endif
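The rewrite above switches the spin-lock asm from positional operands (%0, %1) to named ones so the paravirt CLI_STI_INPUT_ARGS/CLI_STI_CLOBBERS lists can be appended without renumbering everything. A tiny standalone illustration of the named-operand syntax itself (add_one is just an example, not kernel code):

        #include <stdio.h>

        static int add_one(int v)
        {
                /* "[val]" names the operand, so the template refers to
                 * %[val] instead of a positional %0. */
                asm("addl $1, %[val]" : [val] "+r" (v));
                return v;
        }

        int main(void)
        {
                printf("%d\n", add_one(41));    /* prints 42 */
                return 0;
        }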
 
index 59efe849f351f0bc556b73e1f54dcfec1d09463a..4da9345c15001803a3e7786811f685a0ec5c4616 100644 (file)
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-       volatile unsigned int slock;
+       unsigned int slock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
 
 typedef struct {
-       volatile unsigned int lock;
+       unsigned int lock;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
index 08be1e5009d4d773dbb77078674450ceb117f6ba..8dbaafe611ffc744b426d944bb893c54445f2e32 100644 (file)
@@ -6,29 +6,14 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-static inline int
-arch_prepare_suspend(void)
-{
-       /* If you want to make non-PSE machine work, turn off paging
-           in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so
-           it could work...  */
-       if (!cpu_has_pse) {
-               printk(KERN_ERR "PSE is required for swsusp.\n");
-               return -EPERM;
-       }
-       return 0;
-}
+static inline int arch_prepare_suspend(void) { return 0; }
 
 /* image of the saved processor state */
 struct saved_context {
        u16 es, fs, gs, ss;
        unsigned long cr0, cr2, cr3, cr4;
-       u16 gdt_pad;
-       u16 gdt_limit;
-       unsigned long gdt_base;
-       u16 idt_pad;
-       u16 idt_limit;
-       unsigned long idt_base;
+       struct Xgt_desc_struct gdt;
+       struct Xgt_desc_struct idt;
        u16 ldt;
        u16 tss;
        unsigned long tr;
index a6dabbcd6e6a71ec61d06b3eb74d21d3181e1aad..a6d20d9a1a307b0f6bbf55351e91fb26f48ad6b0 100644 (file)
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define write_cr4(x) \
        __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
 
-/*
- * Clear and set 'TS' bit respectively
- */
+#define wbinvd() \
+       __asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
 #define clts() __asm__ __volatile__ ("clts")
+#endif/* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
 #define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
 
-#define wbinvd() \
-       __asm__ __volatile__ ("wbinvd": : :"memory")
-
 static inline unsigned long get_limit(unsigned long segment)
 {
        unsigned long __limit;
index 54d6d7aea938cf4fb1d8266418f9df1f790274c0..46d32ad9208256ab045785cdc355b1253860a486 100644 (file)
@@ -95,15 +95,7 @@ static inline struct thread_info *current_thread_info(void)
 
 /* thread information allocation */
 #ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk)                                 \
-       ({                                                      \
-               struct thread_info *ret;                        \
-                                                               \
-               ret = kmalloc(THREAD_SIZE, GFP_KERNEL);         \
-               if (ret)                                        \
-                       memset(ret, 0, THREAD_SIZE);            \
-               ret;                                            \
-       })
+#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
 #else
 #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
 #endif
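The DEBUG_STACK_USAGE variant above keeps its zero-fill behaviour because kzalloc() is a zeroing kmalloc(); a sketch of the semantics in kernel context (kzalloc_like is an illustrative name, not the real slab implementation):

        static inline void *kzalloc_like(size_t size, gfp_t flags)
        {
                void *p = kmalloc(size, flags);

                if (p)
                        memset(p, 0, size);
                return p;
        }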
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
new file mode 100644 (file)
index 0000000..ea8065a
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef _ASMi386_TIME_H
+#define _ASMi386_TIME_H
+
+#include <linux/efi.h>
+#include "mach_time.h"
+
+static inline unsigned long native_get_wallclock(void)
+{
+       unsigned long retval;
+
+       if (efi_enabled)
+               retval = efi_get_time();
+       else
+               retval = mach_get_cmos_time();
+
+       return retval;
+}
+
+static inline int native_set_wallclock(unsigned long nowtime)
+{
+       int retval;
+
+       if (efi_enabled)
+               retval = efi_set_rtc_mmss(nowtime);
+       else
+               retval = mach_set_rtc_mmss(nowtime);
+
+       return retval;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+
+#define get_wallclock() native_get_wallclock()
+#define set_wallclock(x) native_set_wallclock(x)
+#define do_time_init() time_init_hook()
+
+#endif /* CONFIG_PARAVIRT */
+
+#endif
index 360648b0f2b3888235e95f8b7420879934a9e0bc..4dd82840d53bafd18d3538f0a1a52ec1d1f204a5 100644 (file)
@@ -4,7 +4,15 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()                                                  \
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+#define __native_flush_tlb()                                           \
        do {                                                            \
                unsigned int tmpreg;                                    \
                                                                        \
@@ -19,7 +27,7 @@
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
  */
-#define __flush_tlb_global()                                           \
+#define __native_flush_tlb_global()                                    \
        do {                                                            \
                unsigned int tmpreg, cr4, cr4_orig;                     \
                                                                        \
@@ -36,6 +44,9 @@
                        : "memory");                                    \
        } while (0)
 
+#define __native_flush_tlb_single(addr)                                \
+       __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
+
 # define __flush_tlb_all()                                             \
        do {                                                            \
                if (cpu_has_pge)                                        \
@@ -46,9 +57,6 @@
 
 #define cpu_has_invlpg (boot_cpu_data.x86 > 3)
 
-#define __flush_tlb_single(addr) \
-       __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
index 4b4b295ccdb9b75deca70c45467571cd7fdf5c38..ad0a55bd782f203acb29b5bbea353783b9ee0ac6 100644 (file)
@@ -57,16 +57,6 @@ typedef u32 dma_addr_t;
 #endif
 typedef u64 dma64_addr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index beeeaf6b054a178db754757b6b9547fa784725d3..833fa1704ff995c32a412f6fdf09b9f62a9cdf43 100644 (file)
 #ifdef __KERNEL__
 
 #define NR_syscalls 320
-#include <linux/err.h>
-
-/*
- * user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-i386/errno.h>
- */
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-               errno = -(res); \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile ("int $0x80" \
-       : "=a" (__res) \
-       : "0" (__NR_##name)); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-                 "d" ((long)(arg3)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-         "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
-__syscall_return(type,__res); \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
-                  "int $0x80 ; pop %%ebx" \
-       : "=a" (__res) \
-       : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
-         "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-  struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
-__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
-                  "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
-                  "pop %%ebx ;  pop %%ebp" \
-       : "=a" (__res) \
-       : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
-         "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
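With the _syscallN() macros removed, user-space callers are expected to go through the C library instead; for example, glibc's syscall(2) wrapper covers the same ground as the deleted macros (a user-space sketch, not part of the patch):

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        int main(void)
        {
                /* Roughly what _syscall0(pid_t, getpid) used to generate. */
                long pid = syscall(SYS_getpid);

                printf("pid %ld\n", pid);
                return 0;
        }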
index 5031d693b89d35fb3f7cfce8db78a79bb1db5503..aa2c931e30dbd0f6540b894dc110021aa215e4bd 100644 (file)
@@ -71,6 +71,7 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
        info->regs.xss = __KERNEL_DS;
        info->regs.xds = __USER_DS;
        info->regs.xes = __USER_DS;
+       info->regs.xgs = __KERNEL_PDA;
 }
 
 extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
@@ -78,17 +79,13 @@ extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
                                                                           void *arg),
                                                void *arg);
 
-static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
 {
-#if 0 /* This can only work when selector register and EFLAGS saves/restores
-         are properly annotated (and tracked in UNW_REGISTER_INFO). */
-       return user_mode_vm(&info->regs);
-#else
-       return info->regs.eip < PAGE_OFFSET
+       return user_mode_vm(&info->regs)
+              || info->regs.eip < PAGE_OFFSET
               || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
-                   && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
+                  && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
               || info->regs.esp < PAGE_OFFSET;
-#endif
 }
 
 #else
index 952fd695738073e0c5e4639a85ade992b3349b39..a5edf517b992d51a3689cb1539a7dd12f1368c13 100644 (file)
@@ -145,26 +145,13 @@ struct vm86plus_struct {
  * at the end of the structure. Look at ptrace.h to see the "normal"
  * setup. For user space layout see 'struct vm86_regs' above.
  */
+#include <asm/ptrace.h>
 
 struct kernel_vm86_regs {
 /*
  * normal regs, with special meaning for the segment descriptors..
  */
-       long ebx;
-       long ecx;
-       long edx;
-       long esi;
-       long edi;
-       long ebp;
-       long eax;
-       long __null_ds;
-       long __null_es;
-       long orig_eax;
-       long eip;
-       unsigned short cs, __csh;
-       long eflags;
-       long esp;
-       unsigned short ss, __ssh;
+       struct pt_regs pt;
 /*
  * these are specific to v86 mode:
  */
index 15818a18bc520930035aed74691c317706da2df1..4a1e48b9f4031b2808d973d2679e7a52cdbbab11 100644 (file)
@@ -10,7 +10,6 @@ header-y += intrinsics.h
 header-y += perfmon_default_smpl.h
 header-y += ptrace_offsets.h
 header-y += rse.h
-header-y += setup.h
 header-y += ucontext.h
 
 unifdef-y += perfmon.h
index 99a8f8e1218c18283fbfe96baf1547d5e3b500d9..ebd5887f4b1a39102e4070b190ee5b55a5500e56 100644 (file)
@@ -50,7 +50,8 @@ dma_set_mask (struct device *dev, u64 mask)
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction dir)
 {
        /*
         * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
@@ -59,6 +60,6 @@ dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
        mb();
 }
 
-#define dma_is_consistent(dma_handle)  (1)     /* all we do is coherent memory... */
+#define dma_is_consistent(d, h)        (1)     /* all we do is coherent memory... */
 
 #endif /* _ASM_IA64_DMA_MAPPING_H */
index 07d77f3a8cbe278bbad15d99e80442b86cbd7d83..8a98a26541391ed17cbcebf97b0b98ef5a1efa31 100644 (file)
@@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index 9cb68e9b377e2c831f71187dc1314643b83fc933..393e04c42a2c127affc71db782b51214221e0934 100644 (file)
@@ -60,7 +60,7 @@ static inline void *pgtable_quicklist_alloc(void)
 static inline void pgtable_quicklist_free(void *pgtable_entry)
 {
 #ifdef CONFIG_NUMA
-       unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
+       int nid = page_to_nid(virt_to_page(pgtable_entry));
 
        if (unlikely(nid != numa_node_id())) {
                free_page((unsigned long)pgtable_entry);
index 52f4fa29abfc7e8854d672beb277dd46bba9f3d1..6a0b32202d4e0ee901b3c93f44dcd87d7761a738 100644 (file)
@@ -1,6 +1,11 @@
 /*
  * This is set up by the setup-routine at boot-time
  */
+
+#define COMMAND_LINE_SIZE       512
+
+#ifdef __KERNEL__
+
 #define PARAM                  ((unsigned char *)empty_zero_page)
 
 #define MOUNT_ROOT_RDONLY      (*(unsigned long *) (PARAM+0x000))
@@ -18,8 +23,6 @@
 
 #define SCREEN_INFO            (*(struct screen_info *) (PARAM+0x200))
 
-#define COMMAND_LINE_SIZE      (512)
-
 #define RAMDISK_IMAGE_START_MASK       (0x07FF)
 #define RAMDISK_PROMPT_FLAG            (0x8000)
 #define RAMDISK_LOAD_FLAG              (0x4000)
@@ -27,3 +30,5 @@
 extern unsigned long memory_start;
 extern unsigned long memory_end;
 
+#endif  /*  __KERNEL__  */
+
index 95aa34298d8211d85252976da8931d49f9ef5665..5b66bd3c6ed663cd6cb9d9dd4f7cba21eaec66f7 100644 (file)
 #ifdef __KERNEL__
 
 #define NR_syscalls 285
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-m32r/errno.h>
- */
-
-#include <asm/syscall.h>       /* SYSCALL_* */
-
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-       /* Avoid using "res" which is declared to be in register r0; \
-          errno might expand to a function call and clobber it.  */ \
-               int __err = -(res); \
-               errno = __err; \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__("r0"); \
-__asm__ __volatile__ (\
-       "trap #" SYSCALL_VECTOR "|| nop"\
-       : "=r" (__res) \
-       : "r" (__scno) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-       "trap #" SYSCALL_VECTOR "|| nop"\
-       : "=r" (__res) \
-       : "r" (__scno), "0" (__res) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-       "trap #" SYSCALL_VECTOR "|| nop"\
-       : "=r" (__res) \
-       : "r" (__scno), "0" (__res), "r" (__arg2) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-       "trap #" SYSCALL_VECTOR "|| nop"\
-       : "=r" (__res) \
-       : "r" (__scno), "0" (__res), "r" (__arg2), \
-               "r" (__arg3) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-       "trap #" SYSCALL_VECTOR "|| nop"\
-       : "=r" (__res) \
-       : "r" (__scno), "0" (__res), "r" (__arg2), \
-               "r" (__arg3), "r" (__arg4) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-       type5,arg5) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg5 __asm__ ("r4") = (long)(arg5); \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
-       "trap #" SYSCALL_VECTOR "|| nop"\
-       : "=r" (__res) \
-       : "r" (__scno), "0" (__res), "r" (__arg2), \
-               "r" (__arg3), "r" (__arg4), "r" (__arg5) \
-       : "memory"); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_STAT64
index d90d841d3dfde278269de086af882d87346a9892..00259ed6fc9532487dda91ec8873febfe0d33a59 100644 (file)
@@ -21,7 +21,7 @@ static inline int dma_get_cache_alignment(void)
        return 1 << L1_CACHE_SHIFT;
 }
 
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
@@ -41,7 +41,7 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
        dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        /* we use coherent allocation, so not much to do here. */
index 7facc9a46e74dde0ce6f792cab5fd3189fa1e003..2a8853cd655455b1cedbd639e4e8fcd0b146014f 100644 (file)
 #define MACH_Q40     10
 #define MACH_SUN3X   11
 
+#define COMMAND_LINE_SIZE 256
+
 #ifdef __KERNEL__
 
+#define CL_SIZE COMMAND_LINE_SIZE
+
 #ifndef __ASSEMBLY__
 extern unsigned long m68k_machtype;
 #endif /* !__ASSEMBLY__ */
@@ -355,8 +359,6 @@ extern int m68k_is040or060;
      */
 
 #define NUM_MEMINFO    4
-#define CL_SIZE                256
-#define COMMAND_LINE_SIZE      CL_SIZE
 
 #ifndef __ASSEMBLY__
 struct mem_info {
index ad4348058c66f18779e61b3f8a607b45cdd78f27..fdbb60e6a0d4310a2b61e75649ea4ca32ed45ac1 100644 (file)
 #ifdef __KERNEL__
 
 #define NR_syscalls            311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-       /* avoid using res which is declared to be in register d0; \
-          errno might expand to a function call and clobber it.  */ \
-               int __err = -(res); \
-               errno = __err; \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-__asm__ __volatile__ ("trap  #0" \
-                      : "+d" (__res) ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-__asm__ __volatile__ ("trap  #0" \
-                     : "+d" (__res) \
-                     : "d" (__a)  ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a,btype b) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-__asm__ __volatile__ ("trap  #0" \
-                     : "+d" (__res) \
-                      : "d" (__a), "d" (__b) \
-                    ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a,btype b,ctype c) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-__asm__ __volatile__ ("trap  #0" \
-                     : "+d" (__res) \
-                      : "d" (__a), "d" (__b), \
-                       "d" (__c) \
-                    ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name (atype a, btype b, ctype c, dtype d) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-__asm__ __volatile__ ("trap  #0" \
-                      : "+d" (__res) \
-                      : "d" (__a), "d" (__b), \
-                       "d" (__c), "d" (__d)  \
-                    ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-register long __e __asm__ ("%d5") = (long)(e); \
-__asm__ __volatile__ ("trap  #0" \
-                     : "+d" (__res) \
-                     : "d" (__a), "d" (__b), \
-                       "d" (__c), "d" (__d), "d" (__e)  \
-                     ); \
-__syscall_return(type,__res); \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index 45e7a2fd16893b0b830da7868e15dc3a7268316f..7b8f874f8429c8b8e526a7af5d780b1db13df806 100644 (file)
@@ -86,5 +86,6 @@ extern void (*mach_disable_irq)(unsigned int);
 #define enable_irq(x)  do { } while (0)
 #define disable_irq(x) do { } while (0)
 #define disable_irq_nosync(x)  disable_irq(x)
+#define irq_canonicalize(irq)  (irq)
 
 #endif /* _M68K_IRQ_H_ */
diff --git a/include/asm-m68knommu/rtc.h b/include/asm-m68knommu/rtc.h
new file mode 100644 (file)
index 0000000..eaf18ec
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-m68k/rtc.h>
index d2b0fcce41b2d71e935d53c2341a3dbbaadc5479..fb86bb2a6078712d907a2f3705d4147538a503de 100644 (file)
@@ -1,5 +1,10 @@
+#ifdef __KERNEL__
+
 #include <asm-m68k/setup.h>
 
 /* We have a bigger command line buffer. */
 #undef COMMAND_LINE_SIZE
+
+#endif  /*  __KERNEL__  */
+
 #define COMMAND_LINE_SIZE      512
index 5d570cedbb02a471d9be15162c4468b886b0e134..713a27f901cdb7b717dbeead3ae85baffa3248c4 100644 (file)
@@ -5,21 +5,17 @@ typedef int greg_t;
 #define NGREG 18
 typedef greg_t gregset_t[NGREG];
 
-#ifdef CONFIG_FPU
 typedef struct fpregset {
        int f_pcr;
        int f_psr;
        int f_fpiaddr;
        int f_fpregs[8][3];
 } fpregset_t;
-#endif
 
 struct mcontext {
        int version;
        gregset_t gregs;
-#ifdef CONFIG_FPU
        fpregset_t fpregs;
-#endif
 };
 
 #define MCONTEXT_VERSION 2
@@ -29,9 +25,7 @@ struct ucontext {
        struct ucontext  *uc_link;
        stack_t           uc_stack;
        struct mcontext   uc_mcontext;
-#ifdef CONFIG_FPU
        unsigned long     uc_filler[80];
-#endif
        sigset_t          uc_sigmask;   /* mask last for extensibility */
 };
 
index ebaf03197114d0cd831a85cdddf3a11dec6db3c2..82e03195f325730531775529bdc5d8bbfb50120e 100644 (file)
 #ifdef __KERNEL__
 
 #define NR_syscalls            311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
-   <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-       /* avoid using res which is declared to be in register d0; \
-          errno might expand to a function call and clobber it.  */ \
-               int __err = -(res); \
-               errno = __err; \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name)                                                  \
-type name(void)                                                                        \
-{                                                                              \
-  long __res;                                                                  \
-  __asm__ __volatile__ ("movel %1, %%d0\n\t"                                   \
-                       "trap   #0\n\t"                                         \
-                       "movel  %%d0, %0"                                       \
-                       : "=g" (__res)                                          \
-                       : "i" (__NR_##name)                                     \
-                       : "cc", "%d0");                                         \
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {                               \
-    errno = -__res;                                                            \
-    __res = -1;                                                                        \
-  }                                                                            \
-  return (type)__res;                                                          \
-}
-
-#define _syscall1(type, name, atype, a)                                                \
-type name(atype a)                                                             \
-{                                                                              \
-  long __res;                                                                  \
-  __asm__ __volatile__ ("movel %2, %%d1\n\t"                                   \
-                       "movel  %1, %%d0\n\t"                                   \
-                       "trap   #0\n\t"                                         \
-                       "movel  %%d0, %0"                                       \
-                       : "=g" (__res)                                          \
-                       : "i" (__NR_##name),                                    \
-                         "g" ((long)a)                                         \
-                       : "cc", "%d0", "%d1");                                  \
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {                               \
-    errno = -__res;                                                            \
-    __res = -1;                                                                        \
-  }                                                                            \
-  return (type)__res;                                                          \
-}
-
-#define _syscall2(type, name, atype, a, btype, b)                              \
-type name(atype a, btype b)                                                    \
-{                                                                              \
-  long __res;                                                                  \
-  __asm__ __volatile__ ("movel %3, %%d2\n\t"                                   \
-                       "movel  %2, %%d1\n\t"                                   \
-                       "movel  %1, %%d0\n\t"                                   \
-                       "trap   #0\n\t"                                         \
-                       "movel  %%d0, %0"                                       \
-                       : "=g" (__res)                                          \
-                       : "i" (__NR_##name),                                    \
-                         "a" ((long)a),                                        \
-                         "g" ((long)b)                                         \
-                       : "cc", "%d0", "%d1", "%d2");                           \
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {                               \
-    errno = -__res;                                                            \
-    __res = -1;                                                                        \
-  }                                                                            \
-  return (type)__res;                                                          \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)                    \
-type name(atype a, btype b, ctype c)                                           \
-{                                                                              \
-  long __res;                                                                  \
-  __asm__ __volatile__ ("movel %4, %%d3\n\t"                                   \
-                       "movel  %3, %%d2\n\t"                                   \
-                       "movel  %2, %%d1\n\t"                                   \
-                       "movel  %1, %%d0\n\t"                                   \
-                       "trap   #0\n\t"                                         \
-                       "movel  %%d0, %0"                                       \
-                       : "=g" (__res)                                          \
-                       : "i" (__NR_##name),                                    \
-                         "a" ((long)a),                                        \
-                         "a" ((long)b),                                        \
-                         "g" ((long)c)                                         \
-                       : "cc", "%d0", "%d1", "%d2", "%d3");                    \
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {                               \
-    errno = -__res;                                                            \
-    __res = -1;                                                                        \
-  }                                                                            \
-  return (type)__res;                                                          \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d)          \
-type name(atype a, btype b, ctype c, dtype d)                                  \
-{                                                                              \
-  long __res;                                                                  \
-  __asm__ __volatile__ ("movel %5, %%d4\n\t"                                   \
-                       "movel  %4, %%d3\n\t"                                   \
-                       "movel  %3, %%d2\n\t"                                   \
-                       "movel  %2, %%d1\n\t"                                   \
-                       "movel  %1, %%d0\n\t"                                   \
-                       "trap   #0\n\t"                                         \
-                       "movel  %%d0, %0"                                       \
-                       : "=g" (__res)                                          \
-                       : "i" (__NR_##name),                                    \
-                         "a" ((long)a),                                        \
-                         "a" ((long)b),                                        \
-                         "a" ((long)c),                                        \
-                         "g" ((long)d)                                         \
-                       : "cc", "%d0", "%d1", "%d2", "%d3",                     \
-                         "%d4");                                               \
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {                               \
-    errno = -__res;                                                            \
-    __res = -1;                                                                        \
-  }                                                                            \
-  return (type)__res;                                                          \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e)        \
-type name(atype a, btype b, ctype c, dtype d, etype e)                         \
-{                                                                              \
-  long __res;                                                                  \
-  __asm__ __volatile__ ("movel %6, %%d5\n\t"                                   \
-                       "movel  %5, %%d4\n\t"                                   \
-                       "movel  %4, %%d3\n\t"                                   \
-                       "movel  %3, %%d2\n\t"                                   \
-                       "movel  %2, %%d1\n\t"                                   \
-                       "movel  %1, %%d0\n\t"                                   \
-                       "trap   #0\n\t"                                         \
-                       "movel  %%d0, %0"                                       \
-                       : "=g" (__res)                                          \
-                       : "i" (__NR_##name),                                    \
-                         "a" ((long)a),                                        \
-                         "a" ((long)b),                                        \
-                         "a" ((long)c),                                        \
-                         "a" ((long)d),                                        \
-                         "g" ((long)e)                                         \
-                       : "cc", "%d0", "%d1", "%d2", "%d3",                     \
-                         "%d4", "%d5");                                        \
-  if ((unsigned long)(__res) >= (unsigned long)(-125)) {                               \
-    errno = -__res;                                                            \
-    __res = -1;                                                                        \
-  }                                                                            \
-  return (type)__res;                                                          \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
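
The hunks above delete the old in-header _syscallN() wrappers (syscall number and arguments loaded into d0-d5, the trap #0 issued, and errno decoded from a return value in the -125..-1 range). Code that still relied on these macros can use the libc syscall(2) wrapper instead; a minimal user-space sketch, not part of this commit (the getpid example and the error handling are illustrative only):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>        /* __NR_* numbers */
    #include <unistd.h>             /* syscall() */

    int main(void)
    {
            /* Roughly what _syscall0(pid_t, getpid) used to expand to:
             * libc loads the syscall number, traps into the kernel and
             * converts a negative return value into errno for us.
             * (Illustrative only.)
             */
            long pid = syscall(__NR_getpid);

            if (pid < 0) {
                    perror("syscall");
                    return 1;
            }
            printf("pid = %ld\n", pid);
            return 0;
    }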
index 7978d8e11647d5fc783be1b72a7704c9f8876f7f..c1a2409bb52a171fc1c1c0b9070b7791f350e812 100644 (file)
@@ -15,6 +15,7 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
@@ -130,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
        unsigned long result;
 
+       smp_mb();
+
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
 
@@ -140,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -155,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -170,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                local_irq_restore(flags);
        }
 
+       smp_mb();
+
        return result;
 }
 
@@ -177,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
        unsigned long result;
 
+       smp_mb();
+
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
 
@@ -187,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                "       sc      %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -202,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                "       sc      %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -217,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                local_irq_restore(flags);
        }
 
+       smp_mb();
+
        return result;
 }
 
@@ -232,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
        unsigned long result;
 
+       smp_mb();
+
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
 
@@ -245,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "       beqzl   %0, 1b                                  \n"
                "        subu   %0, %1, %3                              \n"
                "       .set    reorder                                 \n"
-               "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -264,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "       beqz    %0, 1b                                  \n"
                "        subu   %0, %1, %3                              \n"
                "       .set    reorder                                 \n"
-               "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -281,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                local_irq_restore(flags);
        }
 
+       smp_mb();
+
        return result;
 }
 
@@ -375,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 
 #ifdef CONFIG_64BIT
 
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
 
 #define ATOMIC64_INIT(i)    { (i) }
 
@@ -484,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
        unsigned long result;
 
+       smp_mb();
+
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
 
@@ -494,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -509,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       addu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -524,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                local_irq_restore(flags);
        }
 
+       smp_mb();
+
        return result;
 }
 
@@ -531,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
        unsigned long result;
 
+       smp_mb();
+
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
 
@@ -541,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                "       scd     %0, %2                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -556,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                "       scd     %0, %2                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       subu    %0, %1, %3                              \n"
-               "       sync                                            \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
                : "Ir" (i), "m" (v->counter)
@@ -571,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                local_irq_restore(flags);
        }
 
+       smp_mb();
+
        return result;
 }
 
@@ -586,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
        unsigned long result;
 
+       smp_mb();
+
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
 
@@ -599,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "       beqzl   %0, 1b                                  \n"
                "        dsubu  %0, %1, %3                              \n"
                "       .set    reorder                                 \n"
-               "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -618,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "       beqz    %0, 1b                                  \n"
                "        dsubu  %0, %1, %3                              \n"
                "       .set    reorder                                 \n"
-               "       sync                                            \n"
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -635,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                local_irq_restore(flags);
        }
 
+       smp_mb();
+
        return result;
 }
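
Every value-returning atomic in the hunks above follows the same pattern: the per-iteration "sync" inside the ll/sc retry loop is removed and a single smp_mb() is issued before and after the whole operation, so the documented full-barrier behaviour is kept while the barrier itself (see the new barrier.h below) collapses to nothing unless CONFIG_WEAK_ORDERING and CONFIG_SMP are both set. A sketch of the kind of caller that depends on that ordering; the structure and function names are invented for illustration and are not part of this commit:

    #include <linux/slab.h>
    #include <asm/atomic.h>

    struct obj {
            atomic_t refcount;
            int payload;
    };

    /* Drop a reference; the last dropper frees the object (illustrative).
     * Stores to o->payload made before this call must be ordered before
     * the counter update can be observed to reach zero on another CPU,
     * which is what the smp_mb() now placed before the ll/sc sequence
     * provides.
     */
    static void obj_put(struct obj *o)
    {
            if (atomic_sub_return(1, &o->refcount) == 0)
                    kfree(o);
    }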
 
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
new file mode 100644 (file)
index 0000000..ed82631
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+/*
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier.  All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads.  This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies.  See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     b = 2;
+ *     memory_barrier();
+ *     p = &b;                         q = p;
+ *                                     read_barrier_depends();
+ *                                     d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends().  However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *     CPU 0                           CPU 1
+ *
+ *     a = 2;
+ *     memory_barrier();
+ *     b = 3;                          y = b;
+ *                                     read_barrier_depends();
+ *                                     x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
+
+#define read_barrier_depends()         do { } while(0)
+#define smp_read_barrier_depends()     do { } while(0)
+
+#ifdef CONFIG_CPU_HAS_SYNC
+#define __sync()                               \
+       __asm__ __volatile__(                   \
+               ".set   push\n\t"               \
+               ".set   noreorder\n\t"          \
+               ".set   mips2\n\t"              \
+               "sync\n\t"                      \
+               ".set   pop"                    \
+               : /* no output */               \
+               : /* no input */                \
+               : "memory")
+#else
+#define __sync()       do { } while(0)
+#endif
+
+#define __fast_iob()                           \
+       __asm__ __volatile__(                   \
+               ".set   push\n\t"               \
+               ".set   noreorder\n\t"          \
+               "lw     $0,%0\n\t"              \
+               "nop\n\t"                       \
+               ".set   pop"                    \
+               : /* no output */               \
+               : "m" (*(int *)CKSEG1)          \
+               : "memory")
+
+#define fast_wmb()     __sync()
+#define fast_rmb()     __sync()
+#define fast_mb()      __sync()
+#define fast_iob()                             \
+       do {                                    \
+               __sync();                       \
+               __fast_iob();                   \
+       } while (0)
+
+#ifdef CONFIG_CPU_HAS_WB
+
+#include <asm/wbflush.h>
+
+#define wmb()          fast_wmb()
+#define rmb()          fast_rmb()
+#define mb()           wbflush()
+#define iob()          wbflush()
+
+#else /* !CONFIG_CPU_HAS_WB */
+
+#define wmb()          fast_wmb()
+#define rmb()          fast_rmb()
+#define mb()           fast_mb()
+#define iob()          fast_iob()
+
+#endif /* !CONFIG_CPU_HAS_WB */
+
+#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
+#define __WEAK_ORDERING_MB     "       sync    \n"
+#else
+#define __WEAK_ORDERING_MB     "               \n"
+#endif
+
+#define smp_mb()       __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_rmb()      __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+#define smp_wmb()      __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
+
+#define set_mb(var, value) \
+       do { var = value; smp_mb(); } while (0)
+
+#endif /* __ASM_BARRIER_H */
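
The CPU 0 / CPU 1 example in the comment above, written out against the macros this file defines; a minimal sketch with illustrative names only. On the writer side smp_wmb() is sufficient (the comment's pseudo-code uses the stronger memory_barrier()), and on the reader side smp_read_barrier_depends() compiles away on MIPS but is required on architectures such as Alpha:

    #include <asm/system.h>         /* pulls in asm/barrier.h */

    static int a;                   /* initially 0 */
    static int b = 1;
    static int *p = &a;

    /* CPU 0: publish b through the pointer (illustrative). */
    static void publish(void)
    {
            b = 2;
            smp_wmb();                      /* order the store to b before the store to p */
            p = &b;
    }

    /* CPU 1: consume through the pointer (illustrative). */
    static int consume(void)
    {
            int *q, d;

            q = p;
            smp_read_barrier_depends();     /* pairs with the writer's barrier */
            d = *q;                         /* if q == &b, d is guaranteed to be 2 */
            return d;
    }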
index b9007411b60f977e8393e450d70bcfe3388a7f50..06445de1324bd3c058a507d08d0c747d6ab571ae 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
+ * Copyright (c) 1994 - 1997, 1999, 2000, 06  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
  */
 #ifndef _ASM_BITOPS_H
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 #include <linux/irqflags.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/bug.h>
 #include <asm/byteorder.h>             /* sigh ... */
 #include <asm/cpu-features.h>
@@ -204,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr,
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -226,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr,
                "       " __SC  "%2, %1                                 \n"
                "       beqz    %2, 1b                                  \n"
                "        and    %2, %0, %3                              \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -250,6 +245,8 @@ static inline int test_and_set_bit(unsigned long nr,
 
                return retval;
        }
+
+       smp_mb();
 }
 
 /*
@@ -275,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr,
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -298,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr,
                "       " __SC  "%2, %1                                 \n"
                "       beqz    %2, 1b                                  \n"
                "        and    %2, %0, %3                              \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -322,6 +313,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                return retval;
        }
+
+       smp_mb();
 }
 
 /*
@@ -346,9 +339,6 @@ static inline int test_and_change_bit(unsigned long nr,
                "       " __SC  "%2, %1                                 \n"
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -368,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr,
                "       " __SC  "\t%2, %1                               \n"
                "       beqz    %2, 1b                                  \n"
                "        and    %2, %0, %3                              \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    pop                                     \n"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
@@ -391,6 +378,8 @@ static inline int test_and_change_bit(unsigned long nr,
 
                return retval;
        }
+
+       smp_mb();
 }
 
 #include <asm-generic/bitops/non-atomic.h>
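
As with the atomics, the test_and_*_bit() operations above now rely on a trailing smp_mb() instead of a "sync" in each ll/sc iteration, preserving their documented behaviour as memory barriers. A sketch of a caller that uses that guarantee as a tiny trylock; the flag word and function names are illustrative, not part of this commit:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    static unsigned long busy_flags;
    #define DEV_BUSY       0

    int claim_device(void)
    {
            /* Nonzero means the bit was already set and someone else owns
             * the device; the implied barrier keeps later device accesses
             * after the bit is actually won on weakly ordered SMP systems.
             * (Illustrative only.)
             */
            if (test_and_set_bit(DEV_BUSY, &busy_flags))
                    return -EBUSY;
            return 0;
    }

    void release_device(void)
    {
            smp_mb__before_clear_bit();     /* order device accesses before the release */
            clear_bit(DEV_BUSY, &busy_flags);
    }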
index 900f472fdd2b5fc92fc6547893d5a9938a71731f..55a0152feb08c01246b8a58387a1f0eea0826670 100644 (file)
@@ -32,6 +32,7 @@ typedef struct {
        s32     val[2];
 } compat_fsid_t;
 typedef s32            compat_timer_t;
+typedef s32            compat_key_t;
 
 typedef s32            compat_int_t;
 typedef s32            compat_long_t;
@@ -146,4 +147,71 @@ static inline void __user *compat_alloc_user_space(long len)
        return (void __user *) (regs->regs[29] - len);
 }
 
+struct compat_ipc64_perm {
+       compat_key_t key;
+       __compat_uid32_t uid;
+       __compat_gid32_t gid;
+       __compat_uid32_t cuid;
+       __compat_gid32_t cgid;
+       compat_mode_t mode;
+       unsigned short seq;
+       unsigned short __pad2;
+       compat_ulong_t __unused1;
+       compat_ulong_t __unused2;
+};
+
+struct compat_semid64_ds {
+       struct compat_ipc64_perm sem_perm;
+       compat_time_t   sem_otime;
+       compat_time_t   sem_ctime;
+       compat_ulong_t  sem_nsems;
+       compat_ulong_t  __unused1;
+       compat_ulong_t  __unused2;
+};
+
+struct compat_msqid64_ds {
+       struct compat_ipc64_perm msg_perm;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+       compat_ulong_t  __unused1;
+#endif
+       compat_time_t   msg_stime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       compat_ulong_t  __unused1;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+       compat_ulong_t  __unused2;
+#endif
+       compat_time_t   msg_rtime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       compat_ulong_t  __unused2;
+#endif
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+       compat_ulong_t  __unused3;
+#endif
+       compat_time_t   msg_ctime;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       compat_ulong_t  __unused3;
+#endif
+       compat_ulong_t  msg_cbytes;
+       compat_ulong_t  msg_qnum;
+       compat_ulong_t  msg_qbytes;
+       compat_pid_t    msg_lspid;
+       compat_pid_t    msg_lrpid;
+       compat_ulong_t  __unused4;
+       compat_ulong_t  __unused5;
+};
+
+struct compat_shmid64_ds {
+       struct compat_ipc64_perm shm_perm;
+       compat_size_t   shm_segsz;
+       compat_time_t   shm_atime;
+       compat_time_t   shm_dtime;
+       compat_time_t   shm_ctime;
+       compat_pid_t    shm_cpid;
+       compat_pid_t    shm_lpid;
+       compat_ulong_t  shm_nattch;
+       compat_ulong_t  __unused1;
+       compat_ulong_t  __unused2;
+};
+
 #endif /* _ASM_COMPAT_H */
index 43288634c38a7bbca2793d295f801de22ffaf2ea..236d1a467cc7fb49ae8dd028c8b66c1d2e3cd471 100644 (file)
@@ -63,9 +63,9 @@ dma_get_cache_alignment(void)
        return 128;
 }
 
-extern int dma_is_consistent(dma_addr_t dma_addr);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
index ed023eae067447d24e19ac421d694de5bcb7e624..47e5679c235303f5812eb1d4cd39a2140af89016 100644 (file)
@@ -1,19 +1,21 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2006  Ralf Baechle (ralf@linux-mips.org)
+ */
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <asm/barrier.h>
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 #include <asm/war.h>
 
-#ifdef CONFIG_SMP
-#define __FUTEX_SMP_SYNC "     sync                                    \n"
-#else
-#define __FUTEX_SMP_SYNC
-#endif
-
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
 {                                                                      \
        if (cpu_has_llsc && R10000_LLSC_WAR) {                          \
@@ -27,7 +29,7 @@
                "       .set    mips3                           \n"     \
                "2:     sc      $1, %2                          \n"     \
                "       beqzl   $1, 1b                          \n"     \
-               __FUTEX_SMP_SYNC                                        \
+               __WEAK_ORDERING_MB                                      \
                "3:                                             \n"     \
                "       .set    pop                             \n"     \
                "       .set    mips0                           \n"     \
@@ -53,7 +55,7 @@
                "       .set    mips3                           \n"     \
                "2:     sc      $1, %2                          \n"     \
                "       beqz    $1, 1b                          \n"     \
-               __FUTEX_SMP_SYNC                                        \
+               __WEAK_ORDERING_MB                                      \
                "3:                                             \n"     \
                "       .set    pop                             \n"     \
                "       .set    mips0                           \n"     \
@@ -86,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -113,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
@@ -150,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
                "       .set    mips3                                   \n"
                "2:     sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
-               __FUTEX_SMP_SYNC
+               __WEAK_ORDERING_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
@@ -177,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
                "       .set    mips3                                   \n"
                "2:     sc      $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
-               __FUTEX_SMP_SYNC
+               __WEAK_ORDERING_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
index c976bfaaba83a1bafec7f40df8c806a845aec442..f8c8182f7f2e642a0326af17534c43d6fcd3fe8e 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/uaccess.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -70,11 +71,16 @@ static inline void *kmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
+       pagefault_disable();
        return page_address(page);
 }
 
-static inline void kunmap_atomic(void *kvaddr, enum km_type type) { }
-#define kmap_atomic_pfn(pfn, idx)      page_address(pfn_to_page(pfn))
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+       pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
 
 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
 
index 0214abe3f0af5b68af05649e829bbaa2608c0b66..4df8d8b118c05fef180002091c18e6b56fcf187f 100644 (file)
 
 #include <asm/io.h>
 
+/* i8259A PIC registers */
+#define PIC_MASTER_CMD         0x20
+#define PIC_MASTER_IMR         0x21
+#define PIC_MASTER_ISR         PIC_MASTER_CMD
+#define PIC_MASTER_POLL                PIC_MASTER_ISR
+#define PIC_MASTER_OCW3                PIC_MASTER_ISR
+#define PIC_SLAVE_CMD          0xa0
+#define PIC_SLAVE_IMR          0xa1
+
+/* i8259A PIC related value */
+#define PIC_CASCADE_IR         2
+#define MASTER_ICW4_DEFAULT    0x01
+#define SLAVE_ICW4_DEFAULT     0x01
+#define PIC_ICW4_AEOI          2
+
 extern spinlock_t i8259A_lock;
 
+extern void init_8259A(int auto_eoi);
+extern void enable_8259A_irq(unsigned int irq);
+extern void disable_8259A_irq(unsigned int irq);
+
 extern void init_i8259_irqs(void);
 
+#define I8259A_IRQ_BASE        0
+
 /*
  * Do the traditional i8259 interrupt polling thing.  This is for the few
  * cases where no better interrupt acknowledge method is available and we
@@ -35,15 +56,15 @@ static inline int i8259_irq(void)
        spin_lock(&i8259A_lock);
 
        /* Perform an interrupt acknowledge cycle on controller 1. */
-       outb(0x0C, 0x20);               /* prepare for poll */
-       irq = inb(0x20) & 7;
-       if (irq == 2) {
+       outb(0x0C, PIC_MASTER_CMD);             /* prepare for poll */
+       irq = inb(PIC_MASTER_CMD) & 7;
+       if (irq == PIC_CASCADE_IR) {
                /*
                 * Interrupt is cascaded so perform interrupt
                 * acknowledge on controller 2.
                 */
-               outb(0x0C, 0xA0);               /* prepare for poll */
-               irq = (inb(0xA0) & 7) + 8;
+               outb(0x0C, PIC_SLAVE_CMD);              /* prepare for poll */
+               irq = (inb(PIC_SLAVE_CMD) & 7) + 8;
        }
 
        if (unlikely(irq == 7)) {
@@ -54,14 +75,14 @@ static inline int i8259_irq(void)
                 * significant bit is not set then there is no valid
                 * interrupt.
                 */
-               outb(0x0B, 0x20);               /* ISR register */
-               if(~inb(0x20) & 0x80)
+               outb(0x0B, PIC_MASTER_ISR);             /* ISR register */
+               if(~inb(PIC_MASTER_ISR) & 0x80)
                        irq = -1;
        }
 
        spin_unlock(&i8259A_lock);
 
-       return irq;
+       return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq;
 }
 
 #endif /* _ASM_I8259_H */
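
The i8259 header above replaces raw port numbers with named PIC_* constants and makes i8259_irq() return the polled vector offset by I8259A_IRQ_BASE (or a negative value for a spurious interrupt). A board's dispatch code would consume it roughly as below; the dispatch function is an illustrative assumption for this kernel era, not something added by this commit:

    #include <linux/interrupt.h>
    #include <linux/linkage.h>
    #include <asm/i8259.h>
    #include <asm/irq.h>

    /* Poll the cascaded 8259 pair and hand any valid vector to the core;
     * a negative return means a spurious interrupt and is simply dropped.
     * (Illustrative only.)
     */
    asmlinkage void plat_irq_dispatch(void)
    {
            int irq = i8259_irq();

            if (likely(irq >= 0))
                    do_IRQ(irq);
    }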
index d20f2e9b28be09819d91432b9718a78dc32d52e6..2fbd47eba32de46b1021533df34d26f1233b6f2d 100644 (file)
@@ -156,9 +156,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define __pte_offset(address)                                          \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)                                       \
-       ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
-       ((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
+       ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
+#define pte_offset_kernel(dir, address)                                        \
+       ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 
 #define pte_offset_map(dir, address)                                    \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
index b9b1e86493eebf9862ceb6982018588a056881a0..a5b18710b6a4a30f0238d07d275ac19d109c40c9 100644 (file)
@@ -212,9 +212,9 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
 #define __pte_offset(address)                                          \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset(dir, address)                                       \
-       ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address))
+       ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_kernel(dir, address)                                        \
-       ((pte_t *) pmd_page_vaddr(*(dir)) +  __pte_offset(address))
+       ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
 #define pte_offset_map(dir, address)                                   \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
 #define pte_offset_map_nested(dir, address)                            \
index 737fa4a6912e4c94316fb8f363dba738b77d77c8..70009a902639170fcf929938228e320635803f74 100644 (file)
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
 #ifndef _MIPS_SETUP_H
 #define _MIPS_SETUP_H
 
 #define COMMAND_LINE_SIZE      256
 
 #endif /* __SETUP_H */
-#endif /* __KERNEL__ */
index b63cd0655b3d554d2145acd134d2815c67e69f22..15d70ca561870cb9d48db00c7183b9377df0ee0a 100644 (file)
@@ -176,7 +176,7 @@ typedef struct kl_config_hdr {
 /* --- New Macros for the changed kl_config_hdr_t structure --- */
 
 #define PTR_CH_MALLOC_HDR(_k)   ((klc_malloc_hdr_t *)\
-                       (unsigned long)_k + (_k->ch_malloc_hdr_off)))
+                       ((unsigned long)_k + (_k->ch_malloc_hdr_off)))
 
 #define KL_CONFIG_CH_MALLOC_HDR(_n)   PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
 
index c8d5587467bbf6181ba2950fa66bf63c8dcc3858..fc3217fc1118deabd9c4c40cb3442cdf1573c2ab 100644 (file)
@@ -3,12 +3,13 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
 #include <asm/war.h>
 
 /*
@@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
-               "       sync                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (lock->lock), "=&r" (tmp)
                : "m" (lock->lock)
@@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
                "        li     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (lock->lock), "=&r" (tmp)
                : "m" (lock->lock)
                : "memory");
        }
+
+       smp_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
+       smp_mb();
+
        __asm__ __volatile__(
        "       .set    noreorder       # __raw_spin_unlock     \n"
-       "       sync                                            \n"
        "       sw      $0, %0                                  \n"
        "       .set\treorder                                   \n"
        : "=m" (lock->lock)
@@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
                "       beqzl   %2, 1b                                  \n"
                "        nop                                            \n"
                "       andi    %2, %0, 1                               \n"
-               "       sync                                            \n"
                "       .set    reorder"
                : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
                : "m" (lock->lock)
@@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
                "       sc      %2, %1                                  \n"
                "       beqz    %2, 1b                                  \n"
                "        andi   %2, %0, 1                               \n"
-               "       sync                                            \n"
                "       .set    reorder"
                : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
                : "m" (lock->lock)
                : "memory");
        }
 
+       smp_mb();
+
        return res == 0;
 }
 
@@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
-               "       sync                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
@@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        }
+
+       smp_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
+       smp_mb();
+
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1:     ll      %1, %2          # __raw_read_unlock     \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
-               "       sync                                            \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
@@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
@@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
@@ -221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        }
+
+       smp_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
+       smp_mb();
+
        __asm__ __volatile__(
-       "       sync                    # __raw_write_unlock    \n"
+       "                               # __raw_write_unlock    \n"
        "       sw      $0, %0                                  \n"
        : "=m" (rw->lock)
        : "m" (rw->lock)
@@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
                "       bnez    %1, 2f                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
-               "       beqzl   %1, 1b                                  \n"
                "       .set    reorder                                 \n"
-#ifdef CONFIG_SMP
-               "        sync                                           \n"
-#endif
+               "       beqzl   %1, 1b                                  \n"
+               "        nop                                            \n"
+               __WEAK_ORDERING_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
+               "        nop                                            \n"
                "       .set    reorder                                 \n"
-#ifdef CONFIG_SMP
-               "        sync                                           \n"
-#endif
+               __WEAK_ORDERING_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
+               __WEAK_ORDERING_MB
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
@@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
                "       lui     %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
-               "        sync                                           \n"
+               "        nop                                            \n"
+               __WEAK_ORDERING_MB
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
index 3056feed5a367bcee7f47ec346f3ee62415c6abd..9428057a50cfa63b996c7f50fe10bea49bb069bc 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
  * Copyright (C) 1996 by Paul M. Antoine
  * Copyright (C) 1999 Silicon Graphics
  * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 #include <linux/irqflags.h>
 
 #include <asm/addrspace.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/dsp.h>
 #include <asm/ptrace.h>
 #include <asm/war.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *     CPU 0                           CPU 1
- *
- *     b = 2;
- *     memory_barrier();
- *     p = &b;                         q = p;
- *                                     read_barrier_depends();
- *                                     d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *     CPU 0                           CPU 1
- *
- *     a = 2;
- *     memory_barrier();
- *     b = 3;                          y = b;
- *                                     read_barrier_depends();
- *                                     x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
-#define read_barrier_depends() do { } while(0)
-
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync()                               \
-       __asm__ __volatile__(                   \
-               ".set   push\n\t"               \
-               ".set   noreorder\n\t"          \
-               ".set   mips2\n\t"              \
-               "sync\n\t"                      \
-               ".set   pop"                    \
-               : /* no output */               \
-               : /* no input */                \
-               : "memory")
-#else
-#define __sync()       do { } while(0)
-#endif
-
-#define __fast_iob()                           \
-       __asm__ __volatile__(                   \
-               ".set   push\n\t"               \
-               ".set   noreorder\n\t"          \
-               "lw     $0,%0\n\t"              \
-               "nop\n\t"                       \
-               ".set   pop"                    \
-               : /* no output */               \
-               : "m" (*(int *)CKSEG1)          \
-               : "memory")
-
-#define fast_wmb()     __sync()
-#define fast_rmb()     __sync()
-#define fast_mb()      __sync()
-#define fast_iob()                             \
-       do {                                    \
-               __sync();                       \
-               __fast_iob();                   \
-       } while (0)
-
-#ifdef CONFIG_CPU_HAS_WB
-
-#include <asm/wbflush.h>
-
-#define wmb()          fast_wmb()
-#define rmb()          fast_rmb()
-#define mb()           wbflush()
-#define iob()          wbflush()
-
-#else /* !CONFIG_CPU_HAS_WB */
-
-#define wmb()          fast_wmb()
-#define rmb()          fast_rmb()
-#define mb()           fast_mb()
-#define iob()          fast_iob()
-
-#endif /* !CONFIG_CPU_HAS_WB */
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
-#endif
-
-#define set_mb(var, value) \
-do { var = value; mb(); } while (0)
 
 /*
  * switch_to(n) should switch tasks to task nr n, first
@@ -217,9 +97,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
@@ -235,9 +112,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqz    %2, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
@@ -251,6 +125,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                local_irq_restore(flags);       /* implies memory barrier  */
        }
 
+       smp_mb();
+
        return retval;
 }
 
@@ -268,9 +144,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
@@ -284,9 +157,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqz    %2, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
@@ -300,6 +170,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                local_irq_restore(flags);       /* implies memory barrier  */
        }
 
+       smp_mb();
+
        return retval;
 }
 #else
@@ -345,9 +217,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
@@ -365,9 +234,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
@@ -383,6 +249,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
                local_irq_restore(flags);       /* implies memory barrier  */
        }
 
+       smp_mb();
+
        return retval;
 }
 
@@ -402,9 +270,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
@@ -420,9 +285,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
-#ifdef CONFIG_SMP
-               "       sync                                            \n"
-#endif
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
@@ -438,6 +300,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
                local_irq_restore(flags);       /* implies memory barrier  */
        }
 
+       smp_mb();
+
        return retval;
 }
 #else
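The hunks above pull the memory barrier out of the ll/sc assembly and issue it once after the operation completes (an explicit smp_mb() instead of an embedded "sync" under CONFIG_SMP). As a rough user-space analogue of that shape, not the kernel's own primitives, here is a relaxed compare-and-swap loop followed by an explicit fence; every name in it is invented for the illustration and it builds with GCC or Clang:

#include <stdio.h>

static int counter;             /* shared variable */

static int xchg_then_fence(int *p, int newval)
{
        int old = __atomic_load_n(p, __ATOMIC_RELAXED);

        /* Retry until the swap succeeds; no ordering inside the loop,
         * just like the ll/sc sequence with the "sync" removed. */
        while (!__atomic_compare_exchange_n(p, &old, newval, 0,
                                            __ATOMIC_RELAXED,
                                            __ATOMIC_RELAXED))
                ;

        /* Full barrier after the operation, like the added smp_mb(). */
        __atomic_thread_fence(__ATOMIC_SEQ_CST);
        return old;             /* previous value, as __xchg_u32 returns */
}

int main(void)
{
        counter = 1;
        int prev = xchg_then_fence(&counter, 2);
        printf("old=%d new=%d\n", prev, counter);
        return 0;
}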
index 2b52e180c6f25d75c28911ca68affdf41164afff..63a13c5bd8324ea0527779bcb575b2fb99026a6b 100644 (file)
@@ -93,16 +93,6 @@ typedef unsigned long long phys_t;
 typedef unsigned long phys_t;
 #endif
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index ec56aa52f669f2092847290b9fccb8309d6ae35e..696cff39a1d3e38dc7de8e49e77f55ebc172281d 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-       register unsigned long __a3 asm("$7"); \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %2\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "=r" (__a3) \
-       : "i" (__NR_##name) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-/*
- * DANGER: This macro isn't usable for the pipe(2) call
- * which has a unusual return convention.
- */
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a3 asm("$7"); \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %3\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "=r" (__a3) \
-       : "r" (__a0), "i" (__NR_##name) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a, btype b) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a3 asm("$7"); \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %4\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "=r" (__a3) \
-       : "r" (__a0), "r" (__a1), "i" (__NR_##name) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a, btype b, ctype c) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a2 asm("$6") = (unsigned long) c; \
-       register unsigned long __a3 asm("$7"); \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %5\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "=r" (__a3) \
-       : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name(atype a, btype b, ctype c, dtype d) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a2 asm("$6") = (unsigned long) c; \
-       register unsigned long __a3 asm("$7") = (unsigned long) d; \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %5\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "+r" (__a3) \
-       : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#if (_MIPS_SIM == _MIPS_SIM_ABI32)
-
-/*
- * Using those means your brain needs more than an oil change ;-)
- */
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a2 asm("$6") = (unsigned long) c; \
-       register unsigned long __a3 asm("$7") = (unsigned long) d; \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "lw\t$2, %6\n\t" \
-       "subu\t$29, 32\n\t" \
-       "sw\t$2, 16($29)\n\t" \
-       "li\t$2, %5\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       "addiu\t$29, 32\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "+r" (__a3) \
-       : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
-         "m" ((unsigned long)e) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a2 asm("$6") = (unsigned long) c; \
-       register unsigned long __a3 asm("$7") = (unsigned long) d; \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "lw\t$2, %6\n\t" \
-       "lw\t$8, %7\n\t" \
-       "subu\t$29, 32\n\t" \
-       "sw\t$2, 16($29)\n\t" \
-       "sw\t$8, 20($29)\n\t" \
-       "li\t$2, %5\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       "addiu\t$29, 32\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "+r" (__a3) \
-       : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
-         "m" ((unsigned long)e), "m" ((unsigned long)f) \
-       : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */
-
-#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a2 asm("$6") = (unsigned long) c; \
-       register unsigned long __a3 asm("$7") = (unsigned long) d; \
-       register unsigned long __a4 asm("$8") = (unsigned long) e; \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %6\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "+r" (__a3) \
-       : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \
-       : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
-{ \
-       register unsigned long __a0 asm("$4") = (unsigned long) a; \
-       register unsigned long __a1 asm("$5") = (unsigned long) b; \
-       register unsigned long __a2 asm("$6") = (unsigned long) c; \
-       register unsigned long __a3 asm("$7") = (unsigned long) d; \
-       register unsigned long __a4 asm("$8") = (unsigned long) e; \
-       register unsigned long __a5 asm("$9") = (unsigned long) f; \
-       unsigned long __v0; \
-       \
-       __asm__ volatile ( \
-       ".set\tnoreorder\n\t" \
-       "li\t$2, %7\t\t\t# " #name "\n\t" \
-       "syscall\n\t" \
-       "move\t%0, $2\n\t" \
-       ".set\treorder" \
-       : "=&r" (__v0), "+r" (__a3) \
-       : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \
-         "i" (__NR_##name) \
-       : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
-         "memory"); \
-       \
-       if (__a3 == 0) \
-               return (type) __v0; \
-       errno = __v0; \
-       return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) */
-
-
 #define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
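The _syscallN macros deleted above open-coded the MIPS calling convention: arguments in $4..$7, the syscall number in $2, and the error flag coming back in $7, with errno set by hand. Code that still needs a raw system call normally goes through the libc syscall(2) wrapper instead, which does the same marshalling per the platform ABI; a minimal sketch, with getpid chosen purely as an example:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        /* syscall(2) replaces the removed _syscall0/_syscall1/... macros:
         * libc loads the arguments per the ABI, traps into the kernel and
         * sets errno on failure. */
        long pid = syscall(SYS_getpid);

        printf("getpid via syscall(2): %ld\n", pid);
        return 0;
}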
index 1e387e1dad3044a99a718c7ca5b86142b4a0b844..66f0b408c66907cd90b42ee8861507760ac5b67d 100644 (file)
@@ -191,13 +191,13 @@ dma_get_cache_alignment(void)
 }
 
 static inline int
-dma_is_consistent(dma_addr_t dma_addr)
+dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
        return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        if(hppa_dma_ops->dma_sync_single_for_cpu)
index d84bbb283fd17c7a8e9fb92d4a30a660efe86263..dbee6e60aa8139e66897ba65ba4ac9155bf00dab 100644 (file)
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index 9827849953a364cb252ca596e906f82e3ad09491..1e637381c118db47c3694253bf6bfbdb75fe9a92 100644 (file)
@@ -17,6 +17,7 @@ header-y += ipc.h
 header-y += poll.h
 header-y += shmparam.h
 header-y += sockios.h
+header-y += spu_info.h
 header-y += ucontext.h
 header-y += ioctl.h
 header-y += linkage.h
diff --git a/include/asm-powerpc/cell-pmu.h b/include/asm-powerpc/cell-pmu.h
new file mode 100644 (file)
index 0000000..e8c2ebd
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Cell Broadband Engine Performance Monitor
+ *
+ * (C) Copyright IBM Corporation 2006
+ *
+ * Author:
+ *   David Erb (djerb@us.ibm.com)
+ *   Kevin Corry (kevcorry@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ASM_CELL_PMU_H__
+#define __ASM_CELL_PMU_H__
+
+/* The Cell PMU has four hardware performance counters, which can be
+ * configured as four 32-bit counters or eight 16-bit counters.
+ */
+#define NR_PHYS_CTRS 4
+#define NR_CTRS      (NR_PHYS_CTRS * 2)
+
+/* Macros for the pm_control register. */
+#define CBE_PM_16BIT_CTR(ctr)              (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1))))
+#define CBE_PM_ENABLE_PERF_MON             0x80000000
+#define CBE_PM_STOP_AT_MAX                 0x40000000
+#define CBE_PM_TRACE_MODE_GET(pm_control)  (((pm_control) >> 28) & 0x3)
+#define CBE_PM_TRACE_MODE_SET(mode)        (((mode)  & 0x3) << 28)
+#define CBE_PM_COUNT_MODE_SET(count)       (((count) & 0x3) << 18)
+#define CBE_PM_FREEZE_ALL_CTRS             0x00100000
+#define CBE_PM_ENABLE_EXT_TRACE            0x00008000
+
+/* Macros for the trace_address register. */
+#define CBE_PM_TRACE_BUF_FULL              0x00000800
+#define CBE_PM_TRACE_BUF_EMPTY             0x00000400
+#define CBE_PM_TRACE_BUF_DATA_COUNT(ta)    ((ta) & 0x3ff)
+#define CBE_PM_TRACE_BUF_MAX_COUNT         0x400
+
+/* Macros for the pm07_control registers. */
+#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f)
+#define CBE_PM_CTR_INPUT_CONTROL           0x02000000
+#define CBE_PM_CTR_POLARITY                0x01000000
+#define CBE_PM_CTR_COUNT_CYCLES            0x00800000
+#define CBE_PM_CTR_ENABLE                  0x00400000
+
+/* Macros for the pm_status register. */
+#define CBE_PM_CTR_OVERFLOW_INTR(ctr)      (1 << (31 - ((ctr) & 7)))
+
+enum pm_reg_name {
+       group_control,
+       debug_bus_control,
+       trace_address,
+       ext_tr_timer,
+       pm_status,
+       pm_control,
+       pm_interval,
+       pm_start_stop,
+};
+
+/* Routines for reading/writing the PMU registers. */
+extern u32  cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
+extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
+extern u32  cbe_read_ctr(u32 cpu, u32 ctr);
+extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
+
+extern u32  cbe_read_pm07_control(u32 cpu, u32 ctr);
+extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
+extern u32  cbe_read_pm(u32 cpu, enum pm_reg_name reg);
+extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
+
+extern u32  cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
+extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
+
+extern void cbe_enable_pm(u32 cpu);
+extern void cbe_disable_pm(u32 cpu);
+
+extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
+
+extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
+extern void cbe_disable_pm_interrupts(u32 cpu);
+extern u32  cbe_query_pm_interrupts(u32 cpu);
+extern u32  cbe_clear_pm_interrupts(u32 cpu);
+extern void cbe_sync_irq(int node);
+
+/* Utility functions, macros */
+extern u32 cbe_get_hw_thread_id(int cpu);
+
+#define cbe_cpu_to_node(cpu) ((cpu) >> 1)
+
+#define CBE_COUNT_SUPERVISOR_MODE       0
+#define CBE_COUNT_HYPERVISOR_MODE       1
+#define CBE_COUNT_PROBLEM_MODE          2
+#define CBE_COUNT_ALL_MODES             3
+
+/* Macros for the pm07_control registers. */
+#define PM07_CTR_INPUT_MUX(x)                    (((x) & 0x3F) << 26)
+#define PM07_CTR_INPUT_CONTROL(x)                (((x) & 1) << 25)
+#define PM07_CTR_POLARITY(x)                     (((x) & 1) << 24)
+#define PM07_CTR_COUNT_CYCLES(x)                 (((x) & 1) << 23)
+#define PM07_CTR_ENABLE(x)                       (((x) & 1) << 22)
+
+#endif /* __ASM_CELL_PMU_H__ */
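A standalone sketch of how the pm_control bit-field helpers in this new header compose and decode a control word. The few macros used are copied from the hunk above so the snippet compiles on its own; the mode values chosen are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define CBE_PM_ENABLE_PERF_MON            0x80000000u
#define CBE_PM_STOP_AT_MAX                0x40000000u
#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
#define CBE_PM_TRACE_MODE_SET(mode)       (((mode)  & 0x3) << 28)
#define CBE_PM_COUNT_MODE_SET(count)      (((count) & 0x3) << 18)
#define CBE_COUNT_ALL_MODES               3

int main(void)
{
        uint32_t pm_control = 0;

        /* Enable the PMU, stop counters when they saturate, select trace
         * mode 2 and count in all privilege modes. */
        pm_control |= CBE_PM_ENABLE_PERF_MON;
        pm_control |= CBE_PM_STOP_AT_MAX;
        pm_control |= CBE_PM_TRACE_MODE_SET(2);
        pm_control |= CBE_PM_COUNT_MODE_SET(CBE_COUNT_ALL_MODES);

        printf("pm_control = 0x%08x, trace mode = %u\n",
               pm_control, CBE_PM_TRACE_MODE_GET(pm_control));
        return 0;
}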
index a9a40149a7c0daf66cca975035b4d5863bbc10bc..6fe5c9d4ca3b946e2ad5f348070851b6e039d086 100644 (file)
@@ -24,6 +24,8 @@
 #define PPC_FEATURE_ICACHE_SNOOP       0x00002000
 #define PPC_FEATURE_ARCH_2_05          0x00001000
 #define PPC_FEATURE_PA6T               0x00000800
+#define PPC_FEATURE_HAS_DFP            0x00000400
+#define PPC_FEATURE_POWER6_EXT         0x00000200
 
 #define PPC_FEATURE_TRUE_LE            0x00000002
 #define PPC_FEATURE_PPC_LE             0x00000001
@@ -45,6 +47,7 @@ enum powerpc_oprofile_type {
        PPC_OPROFILE_POWER4 = 2,
        PPC_OPROFILE_G4 = 3,
        PPC_OPROFILE_BOOKE = 4,
+       PPC_OPROFILE_CELL = 5,
 };
 
 struct cpu_spec {
@@ -91,7 +94,7 @@ extern struct cpu_spec                *cur_cpu_spec;
 
 extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
 
-extern struct cpu_spec *identify_cpu(unsigned long offset);
+extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
 extern void do_feature_fixups(unsigned long value, void *fixup_start,
                              void *fixup_end);
 
@@ -148,19 +151,13 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_PAUSE_ZERO             LONG_ASM_CONST(0x0000200000000000)
 #define CPU_FTR_PURR                   LONG_ASM_CONST(0x0000400000000000)
 #define CPU_FTR_CELL_TB_BUG            LONG_ASM_CONST(0x0000800000000000)
+#define CPU_FTR_SPURR                  LONG_ASM_CONST(0x0001000000000000)
 
 #ifndef __ASSEMBLY__
 
-#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
-                                       CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
-                                       CPU_FTR_NODSISRALIGN)
-
-/* iSeries doesn't support large pages */
-#ifdef CONFIG_PPC_ISERIES
-#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_PPCAS_ARCH_V2_BASE)
-#else
-#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
-#endif /* CONFIG_PPC_ISERIES */
+#define CPU_FTR_PPCAS_ARCH_V2  (CPU_FTR_SLB | \
+                                CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
+                                CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE)
 
 /* We only set the altivec features if the kernel was compiled with altivec
  * support
@@ -311,7 +308,8 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTRS_E500_2        (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
            CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN)
 #define CPU_FTRS_GENERIC_32    (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
-#ifdef __powerpc64__
+
+/* 64-bit CPUs */
 #define CPU_FTRS_POWER3        (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
 #define CPU_FTRS_RS64  (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
@@ -332,7 +330,13 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_MMCRA | CPU_FTR_SMT | \
            CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-           CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_REAL_LE)
+           CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE)
+#define CPU_FTRS_POWER6X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+           CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+           CPU_FTR_MMCRA | CPU_FTR_SMT | \
+           CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
+           CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | \
+           CPU_FTR_SPURR | CPU_FTR_REAL_LE)
 #define CPU_FTRS_CELL  (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -343,7 +347,6 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
            CPU_FTR_PURR | CPU_FTR_REAL_LE)
 #define CPU_FTRS_COMPATIBLE    (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
            CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
-#endif
 
 #ifdef __powerpc64__
 #define CPU_FTRS_POSSIBLE      \
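On powerpc the PPC_FEATURE_* bits are also what the kernel exposes to user space in the AT_HWCAP auxiliary-vector entry, so the PPC_FEATURE_HAS_DFP bit added above can be probed from an ordinary program. A small sketch, with the constant copied from the hunk (on other architectures the bit simply never appears in AT_HWCAP):

#include <stdio.h>
#include <sys/auxv.h>

#define PPC_FEATURE_HAS_DFP 0x00000400UL

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("AT_HWCAP = %#lx, DFP %savailable\n",
               hwcap, (hwcap & PPC_FEATURE_HAS_DFP) ? "" : "not ");
        return 0;
}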
index 8973565f95d39f38da42c49ddd6b9110e5687e7e..e23f07e73cb305ad326a8fdce8fa290f8e3248a7 100644 (file)
@@ -95,7 +95,13 @@ struct dbdma_cmd {
 #define DBDMA_DO_STOP(regs) do {                               \
        out_le32(&((regs)->control), (RUN|FLUSH)<<16);          \
        while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH))      \
-               ;                                               \
+               ; \
+} while(0)
+
+#define DBDMA_DO_RESET(regs) do {                              \
+       out_le32(&((regs)->control), (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);\
+       while(in_le32(&((regs)->status)) & (RUN)) \
+               ; \
 } while(0)
 
 #endif /* _ASM_DBDMA_H_ */
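The new DBDMA_DO_RESET(), like DBDMA_DO_STOP() before it, wraps its register write and busy-wait in do { ... } while(0) so the multi-statement macro expands to exactly one statement. A toy illustration of why that matters when the macro is used under if/else (the names below are made up, not from this header):

#include <stdio.h>

#define RESET_DEVICE(dev) do {                          \
        printf("flush %s\n", (dev));                    \
        printf("wait for %s to go idle\n", (dev));      \
} while (0)

int main(void)
{
        int want_reset = 1;

        if (want_reset)
                RESET_DEVICE("dbdma0");   /* expands to a single statement */
        else
                printf("leaving the channel alone\n");
        return 0;
}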
diff --git a/include/asm-powerpc/dcr-mmio.h b/include/asm-powerpc/dcr-mmio.h
new file mode 100644 (file)
index 0000000..5dbfca8
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_MMIO_H
+#define _ASM_POWERPC_DCR_MMIO_H
+#ifdef __KERNEL__
+
+#include <asm/io.h>
+
+typedef struct { void __iomem *token; unsigned int stride; } dcr_host_t;
+
+#define DCR_MAP_OK(host)       ((host).token != NULL)
+
+extern dcr_host_t dcr_map(struct device_node *dev, unsigned int dcr_n,
+                         unsigned int dcr_c);
+extern void dcr_unmap(dcr_host_t host, unsigned int dcr_n, unsigned int dcr_c);
+
+static inline u32 dcr_read(dcr_host_t host, unsigned int dcr_n)
+{
+       return in_be32(host.token + dcr_n * host.stride);
+}
+
+static inline void dcr_write(dcr_host_t host, unsigned int dcr_n, u32 value)
+{
+       out_be32(host.token + dcr_n * host.stride, value);
+}
+
+extern u64 of_translate_dcr_address(struct device_node *dev,
+                                   unsigned int dcr_n,
+                                   unsigned int *stride);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_MMIO_H */
+
+
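A toy model of the accessors in the new dcr-mmio.h: the host handle carries a mapped base (token) and the spacing between DCRs in bytes (stride), and dcr_read() indexes token + dcr_n * stride. Here the "registers" are an ordinary array and the big-endian MMIO load becomes a plain load, so this only illustrates the address arithmetic:

#include <stdio.h>
#include <stdint.h>

typedef struct {
        unsigned char *token;           /* mapped base of the DCR range */
        unsigned int   stride;          /* byte distance between DCRs   */
} toy_dcr_host_t;

static uint32_t toy_dcr_read(toy_dcr_host_t host, unsigned int dcr_n)
{
        return *(uint32_t *)(host.token + dcr_n * host.stride);
}

int main(void)
{
        uint32_t regs[4] = { 0x11, 0x22, 0x33, 0x44 };
        toy_dcr_host_t host = { (unsigned char *)regs, sizeof(uint32_t) };

        printf("dcr 2 = 0x%x\n", toy_dcr_read(host, 2));   /* prints 0x33 */
        return 0;
}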
diff --git a/include/asm-powerpc/dcr-native.h b/include/asm-powerpc/dcr-native.h
new file mode 100644 (file)
index 0000000..fd4a5f5
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_NATIVE_H
+#define _ASM_POWERPC_DCR_NATIVE_H
+#ifdef __KERNEL__
+
+#include <asm/reg.h>
+
+typedef struct {} dcr_host_t;
+
+#define DCR_MAP_OK(host)       (1)
+
+#define dcr_map(dev, dcr_n, dcr_c)     {}
+#define dcr_unmap(host, dcr_n, dcr_c)  {}
+#define dcr_read(host, dcr_n)          mfdcr(dcr_n)
+#define dcr_write(host, dcr_n, value)  mtdcr(dcr_n, value)
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_NATIVE_H */
+
+
diff --git a/include/asm-powerpc/dcr.h b/include/asm-powerpc/dcr.h
new file mode 100644 (file)
index 0000000..473f2c7
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                    <benh@kernel.crashing.org>
+ *
+ *   This program is free software;  you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program;  if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_POWERPC_DCR_H
+#define _ASM_POWERPC_DCR_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PPC_DCR_NATIVE
+#include <asm/dcr-native.h>
+#else
+#include <asm/dcr-mmio.h>
+#endif
+
+/*
+ * On CONFIG_PPC_MERGE, we have additional helpers to read the DCR
+ * base from the device-tree
+ */
+#ifdef CONFIG_PPC_MERGE
+extern unsigned int dcr_resource_start(struct device_node *np,
+                                      unsigned int index);
+extern unsigned int dcr_resource_len(struct device_node *np,
+                                    unsigned int index);
+#endif /* CONFIG_PPC_MERGE */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_DCR_H */
index d8f9872b0e2dc3587a9e658adc957f093b7906fb..228ab2a315b9cd1354004bb96a77419574c52af8 100644 (file)
@@ -3,5 +3,22 @@
  *
  * This file is released under the GPLv2
  */
-#include <asm-generic/device.h>
+#ifndef _ASM_POWERPC_DEVICE_H
+#define _ASM_POWERPC_DEVICE_H
 
+struct dma_mapping_ops;
+struct device_node;
+
+struct dev_archdata {
+       /* Optional pointer to an OF device node */
+       struct device_node      *of_node;
+
+       /* DMA operations on that device */
+       struct dma_mapping_ops  *dma_ops;
+       void                    *dma_data;
+
+       /* NUMA node if applicable */
+       int                     numa_node;
+};
+
+#endif /* _ASM_POWERPC_DEVICE_H */
index 2ab9baf78bb4e40514bf317296903097ba110d47..7c7de87bd8ae55391e703a5a2a76fa0866e3238b 100644 (file)
@@ -44,26 +44,150 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
 #ifdef CONFIG_PPC64
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+       void *          (*alloc_coherent)(struct device *dev, size_t size,
+                               dma_addr_t *dma_handle, gfp_t flag);
+       void            (*free_coherent)(struct device *dev, size_t size,
+                               void *vaddr, dma_addr_t dma_handle);
+       dma_addr_t      (*map_single)(struct device *dev, void *ptr,
+                               size_t size, enum dma_data_direction direction);
+       void            (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+                               size_t size, enum dma_data_direction direction);
+       int             (*map_sg)(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction direction);
+       void            (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+                               int nents, enum dma_data_direction direction);
+       int             (*dma_supported)(struct device *dev, u64 mask);
+       int             (*dac_dma_supported)(struct device *dev, u64 mask);
+       int             (*set_dma_mask)(struct device *dev, u64 dma_mask);
+};
+
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+{
+       /* We don't handle the NULL dev case for ISA for now. We could
+        * do it via an out of line call but it is not needed for now. The
+        * only ISA DMA device we support is the floppy and we have a hack
+        * in the floppy driver directly to get a device for us.
+        */
+       if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
+               return NULL;
+       return dev->archdata.dma_ops;
+}
 
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-               dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-               size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-               size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size,
-               enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-               size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-               int nhwentries, enum dma_data_direction direction);
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (unlikely(dma_ops == NULL))
+               return 0;
+       if (dma_ops->dma_supported == NULL)
+               return 1;
+       return dma_ops->dma_supported(dev, mask);
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       if (unlikely(dma_ops == NULL))
+               return -EIO;
+       if (dma_ops->set_dma_mask != NULL)
+               return dma_ops->set_dma_mask(dev, dma_mask);
+       if (!dev->dma_mask || !dma_supported(dev, *dev->dma_mask))
+               return -EIO;
+       *dev->dma_mask = dma_mask;
+       return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t flag)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                    void *cpu_addr, dma_addr_t dma_handle)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+                                       size_t size,
+                                       enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       return dma_ops->map_single(dev, cpu_addr, size, direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+                                   size_t size,
+                                   enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       dma_ops->unmap_single(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                     unsigned long offset, size_t size,
+                                     enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       return dma_ops->map_single(dev, page_address(page) + offset, size,
+                       direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+                                 size_t size,
+                                 enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       dma_ops->unmap_single(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+                            int nents, enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                               int nhwentries,
+                               enum dma_data_direction direction)
+{
+       struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+       BUG_ON(!dma_ops);
+       dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+}
+
+
+/*
+ * Available generic sets of operations
+ */
+extern struct dma_mapping_ops dma_iommu_ops;
+extern struct dma_mapping_ops dma_direct_ops;
+
+extern unsigned long dma_direct_offset;
 
 #else /* CONFIG_PPC64 */
 
@@ -218,9 +342,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d)   (0)
+#define dma_is_consistent(d, h)        (0)
 #else
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 #endif
 
 static inline int dma_get_cache_alignment(void)
@@ -254,32 +378,12 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
 {
        BUG_ON(direction == DMA_NONE);
        __dma_sync(vaddr, size, (int)direction);
 }
 
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-       void *          (*alloc_coherent)(struct device *dev, size_t size,
-                               dma_addr_t *dma_handle, gfp_t flag);
-       void            (*free_coherent)(struct device *dev, size_t size,
-                               void *vaddr, dma_addr_t dma_handle);
-       dma_addr_t      (*map_single)(struct device *dev, void *ptr,
-                               size_t size, enum dma_data_direction direction);
-       void            (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-                               size_t size, enum dma_data_direction direction);
-       int             (*map_sg)(struct device *dev, struct scatterlist *sg,
-                               int nents, enum dma_data_direction direction);
-       void            (*unmap_sg)(struct device *dev, struct scatterlist *sg,
-                               int nents, enum dma_data_direction direction);
-       int             (*dma_supported)(struct device *dev, u64 mask);
-       int             (*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_DMA_MAPPING_H */
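A compact user-space sketch of the dispatch scheme this header switches to: each device carries a pointer to a table of DMA operations, and the inline wrappers fetch the table and forward the call (or refuse when none is set, as get_dma_ops() does for NULL). All toy_* names below are invented for the illustration:

#include <stdio.h>
#include <stddef.h>

struct toy_device;

struct toy_dma_ops {
        unsigned long (*map_single)(struct toy_device *dev, void *ptr,
                                    size_t size);
};

struct toy_device {
        const char         *name;
        struct toy_dma_ops *dma_ops;    /* like dev->archdata.dma_ops */
};

static unsigned long direct_map_single(struct toy_device *dev, void *ptr,
                                       size_t size)
{
        (void)dev; (void)size;
        return (unsigned long)ptr;      /* "direct": DMA address == CPU address */
}

static struct toy_dma_ops toy_direct_ops = { direct_map_single };

static unsigned long toy_dma_map_single(struct toy_device *dev, void *ptr,
                                        size_t size)
{
        if (dev == NULL || dev->dma_ops == NULL)
                return 0;               /* no ops table: refuse the mapping */
        return dev->dma_ops->map_single(dev, ptr, size);
}

int main(void)
{
        char buf[64];
        struct toy_device dev = { "toy0", &toy_direct_ops };

        printf("mapped %s buffer at %#lx\n", dev.name,
               toy_dma_map_single(&dev, buf, sizeof(buf)));
        return 0;
}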
index 6a784396660bf78a00145b07e9f35c410438898e..b886bec67016795582d1922b3ccc2c0913c8f878 100644 (file)
@@ -120,10 +120,6 @@ static inline u8 eeh_readb(const volatile void __iomem *addr)
                return eeh_check_failure(addr, val);
        return val;
 }
-static inline void eeh_writeb(u8 val, volatile void __iomem *addr)
-{
-       out_8(addr, val);
-}
 
 static inline u16 eeh_readw(const volatile void __iomem *addr)
 {
@@ -132,21 +128,6 @@ static inline u16 eeh_readw(const volatile void __iomem *addr)
                return eeh_check_failure(addr, val);
        return val;
 }
-static inline void eeh_writew(u16 val, volatile void __iomem *addr)
-{
-       out_le16(addr, val);
-}
-static inline u16 eeh_raw_readw(const volatile void __iomem *addr)
-{
-       u16 val = in_be16(addr);
-       if (EEH_POSSIBLE_ERROR(val, u16))
-               return eeh_check_failure(addr, val);
-       return val;
-}
-static inline void eeh_raw_writew(u16 val, volatile void __iomem *addr) {
-       volatile u16 __iomem *vaddr = (volatile u16 __iomem *) addr;
-       out_be16(vaddr, val);
-}
 
 static inline u32 eeh_readl(const volatile void __iomem *addr)
 {
@@ -155,205 +136,75 @@ static inline u32 eeh_readl(const volatile void __iomem *addr)
                return eeh_check_failure(addr, val);
        return val;
 }
-static inline void eeh_writel(u32 val, volatile void __iomem *addr)
-{
-       out_le32(addr, val);
-}
-static inline u32 eeh_raw_readl(const volatile void __iomem *addr)
+
+static inline u64 eeh_readq(const volatile void __iomem *addr)
 {
-       u32 val = in_be32(addr);
-       if (EEH_POSSIBLE_ERROR(val, u32))
+       u64 val = in_le64(addr);
+       if (EEH_POSSIBLE_ERROR(val, u64))
                return eeh_check_failure(addr, val);
        return val;
 }
-static inline void eeh_raw_writel(u32 val, volatile void __iomem *addr)
-{
-       out_be32(addr, val);
-}
 
-static inline u64 eeh_readq(const volatile void __iomem *addr)
+static inline u16 eeh_readw_be(const volatile void __iomem *addr)
 {
-       u64 val = in_le64(addr);
-       if (EEH_POSSIBLE_ERROR(val, u64))
+       u16 val = in_be16(addr);
+       if (EEH_POSSIBLE_ERROR(val, u16))
                return eeh_check_failure(addr, val);
        return val;
 }
-static inline void eeh_writeq(u64 val, volatile void __iomem *addr)
+
+static inline u32 eeh_readl_be(const volatile void __iomem *addr)
 {
-       out_le64(addr, val);
+       u32 val = in_be32(addr);
+       if (EEH_POSSIBLE_ERROR(val, u32))
+               return eeh_check_failure(addr, val);
+       return val;
 }
-static inline u64 eeh_raw_readq(const volatile void __iomem *addr)
+
+static inline u64 eeh_readq_be(const volatile void __iomem *addr)
 {
        u64 val = in_be64(addr);
        if (EEH_POSSIBLE_ERROR(val, u64))
                return eeh_check_failure(addr, val);
        return val;
 }
-static inline void eeh_raw_writeq(u64 val, volatile void __iomem *addr)
-{
-       out_be64(addr, val);
-}
-
-#define EEH_CHECK_ALIGN(v,a) \
-       ((((unsigned long)(v)) & ((a) - 1)) == 0)
 
-static inline void eeh_memset_io(volatile void __iomem *addr, int c,
-                                unsigned long n)
-{
-       void *p = (void __force *)addr;
-       u32 lc = c;
-       lc |= lc << 8;
-       lc |= lc << 16;
-
-       __asm__ __volatile__ ("sync" : : : "memory");
-       while(n && !EEH_CHECK_ALIGN(p, 4)) {
-               *((volatile u8 *)p) = c;
-               p++;
-               n--;
-       }
-       while(n >= 4) {
-               *((volatile u32 *)p) = lc;
-               p += 4;
-               n -= 4;
-       }
-       while(n) {
-               *((volatile u8 *)p) = c;
-               p++;
-               n--;
-       }
-       __asm__ __volatile__ ("sync" : : : "memory");
-}
-static inline void eeh_memcpy_fromio(void *dest, const volatile void __iomem *src,
+static inline void eeh_memcpy_fromio(void *dest, const
+                                    volatile void __iomem *src,
                                     unsigned long n)
 {
-       void *vsrc = (void __force *) src;
-       void *destsave = dest;
-       unsigned long nsave = n;
-
-       __asm__ __volatile__ ("sync" : : : "memory");
-       while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
-               *((u8 *)dest) = *((volatile u8 *)vsrc);
-               __asm__ __volatile__ ("eieio" : : : "memory");
-               vsrc++;
-               dest++;
-               n--;
-       }
-       while(n > 4) {
-               *((u32 *)dest) = *((volatile u32 *)vsrc);
-               __asm__ __volatile__ ("eieio" : : : "memory");
-               vsrc += 4;
-               dest += 4;
-               n -= 4;
-       }
-       while(n) {
-               *((u8 *)dest) = *((volatile u8 *)vsrc);
-               __asm__ __volatile__ ("eieio" : : : "memory");
-               vsrc++;
-               dest++;
-               n--;
-       }
-       __asm__ __volatile__ ("sync" : : : "memory");
+       _memcpy_fromio(dest, src, n);
 
        /* Look for ffff's here at dest[n].  Assume that at least 4 bytes
         * were copied. Check all four bytes.
         */
-       if ((nsave >= 4) &&
-               (EEH_POSSIBLE_ERROR((*((u32 *) destsave+nsave-4)), u32))) {
-               eeh_check_failure(src, (*((u32 *) destsave+nsave-4)));
-       }
-}
-
-static inline void eeh_memcpy_toio(volatile void __iomem *dest, const void *src,
-                                  unsigned long n)
-{
-       void *vdest = (void __force *) dest;
-
-       __asm__ __volatile__ ("sync" : : : "memory");
-       while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
-               *((volatile u8 *)vdest) = *((u8 *)src);
-               src++;
-               vdest++;
-               n--;
-       }
-       while(n > 4) {
-               *((volatile u32 *)vdest) = *((volatile u32 *)src);
-               src += 4;
-               vdest += 4;
-               n-=4;
-       }
-       while(n) {
-               *((volatile u8 *)vdest) = *((u8 *)src);
-               src++;
-               vdest++;
-               n--;
-       }
-       __asm__ __volatile__ ("sync" : : : "memory");
-}
-
-#undef EEH_CHECK_ALIGN
-
-static inline u8 eeh_inb(unsigned long port)
-{
-       u8 val;
-       val = in_8((u8 __iomem *)(port+pci_io_base));
-       if (EEH_POSSIBLE_ERROR(val, u8))
-               return eeh_check_failure((void __iomem *)(port), val);
-       return val;
-}
-
-static inline void eeh_outb(u8 val, unsigned long port)
-{
-       out_8((u8 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u16 eeh_inw(unsigned long port)
-{
-       u16 val;
-       val = in_le16((u16 __iomem *)(port+pci_io_base));
-       if (EEH_POSSIBLE_ERROR(val, u16))
-               return eeh_check_failure((void __iomem *)(port), val);
-       return val;
-}
-
-static inline void eeh_outw(u16 val, unsigned long port)
-{
-       out_le16((u16 __iomem *)(port+pci_io_base), val);
-}
-
-static inline u32 eeh_inl(unsigned long port)
-{
-       u32 val;
-       val = in_le32((u32 __iomem *)(port+pci_io_base));
-       if (EEH_POSSIBLE_ERROR(val, u32))
-               return eeh_check_failure((void __iomem *)(port), val);
-       return val;
-}
-
-static inline void eeh_outl(u32 val, unsigned long port)
-{
-       out_le32((u32 __iomem *)(port+pci_io_base), val);
+       if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
+               eeh_check_failure(src, *((u32 *)(dest + n - 4)));
 }
 
 /* in-string eeh macros */
-static inline void eeh_insb(unsigned long port, void * buf, int ns)
+static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
+                             int ns)
 {
-       _insb((u8 __iomem *)(port+pci_io_base), buf, ns);
+       _insb(addr, buf, ns);
        if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
-               eeh_check_failure((void __iomem *)(port), *(u8*)buf);
+               eeh_check_failure(addr, *(u8*)buf);
 }
 
-static inline void eeh_insw_ns(unsigned long port, void * buf, int ns)
+static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
+                             int ns)
 {
-       _insw_ns((u16 __iomem *)(port+pci_io_base), buf, ns);
+       _insw(addr, buf, ns);
        if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
-               eeh_check_failure((void __iomem *)(port), *(u16*)buf);
+               eeh_check_failure(addr, *(u16*)buf);
 }
 
-static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
+static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
+                             int nl)
 {
-       _insl_ns((u32 __iomem *)(port+pci_io_base), buf, nl);
+       _insl(addr, buf, nl);
        if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
-               eeh_check_failure((void __iomem *)(port), *(u32*)buf);
+               eeh_check_failure(addr, *(u32*)buf);
 }
 
 #endif /* __KERNEL__ */
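The eeh_read*() helpers above all follow the same pattern: do the MMIO read, screen the value for all-ones (EEH_POSSIBLE_ERROR), and only then pay for the failure check, since an isolated PCI slot returns all-ones on every load. A toy version with an ordinary variable standing in for the device register:

#include <stdio.h>
#include <stdint.h>

static void check_failure(uint32_t val)
{
        /* In the kernel this would ask the firmware whether the device
         * has really been isolated; here we only report the suspect read. */
        printf("possible EEH failure, value 0x%08x\n", val);
}

static uint32_t checked_readl(const volatile uint32_t *reg)
{
        uint32_t val = *reg;

        if (val == 0xffffffffu)         /* EEH_POSSIBLE_ERROR(val, u32) */
                check_failure(val);
        return val;
}

int main(void)
{
        volatile uint32_t good = 0x12345678, dead = 0xffffffff;

        printf("good read: 0x%08x\n", checked_readl(&good));
        printf("dead read: 0x%08x\n", checked_readl(&dead));
        return 0;
}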
index 9a83a987d3968415b62216129c79c1cd4588f68f..d36426c01b6b95efe784c7d567b5f58a2212ecbf 100644 (file)
@@ -124,12 +124,10 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
 # define ELF_DATA      ELFDATA2MSB
   typedef elf_greg_t64 elf_greg_t;
   typedef elf_gregset_t64 elf_gregset_t;
-# define elf_addr_t unsigned long
 #else
   /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
   typedef elf_greg_t32 elf_greg_t;
   typedef elf_gregset_t32 elf_gregset_t;
-# define elf_addr_t __u32
 #endif /* ELF_ARCH */
 
 /* Floating point registers */
@@ -411,4 +409,17 @@ do {                                                                       \
 /* Keep this the last entry.  */
 #define R_PPC64_NUM            107
 
+#ifdef CONFIG_SPU_BASE
+/* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
+#define NT_SPU         1
+
+extern int arch_notes_size(void);
+extern void arch_write_notes(struct file *file);
+
+#define ELF_CORE_EXTRA_NOTES_SIZE arch_notes_size()
+#define ELF_CORE_WRITE_EXTRA_NOTES arch_write_notes(file)
+
+#define ARCH_HAVE_EXTRA_ELF_NOTES
+#endif /* CONFIG_SPU_BASE */
+
 #endif /* _ASM_POWERPC_ELF_H */
index fdf9aff71150412a9d351241b3c10d65652eb38a..98f7b62422c9751d9f398f5f276afd5a450f52bc 100644 (file)
@@ -42,6 +42,7 @@
 #define FW_FEATURE_SPLPAR      ASM_CONST(0x0000000000100000)
 #define FW_FEATURE_ISERIES     ASM_CONST(0x0000000000200000)
 #define FW_FEATURE_LPAR                ASM_CONST(0x0000000000400000)
+#define FW_FEATURE_PS3_LV1     ASM_CONST(0x0000000000800000)
 
 #ifndef __ASSEMBLY__
 
@@ -58,12 +59,22 @@ enum {
        FW_FEATURE_PSERIES_ALWAYS = 0,
        FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
        FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
+       FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+       FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+       FW_FEATURE_NATIVE_POSSIBLE = 0,
+       FW_FEATURE_NATIVE_ALWAYS = 0,
        FW_FEATURE_POSSIBLE =
 #ifdef CONFIG_PPC_PSERIES
                FW_FEATURE_PSERIES_POSSIBLE |
 #endif
 #ifdef CONFIG_PPC_ISERIES
                FW_FEATURE_ISERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_PS3
+               FW_FEATURE_PS3_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_NATIVE
+               FW_FEATURE_NATIVE_ALWAYS |
 #endif
                0,
        FW_FEATURE_ALWAYS =
@@ -72,6 +83,12 @@ enum {
 #endif
 #ifdef CONFIG_PPC_ISERIES
                FW_FEATURE_ISERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_PS3
+               FW_FEATURE_PS3_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_NATIVE
+               FW_FEATURE_NATIVE_ALWAYS &
 #endif
                FW_FEATURE_POSSIBLE,
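The FW_FEATURE_POSSIBLE / FW_FEATURE_ALWAYS construction above ORs together the possible sets of every platform the kernel is configured for and ANDs together their always sets, so a feature counts as "always there" only if every configured platform guarantees it. A small numeric illustration with invented feature bits:

#include <stdio.h>

#define FEAT_LPAR   0x1UL
#define FEAT_HYPER  0x2UL
#define FEAT_VIO    0x4UL

int main(void)
{
        /* pretend both pseries and PS3 support are compiled in */
        unsigned long pseries_possible = FEAT_LPAR | FEAT_HYPER | FEAT_VIO;
        unsigned long pseries_always   = FEAT_LPAR | FEAT_HYPER;
        unsigned long ps3_possible     = FEAT_LPAR;
        unsigned long ps3_always       = FEAT_LPAR;

        unsigned long possible = pseries_possible | ps3_possible;
        unsigned long always   = (pseries_always & ps3_always) & possible;

        printf("possible=%#lx always=%#lx\n", possible, always);
        return 0;       /* prints possible=0x7 always=0x1 */
}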
 
index 936422e54891f0cab44229639f8321e9d47a270e..3f3673fd3ff34abef169c139c71302084b2cc874 100644 (file)
@@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index d40359204abaa1ff38743c718a72e40ba22d3f96..d604863d72fb08b9b69a757fe20f117edb8e3cae 100644 (file)
@@ -7,16 +7,40 @@
 #ifdef __KERNEL__
 
 #include <linux/errno.h>
+#include <linux/compiler.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
 extern void timer_interrupt(struct pt_regs *);
 
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
+static inline unsigned long local_get_flags(void)
+{
+       unsigned long flags;
+
+       __asm__ __volatile__("lbz %0,%1(13)"
+       : "=r" (flags)
+       : "i" (offsetof(struct paca_struct, soft_enabled)));
+
+       return flags;
+}
+
+static inline unsigned long local_irq_disable(void)
+{
+       unsigned long flags, zero;
+
+       __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
+       : "=r" (flags), "=&r" (zero)
+       : "i" (offsetof(struct paca_struct, soft_enabled))
+       : "memory");
+
+       return flags;
+}
 
-extern unsigned long local_get_flags(void);
-extern unsigned long local_irq_disable(void);
 extern void local_irq_restore(unsigned long);
+extern void iseries_handle_interrupts(void);
 
 #define local_irq_enable()     local_irq_restore(1)
 #define local_save_flags(flags)        ((flags) = local_get_flags())
@@ -24,17 +48,14 @@ extern void local_irq_restore(unsigned long);
 
 #define irqs_disabled()                (local_get_flags() == 0)
 
+#define hard_irq_enable()      __mtmsrd(mfmsr() | MSR_EE, 1)
+#define hard_irq_disable()     __mtmsrd(mfmsr() & ~MSR_EE, 1)
+
 #else
 
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)  mtmsr(x)
 #define local_irq_restore(flags)       __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
-#elif defined(__powerpc64__)
-#define SET_MSR_EE(x)  __mtmsrd(x, 1)
-#define local_irq_restore(flags) do { \
-       __asm__ __volatile__("": : :"memory"); \
-       __mtmsrd((flags), 1); \
-} while(0)
 #else
 #define SET_MSR_EE(x)  mtmsr(x)
 #define local_irq_restore(flags)       mtmsr(flags)
@@ -81,7 +102,10 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #define local_irq_save(flags)  local_irq_save_ptr(&flags)
 #define irqs_disabled()                ((mfmsr() & MSR_EE) == 0)
 
-#endif /* CONFIG_PPC_ISERIES */
+#define hard_irq_enable()      local_irq_enable()
+#define hard_irq_disable()     local_irq_disable()
+
+#endif /* CONFIG_PPC64 */
 
 #define mask_irq(irq)                                          \
        ({                                                      \
index 3493429b70f5a577ab933cd3060f135157ad7f36..66112114b8c588b066c1ade4bd9778cda00bdb3c 100644 (file)
@@ -44,7 +44,6 @@
 #include <linux/mod_devicetable.h>
 #include <asm/of_device.h>
 
-extern struct dma_mapping_ops ibmebus_dma_ops;
 extern struct bus_type ibmebus_bus_type;
 
 struct ibmebus_dev {   
index c8390f9485de9dd6ee6c9cfc13dab1a218c7e00a..0f66f0f82c329709f1124cabc6cfd8013c6e54ae 100644 (file)
 #endif
 #endif
 
-#define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 __iomem *)(p), (a), (c))
-#define __ide_mm_outsw(p, a, c)        _outsw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_outsl(p, a, c)        _outsl_ns((volatile u32 __iomem *)(p), (a), (c))
+#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c)        writesw((void __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c)        writesl((void __iomem *)(p), (a), (c))
 
 #ifndef  __powerpc64__
 #include <linux/hdreg.h>
index ce12f85fff9b1a04afca7b24b9ef62754847841e..9fdd0491f6a37fdc75ecc940ca6e73dfb1d50605 100644 (file)
@@ -136,22 +136,7 @@ struct qe_timers {
 
 /* BRG */
 struct qe_brg {
-       __be32  brgc1;          /* BRG1 configuration register */
-       __be32  brgc2;          /* BRG2 configuration register */
-       __be32  brgc3;          /* BRG3 configuration register */
-       __be32  brgc4;          /* BRG4 configuration register */
-       __be32  brgc5;          /* BRG5 configuration register */
-       __be32  brgc6;          /* BRG6 configuration register */
-       __be32  brgc7;          /* BRG7 configuration register */
-       __be32  brgc8;          /* BRG8 configuration register */
-       __be32  brgc9;          /* BRG9 configuration register */
-       __be32  brgc10;         /* BRG10 configuration register */
-       __be32  brgc11;         /* BRG11 configuration register */
-       __be32  brgc12;         /* BRG12 configuration register */
-       __be32  brgc13;         /* BRG13 configuration register */
-       __be32  brgc14;         /* BRG14 configuration register */
-       __be32  brgc15;         /* BRG15 configuration register */
-       __be32  brgc16;         /* BRG16 configuration register */
+       __be32  brgc[16];       /* BRG configuration registers */
        u8      res0[0x40];
 } __attribute__ ((packed));
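The qe_brg change collapses sixteen identically-typed configuration registers into one array; the layout stays the same and only the accessors become indexed (brgcN turns into brgc[N-1]). A quick standalone check of that equivalence, with plain uint32_t standing in for __be32 and only four of the sixteen registers modelled:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct brg_named { uint32_t brgc1, brgc2, brgc3, brgc4; };
struct brg_array { uint32_t brgc[4]; };

int main(void)
{
        /* brgc3 sits at the same offset as brgc[2] */
        printf("brgc3 at %zu, brgc[2] at %zu\n",
               offsetof(struct brg_named, brgc3),
               offsetof(struct brg_array, brgc) + 2 * sizeof(uint32_t));
        return 0;
}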
 
diff --git a/include/asm-powerpc/io-defs.h b/include/asm-powerpc/io-defs.h
new file mode 100644 (file)
index 0000000..03691ab
--- /dev/null
@@ -0,0 +1,59 @@
+/* This file is meant to be included multiple times by other headers */
+
+DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr))
+
+#ifdef __powerpc64__
+DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr))
+DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr))
+DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr))
+#endif /* __powerpc64__ */
+
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port))
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port))
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port))
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port))
+
+DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+                (a, b, c))
+DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+                (a, b, c))
+DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c), \
+                (a, b, c))
+DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+                (a, b, c))
+DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+                (a, b, c))
+DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c), \
+                (a, b, c))
+
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), \
+                (p, b, c))
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), \
+                (p, b, c))
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), \
+                (p, b, c))
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), \
+                (p, b, c))
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), \
+                (p, b, c))
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), \
+                (p, b, c))
+
+DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n),      \
+                (a, c, n))
+DEF_PCI_AC_NORET(memcpy_fromio,(void *d,const PCI_IO_ADDR s,unsigned long n), \
+                (d, s, n))
+DEF_PCI_AC_NORET(memcpy_toio,(PCI_IO_ADDR d,const void *s,unsigned long n),   \
+                (d, s, n))
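+
+/* Illustrative sketch (not part of the generated interface): a consumer of
+ * this file is expected to define the two DEF_PCI_AC_* macros, include this
+ * header, then undefine them again, e.g. to emit one function pointer per
+ * accessor:
+ *
+ *     #define DEF_PCI_AC_RET(name, ret, at, al)   ret (*name) at;
+ *     #define DEF_PCI_AC_NORET(name, at, al)      void (*name) at;
+ *     #include <asm/io-defs.h>
+ *     #undef DEF_PCI_AC_RET
+ *     #undef DEF_PCI_AC_NORET
+ *
+ * asm-powerpc/io.h below uses exactly this pattern to build struct ppc_pci_io
+ * and the inline wrappers around it.
+ */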
index c2c5f14b5f5ff3dd8c335cf4025d3c34096f58a1..1cd532379c30035fd64f01d5d973c7414ffb15dc 100644 (file)
 extern int check_legacy_ioport(unsigned long base_port);
 #define PNPBIOS_BASE   0xf000  /* only relevant for PReP */
 
-#ifndef CONFIG_PPC64
-#include <asm-ppc/io.h>
-#else
-
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/byteorder.h>
-#include <asm/paca.h>
 #include <asm/synch.h>
 #include <asm/delay.h>
+#include <asm/mmu.h>
 
 #include <asm-generic/iomap.h>
 
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
 #define SIO_CONFIG_RA  0x398
 #define SIO_CONFIG_RD  0x399
 
 #define SLOW_DOWN_IO
 
+/* The 32-bit build uses slightly different variables for the various IO
+ * bases. Most of this file only uses _IO_BASE though, which we
+ * define properly based on the platform.
+ */
+#ifndef CONFIG_PCI
+#define _IO_BASE       0
+#define _ISA_MEM_BASE  0
+#define PCI_DRAM_OFFSET 0
+#elif defined(CONFIG_PPC32)
+#define _IO_BASE       isa_io_base
+#define _ISA_MEM_BASE  isa_mem_base
+#define PCI_DRAM_OFFSET        pci_dram_offset
+#else
+#define _IO_BASE       pci_io_base
+#define _ISA_MEM_BASE  0
+#define PCI_DRAM_OFFSET        0
+#endif
+
 extern unsigned long isa_io_base;
+extern unsigned long isa_mem_base;
 extern unsigned long pci_io_base;
+extern unsigned long pci_dram_offset;
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
+#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+#endif
+
+/*
+ *
+ * Low level MMIO accessors
+ *
+ * This provides the non-bus specific accessors to MMIO. Those are PowerPC
+ * specific and thus shouldn't be used in generic code. The accessors
+ * provided here are:
+ *
+ *     in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
+ *     out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
+ *     _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
+ *
+ * These operate directly on a kernel virtual address. Note that the prototype
+ * for the out_* accessors has the arguments in opposite order from the usual
+ * Linux PCI accessors. Unlike those, they take the address first and the value
+ * next.
+ *
+ * Note: I might drop the _ns suffix on the stream operations soon, as it is
+ * simply normal for stream operations to not swap in the first place.
+ *
+ */
+
+#ifdef CONFIG_PPC64
+#define IO_SET_SYNC_FLAG()     do { get_paca()->io_sync = 1; } while(0)
+#else
+#define IO_SET_SYNC_FLAG()
+#endif
+
+#define DEF_MMIO_IN(name, type, insn)                                  \
+static inline type name(const volatile type __iomem *addr)             \
+{                                                                      \
+       type ret;                                                       \
+       __asm__ __volatile__("sync;" insn ";twi 0,%0,0;isync"           \
+               : "=r" (ret) : "r" (addr), "m" (*addr));                \
+       return ret;                                                     \
+}
+
+#define DEF_MMIO_OUT(name, type, insn)                                 \
+static inline void name(volatile type __iomem *addr, type val)         \
+{                                                                      \
+       __asm__ __volatile__("sync;" insn                               \
+               : "=m" (*addr) : "r" (val), "r" (addr));                \
+       IO_SET_SYNC_FLAG();                                     \
+}
+
+
+#define DEF_MMIO_IN_BE(name, size, insn) \
+       DEF_MMIO_IN(name, u##size, __stringify(insn)"%U2%X2 %0,%2")
+#define DEF_MMIO_IN_LE(name, size, insn) \
+       DEF_MMIO_IN(name, u##size, __stringify(insn)" %0,0,%1")
+
+#define DEF_MMIO_OUT_BE(name, size, insn) \
+       DEF_MMIO_OUT(name, u##size, __stringify(insn)"%U0%X0 %1,%0")
+#define DEF_MMIO_OUT_LE(name, size, insn) \
+       DEF_MMIO_OUT(name, u##size, __stringify(insn)" %1,0,%2")
+
+DEF_MMIO_IN_BE(in_8,     8, lbz);
+DEF_MMIO_IN_BE(in_be16, 16, lhz);
+DEF_MMIO_IN_BE(in_be32, 32, lwz);
+DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
+DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+
+DEF_MMIO_OUT_BE(out_8,     8, stb);
+DEF_MMIO_OUT_BE(out_be16, 16, sth);
+DEF_MMIO_OUT_BE(out_be32, 32, stw);
+DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+
+#ifdef __powerpc64__
+DEF_MMIO_OUT_BE(out_be64, 64, std);
+DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+/* There are no asm instructions for 64-bit reverse loads and stores */
+static inline u64 in_le64(const volatile u64 __iomem *addr)
+{
+       return le64_to_cpu(in_be64(addr));
+}
+
+static inline void out_le64(volatile u64 __iomem *addr, u64 val)
+{
+       out_be64(addr, cpu_to_le64(val));
+}
+#endif /* __powerpc64__ */
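+
+/* Illustrative sketch of how a driver might use these low level accessors
+ * on an ioremap'ed region (the register offsets and resource variables are
+ * hypothetical, for the example only):
+ *
+ *     void __iomem *regs = ioremap(res_start, res_size);
+ *     u32 status = in_be32(regs + 0x10);
+ *     out_be32(regs + 0x14, status | 0x1);
+ *
+ * Note again that out_* takes the address first and the value second.
+ */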
+
+/*
+ * Low level IO stream instructions are defined out of line for now
+ */
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
+
+/* The _ns naming is historical and will be removed. For now, just #define
+ * the non _ns equivalent names
+ */
+#define _insw  _insw_ns
+#define _insl  _insl_ns
+#define _outsw _outsw_ns
+#define _outsl _outsl_ns
+
+
+/*
+ * memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
+ */
+
+extern void _memset_io(volatile void __iomem *addr, int c, unsigned long n);
+extern void _memcpy_fromio(void *dest, const volatile void __iomem *src,
+                          unsigned long n);
+extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
+                        unsigned long n);
+
+/*
+ *
+ * PCI and standard ISA accessors
+ *
+ * These are globally defined Linux accessors for devices on PCI or ISA
+ * busses. They follow the Linux-defined semantics. The current implementation
+ * for PowerPC is as close as possible to the x86 version of these, and thus
+ * provides fairly heavyweight barriers for the non-raw versions.
+ *
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO
+ * is set, allowing the platform to provide its own implementation of some
+ * or all of the accessors.
+ */
+
+/*
+ * Include the EEH definitions only when EEH is enabled so they don't get
+ * in the way when building for 32 bits
+ */
+#ifdef CONFIG_EEH
+#include <asm/eeh.h>
+#endif
+
+/* Shortcut to the MMIO argument pointer */
+#define PCI_IO_ADDR    volatile void __iomem *
+
+/* Indirect IO address tokens:
+ *
+ * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
+ * on all IOs. (Note that this is all 64 bits only for now)
+ *
+ * To help platforms that may need to differentiate MMIO addresses in
+ * their hooks, a bitfield is reserved for use by the platform near the
+ * top of MMIO addresses (not PIO, those have to cope the hard way).
+ *
+ * This bit field is 12 bits wide and sits at the top of the IO virtual
+ * addresses (see PCI_IO_IND_TOKEN_MASK below).
+ *
+ * The kernel virtual space is thus:
+ *
+ *  0xD000000000000000         : vmalloc
+ *  0xD000080000000000         : PCI PHB IO space
+ *  0xD000080080000000         : ioremap
+ *  0xD0000fffffffffff         : end of ioremap region
+ *
+ * Since the top 4 bits are reserved as the region ID, we thus use
+ * the next 12 bits and keep 4 bits available for the future if the
+ * virtual address space is ever to be extended.
+ *
+ * The direct IO mapping operations will then mask off those bits
+ * before doing the actual access, though that only happens when
+ * CONFIG_PPC_INDIRECT_IO is set, so be careful when you use that
+ * mechanism
+ */
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+#define PCI_IO_IND_TOKEN_MASK  0x0fff000000000000ul
+#define PCI_IO_IND_TOKEN_SHIFT 48
+#define PCI_FIX_ADDR(addr)                                             \
+       ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
+#define PCI_GET_ADDR_TOKEN(addr)                                       \
+       (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >>             \
+               PCI_IO_IND_TOKEN_SHIFT)
+#define PCI_SET_ADDR_TOKEN(addr, token)                                \
+do {                                                                   \
+       unsigned long __a = (unsigned long)(addr);                      \
+       __a &= ~PCI_IO_IND_TOKEN_MASK;                                  \
+       __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT;      \
+       (addr) = (void __iomem *)__a;                                   \
+} while(0)
+#else
+#define PCI_FIX_ADDR(addr) (addr)
+#endif
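+
+/* Illustrative sketch (hypothetical platform code) of how the token macros
+ * combine when CONFIG_PPC_INDIRECT_IO is set:
+ *
+ *     void __iomem *p = ioremap(paddr, size);
+ *     PCI_SET_ADDR_TOKEN(p, my_bus_id);           stash a token in bits 48..59
+ *     ...
+ *     unsigned long id = PCI_GET_ADDR_TOKEN(p);   recover it, e.g. in a hook
+ *     in_le32(PCI_FIX_ADDR(p));                   token stripped for the access
+ *
+ * my_bus_id, paddr and size are assumptions for the example only; the
+ * regular accessors below apply PCI_FIX_ADDR() themselves.
+ */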
 
-#ifdef CONFIG_PPC_ISERIES
-
-extern int in_8(const volatile unsigned char __iomem *addr);
-extern void out_8(volatile unsigned char __iomem *addr, int val);
-extern int in_le16(const volatile unsigned short __iomem *addr);
-extern int in_be16(const volatile unsigned short __iomem *addr);
-extern void out_le16(volatile unsigned short __iomem *addr, int val);
-extern void out_be16(volatile unsigned short __iomem *addr, int val);
-extern unsigned in_le32(const volatile unsigned __iomem *addr);
-extern unsigned in_be32(const volatile unsigned __iomem *addr);
-extern void out_le32(volatile unsigned __iomem *addr, int val);
-extern void out_be32(volatile unsigned __iomem *addr, int val);
-extern unsigned long in_le64(const volatile unsigned long __iomem *addr);
-extern unsigned long in_be64(const volatile unsigned long __iomem *addr);
-extern void out_le64(volatile unsigned long __iomem *addr, unsigned long val);
-extern void out_be64(volatile unsigned long __iomem *addr, unsigned long val);
-
-extern unsigned char __raw_readb(const volatile void __iomem *addr);
-extern unsigned short __raw_readw(const volatile void __iomem *addr);
-extern unsigned int __raw_readl(const volatile void __iomem *addr);
-extern unsigned long __raw_readq(const volatile void __iomem *addr);
-extern void __raw_writeb(unsigned char v, volatile void __iomem *addr);
-extern void __raw_writew(unsigned short v, volatile void __iomem *addr);
-extern void __raw_writel(unsigned int v, volatile void __iomem *addr);
-extern void __raw_writeq(unsigned long v, volatile void __iomem *addr);
-
-extern void memset_io(volatile void __iomem *addr, int c, unsigned long n);
-extern void memcpy_fromio(void *dest, const volatile void __iomem *src,
-                                 unsigned long n);
-extern void memcpy_toio(volatile void __iomem *dest, const void *src,
-                                 unsigned long n);
-
-#else /* CONFIG_PPC_ISERIES */
-
-#define in_8(addr)             __in_8((addr))
-#define out_8(addr, val)       __out_8((addr), (val))
-#define in_le16(addr)          __in_le16((addr))
-#define in_be16(addr)          __in_be16((addr))
-#define out_le16(addr, val)    __out_le16((addr), (val))
-#define out_be16(addr, val)    __out_be16((addr), (val))
-#define in_le32(addr)          __in_le32((addr))
-#define in_be32(addr)          __in_be32((addr))
-#define out_le32(addr, val)    __out_le32((addr), (val))
-#define out_be32(addr, val)    __out_be32((addr), (val))
-#define in_le64(addr)          __in_le64((addr))
-#define in_be64(addr)          __in_be64((addr))
-#define out_le64(addr, val)    __out_le64((addr), (val))
-#define out_be64(addr, val)    __out_be64((addr), (val))
+
+/*
+ * Non-ordered and non-swapping "raw" accessors
+ */
 
 static inline unsigned char __raw_readb(const volatile void __iomem *addr)
 {
-       return *(volatile unsigned char __force *)addr;
+       return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
 }
 static inline unsigned short __raw_readw(const volatile void __iomem *addr)
 {
-       return *(volatile unsigned short __force *)addr;
+       return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
 }
 static inline unsigned int __raw_readl(const volatile void __iomem *addr)
 {
-       return *(volatile unsigned int __force *)addr;
-}
-static inline unsigned long __raw_readq(const volatile void __iomem *addr)
-{
-       return *(volatile unsigned long __force *)addr;
+       return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
 }
 static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
 {
-       *(volatile unsigned char __force *)addr = v;
+       *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
 }
 static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
 {
-       *(volatile unsigned short __force *)addr = v;
+       *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
 }
 static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
 {
-       *(volatile unsigned int __force *)addr = v;
+       *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
+}
+
+#ifdef __powerpc64__
+static inline unsigned long __raw_readq(const volatile void __iomem *addr)
+{
+       return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
 }
 static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 {
-       *(volatile unsigned long __force *)addr = v;
+       *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
+}
+#endif /* __powerpc64__ */
+
+/*
+ *
+ * PCI PIO and MMIO accessors.
+ *
+ *
+ * On 32 bits, PIO operations have a recovery mechanism in case they trigger
+ * machine checks (which they occasionally do when probing non-existent
+ * IO ports on some platforms, like PowerMac and 8xx).
+ * I always found it to be of dubious reliability and I am tempted to get
+ * rid of it one of these days. So if you think it's important to keep it,
+ * please voice up asap. We never had it for 64 bits and I do not intend
+ * to port it over
+ */
+
+#ifdef CONFIG_PPC32
+
+#define __do_in_asm(name, op)                          \
+static inline unsigned int name(unsigned int port)     \
+{                                                      \
+       unsigned int x;                                 \
+       __asm__ __volatile__(                           \
+               "sync\n"                                \
+               "0:"    op "    %0,0,%1\n"              \
+               "1:     twi     0,%0,0\n"               \
+               "2:     isync\n"                        \
+               "3:     nop\n"                          \
+               "4:\n"                                  \
+               ".section .fixup,\"ax\"\n"              \
+               "5:     li      %0,-1\n"                \
+               "       b       4b\n"                   \
+               ".previous\n"                           \
+               ".section __ex_table,\"a\"\n"           \
+               "       .align  2\n"                    \
+               "       .long   0b,5b\n"                \
+               "       .long   1b,5b\n"                \
+               "       .long   2b,5b\n"                \
+               "       .long   3b,5b\n"                \
+               ".previous"                             \
+               : "=&r" (x)                             \
+               : "r" (port + _IO_BASE));               \
+       return x;                                       \
+}
+
+#define __do_out_asm(name, op)                         \
+static inline void name(unsigned int val, unsigned int port) \
+{                                                      \
+       __asm__ __volatile__(                           \
+               "sync\n"                                \
+               "0:" op " %0,0,%1\n"                    \
+               "1:     sync\n"                         \
+               "2:\n"                                  \
+               ".section __ex_table,\"a\"\n"           \
+               "       .align  2\n"                    \
+               "       .long   0b,2b\n"                \
+               "       .long   1b,2b\n"                \
+               ".previous"                             \
+               : : "r" (val), "r" (port + _IO_BASE));  \
+}
+
+__do_in_asm(_rec_inb, "lbzx")
+__do_in_asm(_rec_inw, "lhbrx")
+__do_in_asm(_rec_inl, "lwbrx")
+__do_out_asm(_rec_outb, "stbx")
+__do_out_asm(_rec_outw, "sthbrx")
+__do_out_asm(_rec_outl, "stwbrx")
+
+#endif /* CONFIG_PPC32 */
+
+/* The "__do_*" operations below provide the actual "base" implementation
+ * for each of the defined acccessor. Some of them use the out_* functions
+ * directly, some of them still use EEH, though we might change that in the
+ * future. Those macros below provide the necessary argument swapping and
+ * handling of the IO base for PIO.
+ *
+ * They are themselves used by the macros that define the actual accessors
+ * and can be used by the hooks if any.
+ *
+ * Note that PIO operations are always defined in terms of their corresonding
+ * MMIO operations. That allows platforms like iSeries who want to modify the
+ * behaviour of both to only hook on the MMIO version and get both. It's also
+ * possible to hook directly at the toplevel PIO operation if they have to
+ * be handled differently
+ */
+#define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val)
+#define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val)
+#define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val)
+#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
+#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
+#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)
+
+#ifdef CONFIG_EEH
+#define __do_readb(addr)       eeh_readb(PCI_FIX_ADDR(addr))
+#define __do_readw(addr)       eeh_readw(PCI_FIX_ADDR(addr))
+#define __do_readl(addr)       eeh_readl(PCI_FIX_ADDR(addr))
+#define __do_readq(addr)       eeh_readq(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr)    eeh_readw_be(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr)    eeh_readl_be(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr)    eeh_readq_be(PCI_FIX_ADDR(addr))
+#else /* CONFIG_EEH */
+#define __do_readb(addr)       in_8(PCI_FIX_ADDR(addr))
+#define __do_readw(addr)       in_le16(PCI_FIX_ADDR(addr))
+#define __do_readl(addr)       in_le32(PCI_FIX_ADDR(addr))
+#define __do_readq(addr)       in_le64(PCI_FIX_ADDR(addr))
+#define __do_readw_be(addr)    in_be16(PCI_FIX_ADDR(addr))
+#define __do_readl_be(addr)    in_be32(PCI_FIX_ADDR(addr))
+#define __do_readq_be(addr)    in_be64(PCI_FIX_ADDR(addr))
+#endif /* !defined(CONFIG_EEH) */
+
+#ifdef CONFIG_PPC32
+#define __do_outb(val, port)   _rec_outb(val, port)
+#define __do_outw(val, port)   _rec_outw(val, port)
+#define __do_outl(val, port)   _rec_outl(val, port)
+#define __do_inb(port)         _rec_inb(port)
+#define __do_inw(port)         _rec_inw(port)
+#define __do_inl(port)         _rec_inl(port)
+#else /* CONFIG_PPC32 */
+#define __do_outb(val, port)   writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_outw(val, port)   writew(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_outl(val, port)   writel(val,(PCI_IO_ADDR)_IO_BASE+port);
+#define __do_inb(port)         readb((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_inw(port)         readw((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_inl(port)         readl((PCI_IO_ADDR)_IO_BASE + port);
+#endif /* !CONFIG_PPC32 */
+
+#ifdef CONFIG_EEH
+#define __do_readsb(a, b, n)   eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n)   eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n)   eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
+#else /* CONFIG_EEH */
+#define __do_readsb(a, b, n)   _insb(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsw(a, b, n)   _insw(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsl(a, b, n)   _insl(PCI_FIX_ADDR(a), (b), (n))
+#endif /* !CONFIG_EEH */
+#define __do_writesb(a, b, n)  _outsb(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesw(a, b, n)  _outsw(PCI_FIX_ADDR(a),(b),(n))
+#define __do_writesl(a, b, n)  _outsl(PCI_FIX_ADDR(a),(b),(n))
+
+#define __do_insb(p, b, n)     readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insw(p, b, n)     readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_insl(p, b, n)     readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
+#define __do_outsb(p, b, n)    writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsw(p, b, n)    writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_outsl(p, b, n)    writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+
+#define __do_memset_io(addr, c, n)     \
+                               _memset_io(PCI_FIX_ADDR(addr), c, n)
+#define __do_memcpy_toio(dst, src, n)  \
+                               _memcpy_toio(PCI_FIX_ADDR(dst), src, n)
+
+#ifdef CONFIG_EEH
+#define __do_memcpy_fromio(dst, src, n)        \
+                               eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
+#else /* CONFIG_EEH */
+#define __do_memcpy_fromio(dst, src, n)        \
+                               _memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
+#endif /* !CONFIG_EEH */
+
+#ifdef CONFIG_PPC_INDIRECT_IO
+#define DEF_PCI_HOOK(x)                x
+#else
+#define DEF_PCI_HOOK(x)                NULL
+#endif
+
+/* Structure containing all the hooks */
+extern struct ppc_pci_io {
+
+#define DEF_PCI_AC_RET(name, ret, at, al)      ret (*name) at;
+#define DEF_PCI_AC_NORET(name, at, al)         void (*name) at;
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+} ppc_pci_io;
+
+/* The inline wrappers */
+#define DEF_PCI_AC_RET(name, ret, at, al)                      \
+static inline ret name at                                      \
+{                                                              \
+       if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL)              \
+               return ppc_pci_io.name al;                      \
+       return __do_##name al;                                  \
+}
+
+#define DEF_PCI_AC_NORET(name, at, al)                         \
+static inline void name at                                     \
+{                                                              \
+       if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL)              \
+               ppc_pci_io.name al;                             \
+       else                                                    \
+               __do_##name al;                                 \
 }
-#define memset_io(a,b,c)       eeh_memset_io((a),(b),(c))
-#define memcpy_fromio(a,b,c)   eeh_memcpy_fromio((a),(b),(c))
-#define memcpy_toio(a,b,c)     eeh_memcpy_toio((a),(b),(c))
 
-#endif /* CONFIG_PPC_ISERIES */
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
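+/* Illustrative sketch (hypothetical platform code, not part of this patch)
+ * of how a platform built with CONFIG_PPC_INDIRECT_IO might install a hook:
+ *
+ *     static u8 my_readb(const PCI_IO_ADDR addr)
+ *     {
+ *             return in_8(PCI_FIX_ADDR(addr));    plus platform specifics
+ *     }
+ *     ...
+ *     ppc_pci_io.readb = my_readb;
+ *
+ * Any accessor left NULL in ppc_pci_io falls through to its __do_* default.
+ * my_readb is an assumption for the example only.
+ */
+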
+/* Some drivers check for the presence of readq & writeq with
+ * an #ifdef, so we make them happy here.
+ */
+#ifdef __powerpc64__
+#define readq  readq
+#define writeq writeq
+#endif
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+#define dma_cache_inv(_start,_size) \
+       invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+       clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+       flush_dcache_range(_start, (_start + _size))
+
+#else /* CONFIG_NOT_COHERENT_CACHE */
+
+#define dma_cache_inv(_start,_size)            do { } while (0)
+#define dma_cache_wback(_start,_size)          do { } while (0)
+#define dma_cache_wback_inv(_start,_size)      do { } while (0)
+
+#endif /* !CONFIG_NOT_COHERENT_CACHE */
 
 /*
- * The insw/outsw/insl/outsl macros don't do byte-swapping.
- * They are only used in practice for transferring buffers which
- * are arrays of bytes, and byte-swapping is not appropriate in
- * that case.  - paulus */
-#define insb(port, buf, ns)    eeh_insb((port), (buf), (ns))
-#define insw(port, buf, ns)    eeh_insw_ns((port), (buf), (ns))
-#define insl(port, buf, nl)    eeh_insl_ns((port), (buf), (nl))
-
-#define outsb(port, buf, ns)  _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsw(port, buf, ns)  _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsl(port, buf, nl)  _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
-
-#define readb(addr)            eeh_readb(addr)
-#define readw(addr)            eeh_readw(addr)
-#define readl(addr)            eeh_readl(addr)
-#define readq(addr)            eeh_readq(addr)
-#define writeb(data, addr)     eeh_writeb((data), (addr))
-#define writew(data, addr)     eeh_writew((data), (addr))
-#define writel(data, addr)     eeh_writel((data), (addr))
-#define writeq(data, addr)     eeh_writeq((data), (addr))
-#define inb(port)              eeh_inb((unsigned long)port)
-#define outb(val, port)                eeh_outb(val, (unsigned long)port)
-#define inw(port)              eeh_inw((unsigned long)port)
-#define outw(val, port)                eeh_outw(val, (unsigned long)port)
-#define inl(port)              eeh_inl((unsigned long)port)
-#define outl(val, port)                eeh_outl(val, (unsigned long)port)
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p)   __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p)  p
 
+/*
+ * We don't do relaxed operations yet, at least not with these semantics
+ */
 #define readb_relaxed(addr) readb(addr)
 #define readw_relaxed(addr) readw(addr)
 #define readl_relaxed(addr) readl(addr)
 #define readq_relaxed(addr) readq(addr)
 
-extern void _insb(volatile u8 __iomem *port, void *buf, long count);
-extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
-extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
-extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
-extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
-extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
-
+#ifdef CONFIG_PPC32
+#define mmiowb()
+#else
+/*
+ * Enforce synchronisation of stores vs. spin_unlock
+ * (this does it explicitly, though our implementation of spin_unlock
+ * does it implicitly too)
+ */
 static inline void mmiowb(void)
 {
        unsigned long tmp;
@@ -169,6 +545,24 @@ static inline void mmiowb(void)
        : "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
        : "memory");
 }
+#endif /* !CONFIG_PPC32 */
+
+static inline void iosync(void)
+{
+        __asm__ __volatile__ ("sync" : : : "memory");
+}
+
+/* Enforce in-order execution of data I/O.
+ * No distinction between read/write on PPC; use eieio for all three.
+ * These are fairly weak though. They don't provide a barrier between
+ * MMIO and cacheable storage nor do they provide a barrier vs. locks,
+ * they only provide barriers between 2 __raw MMIO operations and
+ * possibly break write combining.
+ */
+#define iobarrier_rw() eieio()
+#define iobarrier_r()  eieio()
+#define iobarrier_w()  eieio()
+
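+/* Illustrative sketch: the barrier macros are meant to be used between
+ * pairs of __raw accesses when ordering matters but a full sync is not
+ * wanted, e.g. (register offsets and values are hypothetical):
+ *
+ *     __raw_writel(cmd, regs + REG_CMD);
+ *     iobarrier_w();
+ *     __raw_writel(go, regs + REG_GO);
+ *
+ * REG_CMD, REG_GO, cmd and go are assumptions for the example only.
+ */
+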
 
 /*
  * output pause versions need a delay at least for the
@@ -185,11 +579,6 @@ static inline void mmiowb(void)
 #define IO_SPACE_LIMIT ~(0UL)
 
 
-extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr,
-                             unsigned long size, unsigned long flags);
-extern void __iomem *__ioremap(unsigned long address, unsigned long size,
-                      unsigned long flags);
-
 /**
  * ioremap     -   map bus memory into CPU space
  * @address:   bus address of the memory
@@ -200,14 +589,77 @@ extern void __iomem *__ioremap(unsigned long address, unsigned long size,
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
+ *
+ * We provide a few variations of it:
+ *
+ * * ioremap is the standard one and provides non-cacheable guarded mappings
+ *   and can be hooked by the platform via ppc_md
+ *
+ * * ioremap_flags lets you specify the page flags as an argument and can
+ *   also be hooked by the platform via ppc_md
+ *
+ * * ioremap_nocache is identical to ioremap
+ *
+ * * iounmap undoes such a mapping and can be hooked
+ *
+ * * __ioremap_explicit (and the pending __iounmap_explicit) are low level
+ *   functions to create hand-made mappings for use only by the PCI code
+ *   and cannot currently be hooked.
+ *
+ * * __ioremap is the low level implementation used by ioremap and
+ *   ioremap_flags and cannot be hooked (but can be used by a hook on one
+ *   of the previous ones)
+ *
+ * * __iounmap, is the low level implementation used by iounmap and cannot
+ *   be hooked (but can be used by a hook on iounmap)
+ *
  */
-extern void __iomem *ioremap(unsigned long address, unsigned long size);
-
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
+                                  unsigned long flags);
 #define ioremap_nocache(addr, size)    ioremap((addr), (size))
-extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
+                              unsigned long flags);
+extern void __iounmap(volatile void __iomem *addr);
+
+extern int __ioremap_explicit(phys_addr_t p_addr, unsigned long v_addr,
+                             unsigned long size, unsigned long flags);
+extern int __iounmap_explicit(volatile void __iomem *start,
+                             unsigned long size);
+
 extern void __iomem * reserve_phb_iospace(unsigned long size);
 
+/* These are mostly 32-bit only functions */
+extern unsigned long iopa(unsigned long addr);
+extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
+extern void io_block_mapping(unsigned long virt, phys_addr_t phys,
+                            unsigned int size, int flags);
+
+
+/*
+ * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
+ * which needs some additional definitions here. They basically allow PIO
+ * space overall to be 1GB. This will work as long as we never try to use
+ * iomap to map MMIO below 1GB which should be fine on ppc64
+ */
+#define HAVE_ARCH_PIO_SIZE             1
+#define PIO_OFFSET                     0x00000000UL
+#define PIO_MASK                       0x3fffffffUL
+#define PIO_RESERVED                   0x40000000UL
+
+#define mmio_read16be(addr)            readw_be(addr)
+#define mmio_read32be(addr)            readl_be(addr)
+#define mmio_write16be(val, addr)      writew_be(val, addr)
+#define mmio_write32be(val, addr)      writel_be(val, addr)
+#define mmio_insb(addr, dst, count)    readsb(addr, dst, count)
+#define mmio_insw(addr, dst, count)    readsw(addr, dst, count)
+#define mmio_insl(addr, dst, count)    readsl(addr, dst, count)
+#define mmio_outsb(addr, src, count)   writesb(addr, src, count)
+#define mmio_outsw(addr, src, count)   writesw(addr, src, count)
+#define mmio_outsl(addr, src, count)   writesl(addr, src, count)
+
 /**
  *     virt_to_phys    -       map virtual addresses to physical
  *     @address: address to remap
@@ -254,178 +706,33 @@ static inline void * phys_to_virt(unsigned long address)
  */
 #define BIO_VMERGE_BOUNDARY    0
 
-static inline void iosync(void)
-{
-        __asm__ __volatile__ ("sync" : : : "memory");
-}
-
-/* Enforce in-order execution of data I/O. 
- * No distinction between read/write on PPC; use eieio for all three.
- */
-#define iobarrier_rw() eieio()
-#define iobarrier_r()  eieio()
-#define iobarrier_w()  eieio()
-
 /*
- * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
- * These routines do not perform EEH-related I/O address translation,
- * and should not be used directly by device drivers.  Use inb/readb
- * instead.
+ * 32 bits still uses virt_to_bus() for its implementation of DMA
+ * mappings so we have to keep it defined here. We also have some old
+ * drivers (shame shame shame) that use bus_to_virt() and haven't been
+ * fixed yet so I need to define it here.
  */
-static inline int __in_8(const volatile unsigned char __iomem *addr)
-{
-       int ret;
+#ifdef CONFIG_PPC32
 
-       __asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
-                            : "=r" (ret) : "m" (*addr));
-       return ret;
-}
-
-static inline void __out_8(volatile unsigned char __iomem *addr, int val)
-{
-       __asm__ __volatile__("sync; stb%U0%X0 %1,%0"
-                            : "=m" (*addr) : "r" (val));
-       get_paca()->io_sync = 1;
-}
-
-static inline int __in_le16(const volatile unsigned short __iomem *addr)
+static inline unsigned long virt_to_bus(volatile void * address)
 {
-       int ret;
-
-       __asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync"
-                            : "=r" (ret) : "r" (addr), "m" (*addr));
-       return ret;
+        if (address == NULL)
+               return 0;
+        return __pa(address) + PCI_DRAM_OFFSET;
 }
 
-static inline int __in_be16(const volatile unsigned short __iomem *addr)
+static inline void * bus_to_virt(unsigned long address)
 {
-       int ret;
-
-       __asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
-                            : "=r" (ret) : "m" (*addr));
-       return ret;
+        if (address == 0)
+               return NULL;
+        return __va(address - PCI_DRAM_OFFSET);
 }
 
-static inline void __out_le16(volatile unsigned short __iomem *addr, int val)
-{
-       __asm__ __volatile__("sync; sthbrx %1,0,%2"
-                            : "=m" (*addr) : "r" (val), "r" (addr));
-       get_paca()->io_sync = 1;
-}
-
-static inline void __out_be16(volatile unsigned short __iomem *addr, int val)
-{
-       __asm__ __volatile__("sync; sth%U0%X0 %1,%0"
-                            : "=m" (*addr) : "r" (val));
-       get_paca()->io_sync = 1;
-}
-
-static inline unsigned __in_le32(const volatile unsigned __iomem *addr)
-{
-       unsigned ret;
-
-       __asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync"
-                            : "=r" (ret) : "r" (addr), "m" (*addr));
-       return ret;
-}
+#define page_to_bus(page)      (page_to_phys(page) + PCI_DRAM_OFFSET)
 
-static inline unsigned __in_be32(const volatile unsigned __iomem *addr)
-{
-       unsigned ret;
-
-       __asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
-                            : "=r" (ret) : "m" (*addr));
-       return ret;
-}
-
-static inline void __out_le32(volatile unsigned __iomem *addr, int val)
-{
-       __asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
-                            : "r" (val), "r" (addr));
-       get_paca()->io_sync = 1;
-}
-
-static inline void __out_be32(volatile unsigned __iomem *addr, int val)
-{
-       __asm__ __volatile__("sync; stw%U0%X0 %1,%0"
-                            : "=m" (*addr) : "r" (val));
-       get_paca()->io_sync = 1;
-}
-
-static inline unsigned long __in_le64(const volatile unsigned long __iomem *addr)
-{
-       unsigned long tmp, ret;
-
-       __asm__ __volatile__(
-                            "sync\n"
-                            "ld %1,0(%2)\n"
-                            "twi 0,%1,0\n"
-                            "isync\n"
-                            "rldimi %0,%1,5*8,1*8\n"
-                            "rldimi %0,%1,3*8,2*8\n"
-                            "rldimi %0,%1,1*8,3*8\n"
-                            "rldimi %0,%1,7*8,4*8\n"
-                            "rldicl %1,%1,32,0\n"
-                            "rlwimi %0,%1,8,8,31\n"
-                            "rlwimi %0,%1,24,16,23\n"
-                            : "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr));
-       return ret;
-}
-
-static inline unsigned long __in_be64(const volatile unsigned long __iomem *addr)
-{
-       unsigned long ret;
+#endif /* CONFIG_PPC32 */
 
-       __asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync"
-                            : "=r" (ret) : "m" (*addr));
-       return ret;
-}
-
-static inline void __out_le64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-       unsigned long tmp;
-
-       __asm__ __volatile__(
-                            "rldimi %0,%1,5*8,1*8\n"
-                            "rldimi %0,%1,3*8,2*8\n"
-                            "rldimi %0,%1,1*8,3*8\n"
-                            "rldimi %0,%1,7*8,4*8\n"
-                            "rldicl %1,%1,32,0\n"
-                            "rlwimi %0,%1,8,8,31\n"
-                            "rlwimi %0,%1,24,16,23\n"
-                            "sync\n"
-                            "std %0,0(%3)"
-                            : "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
-       get_paca()->io_sync = 1;
-}
-
-static inline void __out_be64(volatile unsigned long __iomem *addr, unsigned long val)
-{
-       __asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
-       get_paca()->io_sync = 1;
-}
-
-#include <asm/eeh.h>
-
-/* Nothing to do */
-
-#define dma_cache_inv(_start,_size)            do { } while (0)
-#define dma_cache_wback(_start,_size)          do { } while (0)
-#define dma_cache_wback_inv(_start,_size)      do { } while (0)
-
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)  p
 
 #endif /* __KERNEL__ */
 
-#endif /* CONFIG_PPC64 */
 #endif /* _ASM_POWERPC_IO_H */
index 39fad685ffab6c99f72b060141ba7902d1efd607..f85dbd305558c0a187d40bfce98593379daead00 100644 (file)
@@ -34,7 +34,9 @@
 #define IOMMU_PAGE_MASK       (~((1 << IOMMU_PAGE_SHIFT) - 1))
 #define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
 
-#ifndef __ASSEMBLY__
+/* Boot time flags */
+extern int iommu_is_off;
+extern int iommu_force_on;
 
 /* Pure 2^n version of get_order */
 static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
@@ -42,8 +44,6 @@ static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
        return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
 }
 
-#endif   /* __ASSEMBLY__ */
-
 
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
@@ -70,39 +70,31 @@ struct iommu_table {
 struct scatterlist;
 struct device_node;
 
-#ifdef CONFIG_PPC_MULTIPLATFORM
-
-/* Walks all buses and creates iommu tables */
-extern void iommu_setup_pSeries(void);
-extern void iommu_setup_dart(void);
-
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct device_node *dn);
 
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 /* Initializes an iommu_table based in values set in the passed-in
  * structure
  */
 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
                                            int nid);
 
-extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
-               struct scatterlist *sglist, int nelems, unsigned long mask,
-               enum dma_data_direction direction);
+extern int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+                       int nelems, unsigned long mask,
+                       enum dma_data_direction direction);
 extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
-               int nelems, enum dma_data_direction direction);
+                          int nelems, enum dma_data_direction direction);
 
 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-               dma_addr_t *dma_handle, unsigned long mask,
-               gfp_t flag, int node);
+                                 dma_addr_t *dma_handle, unsigned long mask,
+                                 gfp_t flag, int node);
 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
-               void *vaddr, dma_addr_t dma_handle);
+                               void *vaddr, dma_addr_t dma_handle);
 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
-               size_t size, unsigned long mask,
-               enum dma_data_direction direction);
+                                  size_t size, unsigned long mask,
+                                  enum dma_data_direction direction);
 extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
-               size_t size, enum dma_data_direction direction);
+                              size_t size, enum dma_data_direction direction);
 
 extern void iommu_init_early_pSeries(void);
 extern void iommu_init_early_iSeries(void);
index f960f5346f406d717590d337de07c6a0d9cbb384..46476e9a494ad6d2ec615f562b6a6449b69d4c91 100644 (file)
@@ -135,6 +135,10 @@ struct irq_map_entry {
 
 extern struct irq_map_entry irq_map[NR_IRQS];
 
+static inline irq_hw_number_t virq_to_hw(unsigned int virq)
+{
+       return irq_map[virq].hwirq;
+}
 
 /**
  * irq_alloc_host - Allocate a new irq_host data structure
index 0edbfe10cb37cb4ed1ad62b0ede46d5aecd4cd45..6e323a13ac3031e760295b7583bb3dec24097a40 100644 (file)
  * Boston, MA  02111-1307  USA
  */
 
+struct pci_dev;
 struct device_node;
 struct iommu_table;
 
 /* Creates table for an individual device node */
-extern void iommu_devnode_init_iSeries(struct device_node *dn);
+extern void iommu_devnode_init_iSeries(struct pci_dev *pdev,
+                                      struct device_node *dn);
 
 /* Get table parameters from HV */
 extern void iommu_table_getparms_iSeries(unsigned long busno,
diff --git a/include/asm-powerpc/lv1call.h b/include/asm-powerpc/lv1call.h
new file mode 100644 (file)
index 0000000..f733bee
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ *  PS3 hvcall interface.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *  Copyright 2003, 2004 (c) MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_ASM_POWERPC_LV1CALL_H)
+#define _ASM_POWERPC_LV1CALL_H
+
+#if !defined(__ASSEMBLY__)
+
+#include <linux/types.h>
+
+/* lv1 call declaration macros */
+
+#define LV1_1_IN_ARG_DECL u64 in_1
+#define LV1_2_IN_ARG_DECL LV1_1_IN_ARG_DECL, u64 in_2
+#define LV1_3_IN_ARG_DECL LV1_2_IN_ARG_DECL, u64 in_3
+#define LV1_4_IN_ARG_DECL LV1_3_IN_ARG_DECL, u64 in_4
+#define LV1_5_IN_ARG_DECL LV1_4_IN_ARG_DECL, u64 in_5
+#define LV1_6_IN_ARG_DECL LV1_5_IN_ARG_DECL, u64 in_6
+#define LV1_7_IN_ARG_DECL LV1_6_IN_ARG_DECL, u64 in_7
+#define LV1_8_IN_ARG_DECL LV1_7_IN_ARG_DECL, u64 in_8
+#define LV1_1_OUT_ARG_DECL u64 *out_1
+#define LV1_2_OUT_ARG_DECL LV1_1_OUT_ARG_DECL, u64 *out_2
+#define LV1_3_OUT_ARG_DECL LV1_2_OUT_ARG_DECL, u64 *out_3
+#define LV1_4_OUT_ARG_DECL LV1_3_OUT_ARG_DECL, u64 *out_4
+#define LV1_5_OUT_ARG_DECL LV1_4_OUT_ARG_DECL, u64 *out_5
+#define LV1_6_OUT_ARG_DECL LV1_5_OUT_ARG_DECL, u64 *out_6
+#define LV1_7_OUT_ARG_DECL LV1_6_OUT_ARG_DECL, u64 *out_7
+
+#define LV1_0_IN_0_OUT_ARG_DECL void
+#define LV1_1_IN_0_OUT_ARG_DECL LV1_1_IN_ARG_DECL
+#define LV1_2_IN_0_OUT_ARG_DECL LV1_2_IN_ARG_DECL
+#define LV1_3_IN_0_OUT_ARG_DECL LV1_3_IN_ARG_DECL
+#define LV1_4_IN_0_OUT_ARG_DECL LV1_4_IN_ARG_DECL
+#define LV1_5_IN_0_OUT_ARG_DECL LV1_5_IN_ARG_DECL
+#define LV1_6_IN_0_OUT_ARG_DECL LV1_6_IN_ARG_DECL
+#define LV1_7_IN_0_OUT_ARG_DECL LV1_7_IN_ARG_DECL
+
+#define LV1_0_IN_1_OUT_ARG_DECL                    LV1_1_OUT_ARG_DECL
+#define LV1_1_IN_1_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_2_IN_1_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_3_IN_1_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_4_IN_1_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_5_IN_1_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_6_IN_1_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_7_IN_1_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+#define LV1_8_IN_1_OUT_ARG_DECL LV1_8_IN_ARG_DECL, LV1_1_OUT_ARG_DECL
+
+#define LV1_0_IN_2_OUT_ARG_DECL                    LV1_2_OUT_ARG_DECL
+#define LV1_1_IN_2_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_2_IN_2_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_3_IN_2_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_4_IN_2_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_5_IN_2_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_6_IN_2_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+#define LV1_7_IN_2_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_2_OUT_ARG_DECL
+
+#define LV1_0_IN_3_OUT_ARG_DECL                    LV1_3_OUT_ARG_DECL
+#define LV1_1_IN_3_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_2_IN_3_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_3_IN_3_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_4_IN_3_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_5_IN_3_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_6_IN_3_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+#define LV1_7_IN_3_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_3_OUT_ARG_DECL
+
+#define LV1_0_IN_4_OUT_ARG_DECL                    LV1_4_OUT_ARG_DECL
+#define LV1_1_IN_4_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_2_IN_4_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_3_IN_4_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_4_IN_4_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_5_IN_4_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_6_IN_4_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+#define LV1_7_IN_4_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_4_OUT_ARG_DECL
+
+#define LV1_0_IN_5_OUT_ARG_DECL                    LV1_5_OUT_ARG_DECL
+#define LV1_1_IN_5_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_2_IN_5_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_3_IN_5_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_4_IN_5_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_5_IN_5_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_6_IN_5_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+#define LV1_7_IN_5_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_5_OUT_ARG_DECL
+
+#define LV1_0_IN_6_OUT_ARG_DECL                    LV1_6_OUT_ARG_DECL
+#define LV1_1_IN_6_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_2_IN_6_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_3_IN_6_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_4_IN_6_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_5_IN_6_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_6_IN_6_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+#define LV1_7_IN_6_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_6_OUT_ARG_DECL
+
+#define LV1_0_IN_7_OUT_ARG_DECL                    LV1_7_OUT_ARG_DECL
+#define LV1_1_IN_7_OUT_ARG_DECL LV1_1_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_2_IN_7_OUT_ARG_DECL LV1_2_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_3_IN_7_OUT_ARG_DECL LV1_3_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_4_IN_7_OUT_ARG_DECL LV1_4_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_5_IN_7_OUT_ARG_DECL LV1_5_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_6_IN_7_OUT_ARG_DECL LV1_6_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+#define LV1_7_IN_7_OUT_ARG_DECL LV1_7_IN_ARG_DECL, LV1_7_OUT_ARG_DECL
+
+#define LV1_1_IN_ARGS in_1
+#define LV1_2_IN_ARGS LV1_1_IN_ARGS, in_2
+#define LV1_3_IN_ARGS LV1_2_IN_ARGS, in_3
+#define LV1_4_IN_ARGS LV1_3_IN_ARGS, in_4
+#define LV1_5_IN_ARGS LV1_4_IN_ARGS, in_5
+#define LV1_6_IN_ARGS LV1_5_IN_ARGS, in_6
+#define LV1_7_IN_ARGS LV1_6_IN_ARGS, in_7
+#define LV1_8_IN_ARGS LV1_7_IN_ARGS, in_8
+
+#define LV1_1_OUT_ARGS out_1
+#define LV1_2_OUT_ARGS LV1_1_OUT_ARGS, out_2
+#define LV1_3_OUT_ARGS LV1_2_OUT_ARGS, out_3
+#define LV1_4_OUT_ARGS LV1_3_OUT_ARGS, out_4
+#define LV1_5_OUT_ARGS LV1_4_OUT_ARGS, out_5
+#define LV1_6_OUT_ARGS LV1_5_OUT_ARGS, out_6
+#define LV1_7_OUT_ARGS LV1_6_OUT_ARGS, out_7
+
+#define LV1_0_IN_0_OUT_ARGS
+#define LV1_1_IN_0_OUT_ARGS LV1_1_IN_ARGS
+#define LV1_2_IN_0_OUT_ARGS LV1_2_IN_ARGS
+#define LV1_3_IN_0_OUT_ARGS LV1_3_IN_ARGS
+#define LV1_4_IN_0_OUT_ARGS LV1_4_IN_ARGS
+#define LV1_5_IN_0_OUT_ARGS LV1_5_IN_ARGS
+#define LV1_6_IN_0_OUT_ARGS LV1_6_IN_ARGS
+#define LV1_7_IN_0_OUT_ARGS LV1_7_IN_ARGS
+
+#define LV1_0_IN_1_OUT_ARGS                LV1_1_OUT_ARGS
+#define LV1_1_IN_1_OUT_ARGS LV1_1_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_2_IN_1_OUT_ARGS LV1_2_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_3_IN_1_OUT_ARGS LV1_3_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_4_IN_1_OUT_ARGS LV1_4_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_5_IN_1_OUT_ARGS LV1_5_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_6_IN_1_OUT_ARGS LV1_6_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_7_IN_1_OUT_ARGS LV1_7_IN_ARGS, LV1_1_OUT_ARGS
+#define LV1_8_IN_1_OUT_ARGS LV1_8_IN_ARGS, LV1_1_OUT_ARGS
+
+#define LV1_0_IN_2_OUT_ARGS                LV1_2_OUT_ARGS
+#define LV1_1_IN_2_OUT_ARGS LV1_1_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_2_IN_2_OUT_ARGS LV1_2_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_3_IN_2_OUT_ARGS LV1_3_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_4_IN_2_OUT_ARGS LV1_4_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_5_IN_2_OUT_ARGS LV1_5_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_6_IN_2_OUT_ARGS LV1_6_IN_ARGS, LV1_2_OUT_ARGS
+#define LV1_7_IN_2_OUT_ARGS LV1_7_IN_ARGS, LV1_2_OUT_ARGS
+
+#define LV1_0_IN_3_OUT_ARGS                LV1_3_OUT_ARGS
+#define LV1_1_IN_3_OUT_ARGS LV1_1_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_2_IN_3_OUT_ARGS LV1_2_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_3_IN_3_OUT_ARGS LV1_3_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_4_IN_3_OUT_ARGS LV1_4_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_5_IN_3_OUT_ARGS LV1_5_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_6_IN_3_OUT_ARGS LV1_6_IN_ARGS, LV1_3_OUT_ARGS
+#define LV1_7_IN_3_OUT_ARGS LV1_7_IN_ARGS, LV1_3_OUT_ARGS
+
+#define LV1_0_IN_4_OUT_ARGS                LV1_4_OUT_ARGS
+#define LV1_1_IN_4_OUT_ARGS LV1_1_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_2_IN_4_OUT_ARGS LV1_2_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_3_IN_4_OUT_ARGS LV1_3_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_4_IN_4_OUT_ARGS LV1_4_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_5_IN_4_OUT_ARGS LV1_5_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_6_IN_4_OUT_ARGS LV1_6_IN_ARGS, LV1_4_OUT_ARGS
+#define LV1_7_IN_4_OUT_ARGS LV1_7_IN_ARGS, LV1_4_OUT_ARGS
+
+#define LV1_0_IN_5_OUT_ARGS                LV1_5_OUT_ARGS
+#define LV1_1_IN_5_OUT_ARGS LV1_1_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_2_IN_5_OUT_ARGS LV1_2_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_3_IN_5_OUT_ARGS LV1_3_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_4_IN_5_OUT_ARGS LV1_4_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_5_IN_5_OUT_ARGS LV1_5_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_6_IN_5_OUT_ARGS LV1_6_IN_ARGS, LV1_5_OUT_ARGS
+#define LV1_7_IN_5_OUT_ARGS LV1_7_IN_ARGS, LV1_5_OUT_ARGS
+
+#define LV1_0_IN_6_OUT_ARGS                LV1_6_OUT_ARGS
+#define LV1_1_IN_6_OUT_ARGS LV1_1_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_2_IN_6_OUT_ARGS LV1_2_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_3_IN_6_OUT_ARGS LV1_3_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_4_IN_6_OUT_ARGS LV1_4_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_5_IN_6_OUT_ARGS LV1_5_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_6_IN_6_OUT_ARGS LV1_6_IN_ARGS, LV1_6_OUT_ARGS
+#define LV1_7_IN_6_OUT_ARGS LV1_7_IN_ARGS, LV1_6_OUT_ARGS
+
+#define LV1_0_IN_7_OUT_ARGS                LV1_7_OUT_ARGS
+#define LV1_1_IN_7_OUT_ARGS LV1_1_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_2_IN_7_OUT_ARGS LV1_2_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_3_IN_7_OUT_ARGS LV1_3_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_4_IN_7_OUT_ARGS LV1_4_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_5_IN_7_OUT_ARGS LV1_5_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_6_IN_7_OUT_ARGS LV1_6_IN_ARGS, LV1_7_OUT_ARGS
+#define LV1_7_IN_7_OUT_ARGS LV1_7_IN_ARGS, LV1_7_OUT_ARGS
+
+/*
+ * This LV1_CALL() macro is for use by callers.  It expands into an
+ * inline call wrapper and an underscored HV call declaration.  The
+ * wrapper can be used to instrument the lv1 call interface.  The
+ * file lv1call.S defines its own LV1_CALL() macro to expand into
+ * the actual underscored call definition.
+ */
+
+#if !defined(LV1_CALL)
+#define LV1_CALL(name, in, out, num)                               \
+  extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL);      \
+  static inline int lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL) \
+    {return _lv1_##name(LV1_##in##_IN_##out##_OUT_ARGS);}
+#endif
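+
+/* Illustrative sketch: with the generic LV1_CALL() above, an entry such as
+ * LV1_CALL(get_logical_ppe_id, 0, 1, 69) from the table below expands
+ * (roughly) into:
+ *
+ *     extern s64 _lv1_get_logical_ppe_id(u64 *out_1);
+ *     static inline int lv1_get_logical_ppe_id(u64 *out_1)
+ *             {return _lv1_get_logical_ppe_id(out_1);}
+ *
+ * lv1call.S provides the matching underscored definition.
+ */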
+
+#endif /* !defined(__ASSEMBLY__) */
+
+/* lv1 call table */
+
+LV1_CALL(allocate_memory,                               4, 2,   0 )
+LV1_CALL(write_htab_entry,                              4, 0,   1 )
+LV1_CALL(construct_virtual_address_space,               3, 2,   2 )
+LV1_CALL(invalidate_htab_entries,                       5, 0,   3 )
+LV1_CALL(get_virtual_address_space_id_of_ppe,           1, 1,   4 )
+LV1_CALL(query_logical_partition_address_region_info,   1, 5,   6 )
+LV1_CALL(select_virtual_address_space,                  1, 0,   7 )
+LV1_CALL(pause,                                         1, 0,   9 )
+LV1_CALL(destruct_virtual_address_space,                1, 0,  10 )
+LV1_CALL(configure_irq_state_bitmap,                    3, 0,  11 )
+LV1_CALL(connect_irq_plug_ext,                          5, 0,  12 )
+LV1_CALL(release_memory,                                1, 0,  13 )
+LV1_CALL(disconnect_irq_plug_ext,                       3, 0,  17 )
+LV1_CALL(construct_event_receive_port,                  0, 1,  18 )
+LV1_CALL(destruct_event_receive_port,                   1, 0,  19 )
+LV1_CALL(send_event_locally,                            1, 0,  24 )
+LV1_CALL(end_of_interrupt,                              1, 0,  27 )
+LV1_CALL(connect_irq_plug,                              2, 0,  28 )
+LV1_CALL(disconnect_irq_plug,                           1, 0,  29 )
+LV1_CALL(end_of_interrupt_ext,                          3, 0,  30 )
+LV1_CALL(did_update_interrupt_mask,                     2, 0,  31 )
+LV1_CALL(shutdown_logical_partition,                    1, 0,  44 )
+LV1_CALL(destruct_logical_spe,                          1, 0,  54 )
+LV1_CALL(construct_logical_spe,                         7, 6,  57 )
+LV1_CALL(set_spe_interrupt_mask,                        3, 0,  61 )
+LV1_CALL(set_spe_transition_notifier,                   3, 0,  64 )
+LV1_CALL(disable_logical_spe,                           2, 0,  65 )
+LV1_CALL(clear_spe_interrupt_status,                    4, 0,  66 )
+LV1_CALL(get_spe_interrupt_status,                      2, 1,  67 )
+LV1_CALL(get_logical_ppe_id,                            0, 1,  69 )
+LV1_CALL(set_interrupt_mask,                            5, 0,  73 )
+LV1_CALL(get_logical_partition_id,                      0, 1,  74 )
+LV1_CALL(configure_execution_time_variable,             1, 0,  77 )
+LV1_CALL(get_spe_irq_outlet,                            2, 1,  78 )
+LV1_CALL(set_spe_privilege_state_area_1_register,       3, 0,  79 )
+LV1_CALL(create_repository_node,                        6, 0,  90 )
+LV1_CALL(get_repository_node_value,                     5, 2,  91 )
+LV1_CALL(modify_repository_node_value,                  6, 0,  92 )
+LV1_CALL(remove_repository_node,                        4, 0,  93 )
+LV1_CALL(read_htab_entries,                             2, 5,  95 )
+LV1_CALL(set_dabr,                                      2, 0,  96 )
+LV1_CALL(get_total_execution_time,                      2, 1, 103 )
+LV1_CALL(construct_io_irq_outlet,                       1, 1, 120 )
+LV1_CALL(destruct_io_irq_outlet,                        1, 0, 121 )
+LV1_CALL(map_htab,                                      1, 1, 122 )
+LV1_CALL(unmap_htab,                                    1, 0, 123 )
+LV1_CALL(get_version_info,                              0, 1, 127 )
+LV1_CALL(insert_htab_entry,                             6, 3, 158 )
+LV1_CALL(read_virtual_uart,                             3, 1, 162 )
+LV1_CALL(write_virtual_uart,                            3, 1, 163 )
+LV1_CALL(set_virtual_uart_param,                        3, 0, 164 )
+LV1_CALL(get_virtual_uart_param,                        2, 1, 165 )
+LV1_CALL(configure_virtual_uart_irq,                    1, 1, 166 )
+LV1_CALL(open_device,                                   3, 0, 170 )
+LV1_CALL(close_device,                                  2, 0, 171 )
+LV1_CALL(map_device_mmio_region,                        5, 1, 172 )
+LV1_CALL(unmap_device_mmio_region,                      3, 0, 173 )
+LV1_CALL(allocate_device_dma_region,                    5, 1, 174 )
+LV1_CALL(free_device_dma_region,                        3, 0, 175 )
+LV1_CALL(map_device_dma_region,                         6, 0, 176 )
+LV1_CALL(unmap_device_dma_region,                       4, 0, 177 )
+LV1_CALL(net_add_multicast_address,                     4, 0, 185 )
+LV1_CALL(net_remove_multicast_address,                  4, 0, 186 )
+LV1_CALL(net_start_tx_dma,                              4, 0, 187 )
+LV1_CALL(net_stop_tx_dma,                               3, 0, 188 )
+LV1_CALL(net_start_rx_dma,                              4, 0, 189 )
+LV1_CALL(net_stop_rx_dma,                               3, 0, 190 )
+LV1_CALL(net_set_interrupt_status_indicator,            4, 0, 191 )
+LV1_CALL(net_set_interrupt_mask,                        4, 0, 193 )
+LV1_CALL(net_control,                                   6, 2, 194 )
+LV1_CALL(connect_interrupt_event_receive_port,          4, 0, 197 )
+LV1_CALL(disconnect_interrupt_event_receive_port,       4, 0, 198 )
+LV1_CALL(get_spe_all_interrupt_statuses,                1, 1, 199 )
+LV1_CALL(deconfigure_virtual_uart_irq,                  0, 0, 202 )
+LV1_CALL(enable_logical_spe,                            2, 0, 207 )
+LV1_CALL(gpu_open,                                      1, 0, 210 )
+LV1_CALL(gpu_close,                                     0, 0, 211 )
+LV1_CALL(gpu_device_map,                                1, 2, 212 )
+LV1_CALL(gpu_device_unmap,                              1, 0, 213 )
+LV1_CALL(gpu_memory_allocate,                           5, 2, 214 )
+LV1_CALL(gpu_memory_free,                               1, 0, 216 )
+LV1_CALL(gpu_context_allocate,                          2, 5, 217 )
+LV1_CALL(gpu_context_free,                              1, 0, 218 )
+LV1_CALL(gpu_context_iomap,                             5, 0, 221 )
+LV1_CALL(gpu_context_attribute,                         6, 0, 225 )
+LV1_CALL(gpu_context_intr,                              1, 1, 227 )
+LV1_CALL(gpu_attribute,                                 5, 0, 228 )
+LV1_CALL(get_rtc,                                       0, 2, 232 )
+LV1_CALL(set_ppe_periodic_tracer_frequency,             1, 0, 240 )
+LV1_CALL(start_ppe_periodic_tracer,                     5, 0, 241 )
+LV1_CALL(stop_ppe_periodic_tracer,                      1, 1, 242 )
+LV1_CALL(storage_read,                                  6, 1, 245 )
+LV1_CALL(storage_write,                                 6, 1, 246 )
+LV1_CALL(storage_send_device_command,                   6, 1, 248 )
+LV1_CALL(storage_get_async_status,                      1, 2, 249 )
+LV1_CALL(storage_check_async_status,                    2, 1, 254 )
+LV1_CALL(panic,                                         1, 0, 255 )
+LV1_CALL(construct_lpm,                                 6, 3, 140 )
+LV1_CALL(destruct_lpm,                                  1, 0, 141 )
+LV1_CALL(start_lpm,                                     1, 0, 142 )
+LV1_CALL(stop_lpm,                                      1, 1, 143 )
+LV1_CALL(copy_lpm_trace_buffer,                         3, 1, 144 )
+LV1_CALL(add_lpm_event_bookmark,                        5, 0, 145 )
+LV1_CALL(delete_lpm_event_bookmark,                     3, 0, 146 )
+LV1_CALL(set_lpm_interrupt_mask,                        3, 1, 147 )
+LV1_CALL(get_lpm_interrupt_status,                      1, 1, 148 )
+LV1_CALL(set_lpm_general_control,                       5, 2, 149 )
+LV1_CALL(set_lpm_interval,                              3, 1, 150 )
+LV1_CALL(set_lpm_trigger_control,                       3, 1, 151 )
+LV1_CALL(set_lpm_counter_control,                       4, 1, 152 )
+LV1_CALL(set_lpm_group_control,                         3, 1, 153 )
+LV1_CALL(set_lpm_debug_bus_control,                     3, 1, 154 )
+LV1_CALL(set_lpm_counter,                               5, 2, 155 )
+LV1_CALL(set_lpm_signal,                                7, 0, 156 )
+LV1_CALL(set_lpm_spr_trigger,                           2, 0, 157 )
+
+#endif
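For orientation, the three numeric columns in the LV1_CALL() table above are the number of input arguments, the number of output values, and the hypervisor call number. Assuming the macro expands into lv1_<name>() stubs that take inputs by value and outputs by pointer, a 0-in/2-out entry such as get_rtc might be used roughly as follows; the prototype shape and return handling are assumptions, not shown by this patch:

	/* Hedged sketch: assumes LV1_CALL(get_rtc, 0, 2, 232) yields
	 * int lv1_get_rtc(u64 *rtc_val, u64 *tb) returning an lv1 status. */
	static void example_read_ps3_rtc(void)
	{
		u64 rtc_val = 0, tb = 0;
		int status = lv1_get_rtc(&rtc_val, &tb);

		if (status == 0)	/* LV1_SUCCESS */
			printk(KERN_INFO "PS3 RTC: %llu\n",
			       (unsigned long long)rtc_val);
		else
			printk(KERN_ERR "lv1_get_rtc failed: %d\n", status);
	}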
index dac90dc341cb06cadac12c6787f86276f974f2fc..1b04e57235482ae59f276c89a9bbc6e0baa1ef4b 100644 (file)
@@ -26,6 +26,7 @@ struct device_node;
 struct iommu_table;
 struct rtc_time;
 struct file;
+struct pci_controller;
 #ifdef CONFIG_KEXEC
 struct kimage;
 #endif
@@ -84,9 +85,12 @@ struct machdep_calls {
        unsigned long   (*tce_get)(struct iommu_table *tbl,
                                    long index);
        void            (*tce_flush)(struct iommu_table *tbl);
-       void            (*iommu_dev_setup)(struct pci_dev *dev);
-       void            (*iommu_bus_setup)(struct pci_bus *bus);
-       void            (*irq_bus_setup)(struct pci_bus *bus);
+       void            (*pci_dma_dev_setup)(struct pci_dev *dev);
+       void            (*pci_dma_bus_setup)(struct pci_bus *bus);
+
+       void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
+                                  unsigned long flags);
+       void            (*iounmap)(volatile void __iomem *token);
 #endif /* CONFIG_PPC64 */
 
        int             (*probe)(void);
@@ -106,6 +110,10 @@ struct machdep_calls {
        /* Called after scanning the bus, before allocating resources */
        void            (*pcibios_fixup)(void);
        int             (*pci_probe_mode)(struct pci_bus *);
+       void            (*pci_irq_fixup)(struct pci_dev *dev);
+
+       /* To setup PHBs when using automatic OF platform driver for PCI */
+       int             (*pci_setup_phb)(struct pci_controller *host);
 
        void            (*restart)(char *cmd);
        void            (*power_off)(void);
@@ -199,10 +207,6 @@ struct machdep_calls {
         * Returns 0 to allow assignment/enabling of the device. */
        int  (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
 
-       /* For interrupt routing */
-       unsigned char (*pci_swizzle)(struct pci_dev *, unsigned char *);
-       int (*pci_map_irq)(struct pci_dev *, unsigned char, unsigned char);
-
        /* Called in indirect_* to avoid touching devices */
        int (*pci_exclude_device)(unsigned char, unsigned char);
 
index c3fc7a28e3cd0cb73dd245c727cf0b3d979ee082..41c8c9c5a254e10339ceaa6fcef11ff4f626f605 100644 (file)
@@ -248,21 +248,6 @@ extern void hpte_init_native(void);
 extern void hpte_init_lpar(void);
 extern void hpte_init_iSeries(void);
 
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-                                    unsigned long va, unsigned long prpn,
-                                    unsigned long rflags,
-                                    unsigned long vflags, int psize);
-
-extern long native_hpte_insert(unsigned long hpte_group,
-                              unsigned long va, unsigned long prpn,
-                              unsigned long rflags,
-                              unsigned long vflags, int psize);
-
-extern long iSeries_hpte_insert(unsigned long hpte_group,
-                               unsigned long va, unsigned long prpn,
-                               unsigned long rflags,
-                               unsigned long vflags, int psize);
-
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
new file mode 100644 (file)
index 0000000..4a28a85
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Prototypes, etc. for the Freescale MPC52xx embedded cpu chips
+ * May need to be cleaned as the port goes on ...
+ *
+ * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_POWERPC_MPC52xx_H__
+#define __ASM_POWERPC_MPC52xx_H__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/prom.h>
+#endif /* __ASSEMBLY__ */
+
+
+/* ======================================================================== */
+/* Structures mapping of some unit register set                             */
+/* ======================================================================== */
+
+#ifndef __ASSEMBLY__
+
+/* Memory Mapping Control */
+struct mpc52xx_mmap_ctl {
+       u32 mbar;               /* MMAP_CTRL + 0x00 */
+
+       u32 cs0_start;          /* MMAP_CTRL + 0x04 */
+       u32 cs0_stop;           /* MMAP_CTRL + 0x08 */
+       u32 cs1_start;          /* MMAP_CTRL + 0x0c */
+       u32 cs1_stop;           /* MMAP_CTRL + 0x10 */
+       u32 cs2_start;          /* MMAP_CTRL + 0x14 */
+       u32 cs2_stop;           /* MMAP_CTRL + 0x18 */
+       u32 cs3_start;          /* MMAP_CTRL + 0x1c */
+       u32 cs3_stop;           /* MMAP_CTRL + 0x20 */
+       u32 cs4_start;          /* MMAP_CTRL + 0x24 */
+       u32 cs4_stop;           /* MMAP_CTRL + 0x28 */
+       u32 cs5_start;          /* MMAP_CTRL + 0x2c */
+       u32 cs5_stop;           /* MMAP_CTRL + 0x30 */
+
+       u32 sdram0;             /* MMAP_CTRL + 0x34 */
+       u32 sdram1;             /* MMAP_CTRL + 0X38 */
+
+       u32 reserved[4];        /* MMAP_CTRL + 0x3c .. 0x48 */
+
+       u32 boot_start;         /* MMAP_CTRL + 0x4c */
+       u32 boot_stop;          /* MMAP_CTRL + 0x50 */
+
+       u32 ipbi_ws_ctrl;       /* MMAP_CTRL + 0x54 */
+
+       u32 cs6_start;          /* MMAP_CTRL + 0x58 */
+       u32 cs6_stop;           /* MMAP_CTRL + 0x5c */
+       u32 cs7_start;          /* MMAP_CTRL + 0x60 */
+       u32 cs7_stop;           /* MMAP_CTRL + 0x64 */
+};
+
+/* SDRAM control */
+struct mpc52xx_sdram {
+       u32 mode;               /* SDRAM + 0x00 */
+       u32 ctrl;               /* SDRAM + 0x04 */
+       u32 config1;            /* SDRAM + 0x08 */
+       u32 config2;            /* SDRAM + 0x0c */
+};
+
+/* SDMA */
+struct mpc52xx_sdma {
+       u32 taskBar;            /* SDMA + 0x00 */
+       u32 currentPointer;     /* SDMA + 0x04 */
+       u32 endPointer;         /* SDMA + 0x08 */
+       u32 variablePointer;    /* SDMA + 0x0c */
+
+       u8 IntVect1;            /* SDMA + 0x10 */
+       u8 IntVect2;            /* SDMA + 0x11 */
+       u16 PtdCntrl;           /* SDMA + 0x12 */
+
+       u32 IntPend;            /* SDMA + 0x14 */
+       u32 IntMask;            /* SDMA + 0x18 */
+
+       u16 tcr[16];            /* SDMA + 0x1c .. 0x3a */
+
+       u8 ipr[32];             /* SDMA + 0x3c .. 0x5b */
+
+       u32 cReqSelect;         /* SDMA + 0x5c */
+       u32 task_size0;         /* SDMA + 0x60 */
+       u32 task_size1;         /* SDMA + 0x64 */
+       u32 MDEDebug;           /* SDMA + 0x68 */
+       u32 ADSDebug;           /* SDMA + 0x6c */
+       u32 Value1;             /* SDMA + 0x70 */
+       u32 Value2;             /* SDMA + 0x74 */
+       u32 Control;            /* SDMA + 0x78 */
+       u32 Status;             /* SDMA + 0x7c */
+       u32 PTDDebug;           /* SDMA + 0x80 */
+};
+
+/* GPT */
+struct mpc52xx_gpt {
+       u32 mode;               /* GPTx + 0x00 */
+       u32 count;              /* GPTx + 0x04 */
+       u32 pwm;                /* GPTx + 0x08 */
+       u32 status;             /* GPTx + 0X0c */
+};
+
+/* GPIO */
+struct mpc52xx_gpio {
+       u32 port_config;        /* GPIO + 0x00 */
+       u32 simple_gpioe;       /* GPIO + 0x04 */
+       u32 simple_ode;         /* GPIO + 0x08 */
+       u32 simple_ddr;         /* GPIO + 0x0c */
+       u32 simple_dvo;         /* GPIO + 0x10 */
+       u32 simple_ival;        /* GPIO + 0x14 */
+       u8 outo_gpioe;          /* GPIO + 0x18 */
+       u8 reserved1[3];        /* GPIO + 0x19 */
+       u8 outo_dvo;            /* GPIO + 0x1c */
+       u8 reserved2[3];        /* GPIO + 0x1d */
+       u8 sint_gpioe;          /* GPIO + 0x20 */
+       u8 reserved3[3];        /* GPIO + 0x21 */
+       u8 sint_ode;            /* GPIO + 0x24 */
+       u8 reserved4[3];        /* GPIO + 0x25 */
+       u8 sint_ddr;            /* GPIO + 0x28 */
+       u8 reserved5[3];        /* GPIO + 0x29 */
+       u8 sint_dvo;            /* GPIO + 0x2c */
+       u8 reserved6[3];        /* GPIO + 0x2d */
+       u8 sint_inten;          /* GPIO + 0x30 */
+       u8 reserved7[3];        /* GPIO + 0x31 */
+       u16 sint_itype;         /* GPIO + 0x34 */
+       u16 reserved8;          /* GPIO + 0x36 */
+       u8 gpio_control;        /* GPIO + 0x38 */
+       u8 reserved9[3];        /* GPIO + 0x39 */
+       u8 sint_istat;          /* GPIO + 0x3c */
+       u8 sint_ival;           /* GPIO + 0x3d */
+       u8 bus_errs;            /* GPIO + 0x3e */
+       u8 reserved10;          /* GPIO + 0x3f */
+};
+
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD        4
+#define MPC52xx_GPIO_PSC_CONFIG_UART_WITH_CD   5
+#define MPC52xx_GPIO_PCI_DIS                   (1<<15)
+
+/* GPIO with WakeUp*/
+struct mpc52xx_gpio_wkup {
+       u8 wkup_gpioe;          /* GPIO_WKUP + 0x00 */
+       u8 reserved1[3];        /* GPIO_WKUP + 0x03 */
+       u8 wkup_ode;            /* GPIO_WKUP + 0x04 */
+       u8 reserved2[3];        /* GPIO_WKUP + 0x05 */
+       u8 wkup_ddr;            /* GPIO_WKUP + 0x08 */
+       u8 reserved3[3];        /* GPIO_WKUP + 0x09 */
+       u8 wkup_dvo;            /* GPIO_WKUP + 0x0C */
+       u8 reserved4[3];        /* GPIO_WKUP + 0x0D */
+       u8 wkup_inten;          /* GPIO_WKUP + 0x10 */
+       u8 reserved5[3];        /* GPIO_WKUP + 0x11 */
+       u8 wkup_iinten;         /* GPIO_WKUP + 0x14 */
+       u8 reserved6[3];        /* GPIO_WKUP + 0x15 */
+       u16 wkup_itype;         /* GPIO_WKUP + 0x18 */
+       u8 reserved7[2];        /* GPIO_WKUP + 0x1A */
+       u8 wkup_maste;          /* GPIO_WKUP + 0x1C */
+       u8 reserved8[3];        /* GPIO_WKUP + 0x1D */
+       u8 wkup_ival;           /* GPIO_WKUP + 0x20 */
+       u8 reserved9[3];        /* GPIO_WKUP + 0x21 */
+       u8 wkup_istat;          /* GPIO_WKUP + 0x24 */
+       u8 reserved10[3];       /* GPIO_WKUP + 0x25 */
+};
+
+/* XLB Bus control */
+struct mpc52xx_xlb {
+       u8 reserved[0x40];
+       u32 config;             /* XLB + 0x40 */
+       u32 version;            /* XLB + 0x44 */
+       u32 status;             /* XLB + 0x48 */
+       u32 int_enable;         /* XLB + 0x4c */
+       u32 addr_capture;       /* XLB + 0x50 */
+       u32 bus_sig_capture;    /* XLB + 0x54 */
+       u32 addr_timeout;       /* XLB + 0x58 */
+       u32 data_timeout;       /* XLB + 0x5c */
+       u32 bus_act_timeout;    /* XLB + 0x60 */
+       u32 master_pri_enable;  /* XLB + 0x64 */
+       u32 master_priority;    /* XLB + 0x68 */
+       u32 base_address;       /* XLB + 0x6c */
+       u32 snoop_window;       /* XLB + 0x70 */
+};
+
+#define MPC52xx_XLB_CFG_PLDIS          (1 << 31)
+#define MPC52xx_XLB_CFG_SNOOP          (1 << 15)
+
+/* Clock Distribution control */
+struct mpc52xx_cdm {
+       u32 jtag_id;            /* CDM + 0x00  reg0 read only */
+       u32 rstcfg;             /* CDM + 0x04  reg1 read only */
+       u32 breadcrumb;         /* CDM + 0x08  reg2 */
+
+       u8 mem_clk_sel;         /* CDM + 0x0c  reg3 byte0 */
+       u8 xlb_clk_sel;         /* CDM + 0x0d  reg3 byte1 read only */
+       u8 ipb_clk_sel;         /* CDM + 0x0e  reg3 byte2 */
+       u8 pci_clk_sel;         /* CDM + 0x0f  reg3 byte3 */
+
+       u8 ext_48mhz_en;        /* CDM + 0x10  reg4 byte0 */
+       u8 fd_enable;           /* CDM + 0x11  reg4 byte1 */
+       u16 fd_counters;        /* CDM + 0x12  reg4 byte2,3 */
+
+       u32 clk_enables;        /* CDM + 0x14  reg5 */
+
+       u8 osc_disable;         /* CDM + 0x18  reg6 byte0 */
+       u8 reserved0[3];        /* CDM + 0x19  reg6 byte1,2,3 */
+
+       u8 ccs_sleep_enable;    /* CDM + 0x1c  reg7 byte0 */
+       u8 osc_sleep_enable;    /* CDM + 0x1d  reg7 byte1 */
+       u8 reserved1;           /* CDM + 0x1e  reg7 byte2 */
+       u8 ccs_qreq_test;       /* CDM + 0x1f  reg7 byte3 */
+
+       u8 soft_reset;          /* CDM + 0x20  u8 byte0 */
+       u8 no_ckstp;            /* CDM + 0x21  u8 byte0 */
+       u8 reserved2[2];        /* CDM + 0x22  u8 byte1,2,3 */
+
+       u8 pll_lock;            /* CDM + 0x24  reg9 byte0 */
+       u8 pll_looselock;       /* CDM + 0x25  reg9 byte1 */
+       u8 pll_sm_lockwin;      /* CDM + 0x26  reg9 byte2 */
+       u8 reserved3;           /* CDM + 0x27  reg9 byte3 */
+
+       u16 reserved4;          /* CDM + 0x28  reg10 byte0,1 */
+       u16 mclken_div_psc1;    /* CDM + 0x2a  reg10 byte2,3 */
+
+       u16 reserved5;          /* CDM + 0x2c  reg11 byte0,1 */
+       u16 mclken_div_psc2;    /* CDM + 0x2e  reg11 byte2,3 */
+
+       u16 reserved6;          /* CDM + 0x30  reg12 byte0,1 */
+       u16 mclken_div_psc3;    /* CDM + 0x32  reg12 byte2,3 */
+
+       u16 reserved7;          /* CDM + 0x34  reg13 byte0,1 */
+       u16 mclken_div_psc6;    /* CDM + 0x36  reg13 byte2,3 */
+};
+
+#endif /* __ASSEMBLY__ */
+
+
+/* ========================================================================= */
+/* Prototypes for MPC52xx sysdev                                             */
+/* ========================================================================= */
+
+#ifndef __ASSEMBLY__
+
+extern void __iomem * mpc52xx_find_and_map(const char *);
+extern unsigned int mpc52xx_find_ipb_freq(struct device_node *node);
+extern void mpc52xx_setup_cpu(void);
+
+extern void mpc52xx_init_irq(void);
+extern unsigned int mpc52xx_get_irq(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_POWERPC_MPC52xx_H__ */
+
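A hedged usage sketch for the prototypes above; the lookup string passed to mpc52xx_find_and_map() and the error handling are assumptions for illustration only:

	/* Map a GPT register block and read its status register. */
	static int example_mpc52xx_gpt_status(void)
	{
		struct mpc52xx_gpt __iomem *gpt;
		u32 status;

		gpt = mpc52xx_find_and_map("mpc5200-gpt");	/* key assumed */
		if (!gpt)
			return -ENODEV;

		status = in_be32(&gpt->status);
		iounmap(gpt);
		return status;
	}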
index ccdb8a21138fb4a43c30cc09093dbb781a1347c3..54142997a5843e08e1e692c7b8bb3b2ac1ed5e96 100644 (file)
 #include <platforms/85xx/mpc85xx_cds.h>
 #endif
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
-
 /* Let modules/drivers get at CCSRBAR */
 extern phys_addr_t get_ccsrbar(void);
 
index ef0a5458d2b23d959c19f70ce1aff109b2eead93..b71e7b32a5550abb26b7ddfeedca5e6503f6e1da 100644 (file)
@@ -3,6 +3,7 @@
 #ifdef __KERNEL__
 
 #include <linux/irq.h>
+#include <asm/dcr.h>
 
 /*
  * Global registers
@@ -225,6 +226,23 @@ struct mpic_irq_fixup
 #endif /* CONFIG_MPIC_BROKEN_U3 */
 
 
+enum mpic_reg_type {
+       mpic_access_mmio_le,
+       mpic_access_mmio_be,
+#ifdef CONFIG_PPC_DCR
+       mpic_access_dcr
+#endif
+};
+
+struct mpic_reg_bank {
+       u32 __iomem     *base;
+#ifdef CONFIG_PPC_DCR
+       dcr_host_t      dhost;
+       unsigned int    dbase;
+       unsigned int    doff;
+#endif /* CONFIG_PPC_DCR */
+};
+
 /* The instance data of a given MPIC */
 struct mpic
 {
@@ -264,11 +282,18 @@ struct mpic
        spinlock_t              fixup_lock;
 #endif
 
+       /* Register access method */
+       enum mpic_reg_type      reg_type;
+
        /* The various ioremap'ed bases */
-       volatile u32 __iomem    *gregs;
-       volatile u32 __iomem    *tmregs;
-       volatile u32 __iomem    *cpuregs[MPIC_MAX_CPUS];
-       volatile u32 __iomem    *isus[MPIC_MAX_ISU];
+       struct mpic_reg_bank    gregs;
+       struct mpic_reg_bank    tmregs;
+       struct mpic_reg_bank    cpuregs[MPIC_MAX_CPUS];
+       struct mpic_reg_bank    isus[MPIC_MAX_ISU];
+
+#ifdef CONFIG_PPC_DCR
+       unsigned int            dcr_base;
+#endif
 
 #ifdef CONFIG_MPIC_WEIRD
        /* Pointer to HW info array */
@@ -305,6 +330,8 @@ struct mpic
 #define MPIC_SPV_EOI                   0x00000020
 /* No passthrough disable */
 #define MPIC_NO_PTHROU_DIS             0x00000040
+/* DCR based MPIC */
+#define MPIC_USES_DCR                  0x00000080
 
 /* MPIC HW modification ID */
 #define MPIC_REGSET_MASK               0xf0000000
@@ -337,7 +364,7 @@ struct mpic
  * that is senses[0] correspond to linux irq "irq_offset".
  */
 extern struct mpic *mpic_alloc(struct device_node *node,
-                              unsigned long phys_addr,
+                              phys_addr_t phys_addr,
                               unsigned int flags,
                               unsigned int isu_size,
                               unsigned int irq_count,
@@ -350,7 +377,7 @@ extern struct mpic *mpic_alloc(struct device_node *node,
  * @phys_addr: physical address of the ISU
  */
 extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
-                           unsigned long phys_addr);
+                           phys_addr_t phys_addr);
 
 /* Set default sense codes
  *
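With the mpic_reg_bank abstraction above, a DCR-mapped controller is selected by flag rather than by a separate entry point. A hedged allocation sketch follows; the flag combination and the mpic_init() follow-up are illustrative, not mandated by this patch:

	static struct mpic *example_mpic_probe(struct device_node *np,
					       phys_addr_t regs)
	{
		struct mpic *mpic;

		/* MPIC_PRIMARY and mpic_init() are existing interfaces,
		 * combined here purely for illustration. */
		mpic = mpic_alloc(np, regs, MPIC_USES_DCR | MPIC_PRIMARY,
				  0, 0, " MPIC ");
		if (mpic)
			mpic_init(mpic);
		return mpic;
	}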
index c5c0b0b3cd52cdbe235bc7277e93c829f048e2fe..a889b2005bf558f66d702e4ba74f27ee1eede700 100644 (file)
@@ -6,12 +6,6 @@
 #include <linux/mod_devicetable.h>
 #include <asm/prom.h>
 
-/*
- * The of_platform_bus_type is a bus type used by drivers that do not
- * attach to a macio or similar bus but still use OF probing
- * mechanism
- */
-extern struct bus_type of_platform_bus_type;
 
 /*
  * The of_device is a kind of "base class" that is a superset of
@@ -20,46 +14,22 @@ extern struct bus_type of_platform_bus_type;
  */
 struct of_device
 {
-       struct device_node      *node;          /* OF device node */
+       struct device_node      *node;          /* to be obsoleted */
        u64                     dma_mask;       /* DMA mask */
        struct device           dev;            /* Generic device interface */
 };
 #define        to_of_device(d) container_of(d, struct of_device, dev)
 
+extern const struct of_device_id *of_match_node(
+       const struct of_device_id *matches, const struct device_node *node);
 extern const struct of_device_id *of_match_device(
        const struct of_device_id *matches, const struct of_device *dev);
 
 extern struct of_device *of_dev_get(struct of_device *dev);
 extern void of_dev_put(struct of_device *dev);
 
-/*
- * An of_platform_driver driver is attached to a basic of_device on
- * the "platform bus" (of_platform_bus_type)
- */
-struct of_platform_driver
-{
-       char                    *name;
-       struct of_device_id     *match_table;
-       struct module           *owner;
-
-       int     (*probe)(struct of_device* dev, const struct of_device_id *match);
-       int     (*remove)(struct of_device* dev);
-
-       int     (*suspend)(struct of_device* dev, pm_message_t state);
-       int     (*resume)(struct of_device* dev);
-       int     (*shutdown)(struct of_device* dev);
-
-       struct device_driver    driver;
-};
-#define        to_of_platform_driver(drv) container_of(drv,struct of_platform_driver, driver)
-
-extern int of_register_driver(struct of_platform_driver *drv);
-extern void of_unregister_driver(struct of_platform_driver *drv);
 extern int of_device_register(struct of_device *ofdev);
 extern void of_device_unregister(struct of_device *ofdev);
-extern struct of_device *of_platform_device_create(struct device_node *np,
-                                                  const char *bus_id,
-                                                  struct device *parent);
 extern void of_release_dev(struct device *dev);
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/of_platform.h b/include/asm-powerpc/of_platform.h
new file mode 100644 (file)
index 0000000..217eafb
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ *    Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ *                      <benh@kernel.crashing.org>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/of_device.h>
+
+/*
+ * The of_platform_bus_type is a bus type used by drivers that do not
+ * attach to a macio or similar bus but still use OF probing
+ * mechanism
+ */
+extern struct bus_type of_platform_bus_type;
+
+/*
+ * An of_platform_driver driver is attached to a basic of_device on
+ * the "platform bus" (of_platform_bus_type)
+ */
+struct of_platform_driver
+{
+       char                    *name;
+       struct of_device_id     *match_table;
+       struct module           *owner;
+
+       int     (*probe)(struct of_device* dev,
+                        const struct of_device_id *match);
+       int     (*remove)(struct of_device* dev);
+
+       int     (*suspend)(struct of_device* dev, pm_message_t state);
+       int     (*resume)(struct of_device* dev);
+       int     (*shutdown)(struct of_device* dev);
+
+       struct device_driver    driver;
+};
+#define        to_of_platform_driver(drv) \
+       container_of(drv,struct of_platform_driver, driver)
+
+/* Platform drivers register/unregister */
+extern int of_register_platform_driver(struct of_platform_driver *drv);
+extern void of_unregister_platform_driver(struct of_platform_driver *drv);
+
+/* Platform devices and busses creation */
+extern struct of_device *of_platform_device_create(struct device_node *np,
+                                                  const char *bus_id,
+                                                  struct device *parent);
+/* pseudo "matches" value to not do deep probe */
+#define OF_NO_DEEP_PROBE ((struct of_device_id *)-1)
+
+extern int of_platform_bus_probe(struct device_node *root,
+                                struct of_device_id *matches,
+                                struct device *parent);
+
+extern struct of_device *of_find_device_by_node(struct device_node *np);
+extern struct of_device *of_find_device_by_phandle(phandle ph);
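The platform-bus pieces move here from of_device.h. A hedged sketch of a minimal driver registered through the relocated API; the driver name and compatible string are illustrative:

	static int example_probe(struct of_device *dev,
				 const struct of_device_id *match)
	{
		return 0;	/* claim the device */
	}

	static struct of_device_id example_match[] = {
		{ .compatible = "example,device" },	/* assumed binding */
		{},
	};

	static struct of_platform_driver example_driver = {
		.name		= "example",
		.match_table	= example_match,
		.probe		= example_probe,
	};

	static int __init example_init(void)
	{
		return of_register_platform_driver(&example_driver);
	}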
index 07a10e590c1de44d951bb0b25e3bdfd364670f94..71043bf3641f369311c3343c6b0700ff899ea981 100644 (file)
@@ -44,7 +44,9 @@ struct op_powerpc_model {
                           int num_counters);
        void (*cpu_setup) (struct op_counter_config *);
        void (*start) (struct op_counter_config *);
+        void (*global_start) (struct op_counter_config *);
        void (*stop) (void);
+       void (*global_stop) (void);
        void (*handle_interrupt) (struct pt_regs *,
                                  struct op_counter_config *);
        int num_counters;
@@ -54,6 +56,7 @@ extern struct op_powerpc_model op_model_fsl_booke;
 extern struct op_powerpc_model op_model_rs64;
 extern struct op_powerpc_model op_model_power4;
 extern struct op_powerpc_model op_model_7450;
+extern struct op_powerpc_model op_model_cell;
 
 #ifndef CONFIG_FSL_BOOKE
 
index 0a4e5c93e8e6c1d9776347140672f226d375f96a..0d3adc09c847bdf20448a9539bfd0c2e8e6622ac 100644 (file)
@@ -93,7 +93,8 @@ struct paca_struct {
        u64 stab_rr;                    /* stab/slb round-robin counter */
        u64 saved_r1;                   /* r1 save for RTAS calls */
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
-       u8 proc_enabled;                /* irq soft-enable flag */
+       u8 soft_enabled;                /* irq soft-enable flag */
+       u8 hard_enabled;                /* set if irqs are enabled in MSR */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
 
        /* Stuff for accurate time accounting */
index 86ee46b09b8a198edbd4b570784492cd921dd979..7bb7f90098066f81d3ec216141c21294c26ea2ea 100644 (file)
@@ -25,6 +25,7 @@ struct pci_controller {
        int node;
        void *arch_data;
        struct list_head list_node;
+       struct device *parent;
 
        int first_busno;
        int last_busno;
index 721c97f09b2061f0c412dced7946a90fca9dce36..16f13319c7693fbb55cea896bab704885ebd4357 100644 (file)
@@ -70,15 +70,15 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
  */
 #define PCI_DISABLE_MWI
 
-extern struct dma_mapping_ops pci_dma_ops;
+extern struct dma_mapping_ops *pci_dma_ops;
 
 /* For DAC DMA, we currently don't support it by default, but
  * we let 64-bit platforms override this.
  */
 static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
 {
-       if (pci_dma_ops.dac_dma_supported)
-               return pci_dma_ops.dac_dma_supported(&hwdev->dev, mask);
+       if (pci_dma_ops && pci_dma_ops->dac_dma_supported)
+               return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask);
        return 0;
 }
 
@@ -210,6 +210,8 @@ extern int remap_bus_range(struct pci_bus *bus);
 extern void pcibios_fixup_device_resources(struct pci_dev *dev,
                        struct pci_bus *bus);
 
+extern void pcibios_setup_new_device(struct pci_dev *dev);
+
 extern void pcibios_claim_one_bus(struct pci_bus *b);
 
 extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
@@ -232,12 +234,10 @@ extern pgprot_t   pci_phys_mem_access_prot(struct file *file,
                                         unsigned long size,
                                         pgprot_t prot);
 
-#if defined(CONFIG_PPC_MULTIPLATFORM) || defined(CONFIG_PPC32)
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
                                 const struct resource *rsrc,
                                 resource_size_t *start, resource_size_t *end);
-#endif /* CONFIG_PPC_MULTIPLATFORM || CONFIG_PPC32 */
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_PCI_H */
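Because pci_dma_ops is now a pointer rather than a structure, callers must tolerate it being unset, as the pci_dac_dma_supported() change above shows, and a platform installs its table by assignment. A hedged sketch; the ops table and init function are hypothetical:

	static struct dma_mapping_ops example_dma_ops;	/* callbacks omitted */

	static void __init example_pci_dma_init(void)
	{
		pci_dma_ops = &example_dma_ops;
	}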
index ae63db7b3e7d724af37878901a079a97167ab68c..b0830db68f8af1a6ad87b6dd6aa87413aa4d0a90 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *pgtable_cache[];
+extern struct kmem_cache *pgtable_cache[];
 
 #ifdef CONFIG_PPC_64K_PAGES
 #define PTE_CACHE_NUM  0
index 1115756c79f941a2ea2262030ab60f56a130b69f..ab6eddb518c737e3c59a6c63154215dc3998ad1e 100644 (file)
@@ -36,18 +36,17 @@ typedef void *(*traverse_func)(struct device_node *me, void *data);
 void *traverse_pci_devices(struct device_node *start, traverse_func pre,
                void *data);
 
-void pci_devs_phb_init(void);
-void pci_devs_phb_init_dynamic(struct pci_controller *phb);
-int setup_phb(struct device_node *dev, struct pci_controller *phb);
-void __devinit scan_phb(struct pci_controller *hose);
+extern void pci_devs_phb_init(void);
+extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+extern void scan_phb(struct pci_controller *hose);
 
 /* From rtas_pci.h */
-void init_pci_config_tokens (void);
-unsigned long get_phb_buid (struct device_node *);
+extern void init_pci_config_tokens (void);
+extern unsigned long get_phb_buid (struct device_node *);
+extern int rtas_setup_phb(struct pci_controller *phb);
 
 /* From pSeries_pci.h */
 extern void pSeries_final_fixup(void);
-extern void pSeries_irq_bus_setup(struct pci_bus *bus);
 
 extern unsigned long pci_probe_only;
 
index 6cb6fb19e57f22eb39f45b56b5fd8aa92de28855..a26c32ee5527f300dc45620a7ab582b6bed8d699 100644 (file)
@@ -53,10 +53,6 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
 
 #endif /* CONFIG_PPC_PREP */
 
-#ifndef CONFIG_PPC_MULTIPLATFORM
-#define _machine 0
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
 #endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
 
 /*
index ec11d44eaeb5d46085f8d3bbffbece4611c7f9a1..0afee17f33b4f5de4e8ad6cc70b171383809cff9 100644 (file)
@@ -17,6 +17,7 @@
  */
 #include <linux/types.h>
 #include <linux/proc_fs.h>
+#include <linux/platform_device.h>
 #include <asm/atomic.h>
 
 /* Definitions used by the flattened device tree */
@@ -333,6 +334,20 @@ extern int of_irq_map_one(struct device_node *device, int index,
 struct pci_dev;
 extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
 
+static inline int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
+{
+       int irq = irq_of_parse_and_map(dev, index);
+
+       /* Only dereference the resource if both the
+        * resource and the irq are valid. */
+       if (r && irq != NO_IRQ) {
+               r->start = r->end = irq;
+               r->flags = IORESOURCE_IRQ;
+       }
+
+       return irq;
+}
+
 
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PROM_H */
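of_irq_to_resource() above both maps the interrupt and, when a resource pointer is supplied, fills it in. A small hedged usage sketch with a caller-supplied node and resource:

	static int example_fill_irq(struct device_node *np, struct resource *res)
	{
		int virq = of_irq_to_resource(np, 0, res);

		return (virq == NO_IRQ) ? -ENXIO : 0;
	}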
diff --git a/include/asm-powerpc/ps3.h b/include/asm-powerpc/ps3.h
new file mode 100644 (file)
index 0000000..52a69ed
--- /dev/null
@@ -0,0 +1,462 @@
+/*
+ *  PS3 platform declarations.
+ *
+ *  Copyright (C) 2006 Sony Computer Entertainment Inc.
+ *  Copyright 2006 Sony Corp.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#if !defined(_ASM_POWERPC_PS3_H)
+#define _ASM_POWERPC_PS3_H
+
+#include <linux/compiler.h> /* for __deprecated */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+/**
+ * struct ps3_device_id - HV bus device identifier from the system repository
+ * @bus_id: HV bus id, {1..} (zero invalid)
+ * @dev_id: HV device id, {0..}
+ */
+
+struct ps3_device_id {
+       unsigned int bus_id;
+       unsigned int dev_id;
+};
+
+
+/* dma routines */
+
+enum ps3_dma_page_size {
+       PS3_DMA_4K = 12U,
+       PS3_DMA_64K = 16U,
+       PS3_DMA_1M = 20U,
+       PS3_DMA_16M = 24U,
+};
+
+enum ps3_dma_region_type {
+       PS3_DMA_OTHER = 0,
+       PS3_DMA_INTERNAL = 2,
+};
+
+/**
+ * struct ps3_dma_region - A per device dma state variables structure
+ * @did: The HV device id.
+ * @page_size: The ioc pagesize.
+ * @region_type: The HV region type.
+ * @bus_addr: The 'translated' bus address of the region.
+ * @len: The length in bytes of the region.
+ * @chunk_list: Opaque variable used by the ioc page manager.
+ */
+
+struct ps3_dma_region {
+       struct ps3_device_id did;
+       enum ps3_dma_page_size page_size;
+       enum ps3_dma_region_type region_type;
+       unsigned long bus_addr;
+       unsigned long len;
+       struct {
+               spinlock_t lock;
+               struct list_head head;
+       } chunk_list;
+};
+
+/**
+ * ps3_dma_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+static inline void ps3_dma_region_init(struct ps3_dma_region *r,
+       const struct ps3_device_id* did, enum ps3_dma_page_size page_size,
+       enum ps3_dma_region_type region_type)
+{
+       r->did = *did;
+       r->page_size = page_size;
+       r->region_type = region_type;
+}
+int ps3_dma_region_create(struct ps3_dma_region *r);
+int ps3_dma_region_free(struct ps3_dma_region *r);
+int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
+       unsigned long len, unsigned long *bus_addr);
+int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
+       unsigned long len);
+
+/* mmio routines */
+
+enum ps3_mmio_page_size {
+       PS3_MMIO_4K = 12U,
+       PS3_MMIO_64K = 16U
+};
+
+/**
+ * struct ps3_mmio_region - a per device mmio state variables structure
+ *
+ * Current systems can be supported with a single region per device.
+ */
+
+struct ps3_mmio_region {
+       struct ps3_device_id did;
+       unsigned long bus_addr;
+       unsigned long len;
+       enum ps3_mmio_page_size page_size;
+       unsigned long lpar_addr;
+};
+
+/**
+ * ps3_mmio_region_init - Helper to initialize structure variables
+ *
+ * Helper to properly initialize variables prior to calling
+ * ps3_system_bus_device_register.
+ */
+
+static inline void ps3_mmio_region_init(struct ps3_mmio_region *r,
+       const struct ps3_device_id* did, unsigned long bus_addr,
+       unsigned long len, enum ps3_mmio_page_size page_size)
+{
+       r->did = *did;
+       r->bus_addr = bus_addr;
+       r->len = len;
+       r->page_size = page_size;
+}
+int ps3_mmio_region_create(struct ps3_mmio_region *r);
+int ps3_free_mmio_region(struct ps3_mmio_region *r);
+unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr);
+
+/* interrupt routines */
+
+int ps3_alloc_io_irq(unsigned int interrupt_id, unsigned int *virq);
+int ps3_free_io_irq(unsigned int virq);
+int ps3_alloc_event_irq(unsigned int *virq);
+int ps3_free_event_irq(unsigned int virq);
+int ps3_send_event_locally(unsigned int virq);
+int ps3_connect_event_irq(const struct ps3_device_id *did,
+       unsigned int interrupt_id, unsigned int *virq);
+int ps3_disconnect_event_irq(const struct ps3_device_id *did,
+       unsigned int interrupt_id, unsigned int virq);
+int ps3_alloc_vuart_irq(void* virt_addr_bmp, unsigned int *virq);
+int ps3_free_vuart_irq(unsigned int virq);
+int ps3_alloc_spe_irq(unsigned long spe_id, unsigned int class,
+       unsigned int *virq);
+int ps3_free_spe_irq(unsigned int virq);
+
+/* lv1 result codes */
+
+enum lv1_result {
+       LV1_SUCCESS                     = 0,
+       /* not used                       -1 */
+       LV1_RESOURCE_SHORTAGE           = -2,
+       LV1_NO_PRIVILEGE                = -3,
+       LV1_DENIED_BY_POLICY            = -4,
+       LV1_ACCESS_VIOLATION            = -5,
+       LV1_NO_ENTRY                    = -6,
+       LV1_DUPLICATE_ENTRY             = -7,
+       LV1_TYPE_MISMATCH               = -8,
+       LV1_BUSY                        = -9,
+       LV1_EMPTY                       = -10,
+       LV1_WRONG_STATE                 = -11,
+       /* not used                       -12 */
+       LV1_NO_MATCH                    = -13,
+       LV1_ALREADY_CONNECTED           = -14,
+       LV1_UNSUPPORTED_PARAMETER_VALUE = -15,
+       LV1_CONDITION_NOT_SATISFIED     = -16,
+       LV1_ILLEGAL_PARAMETER_VALUE     = -17,
+       LV1_BAD_OPTION                  = -18,
+       LV1_IMPLEMENTATION_LIMITATION   = -19,
+       LV1_NOT_IMPLEMENTED             = -20,
+       LV1_INVALID_CLASS_ID            = -21,
+       LV1_CONSTRAINT_NOT_SATISFIED    = -22,
+       LV1_ALIGNMENT_ERROR             = -23,
+       LV1_INTERNAL_ERROR              = -32768,
+};
+
+static inline const char* ps3_result(int result)
+{
+#if defined(DEBUG)
+       switch (result) {
+       case LV1_SUCCESS:
+               return "LV1_SUCCESS (0)";
+       case -1:
+               return "** unknown result ** (-1)";
+       case LV1_RESOURCE_SHORTAGE:
+               return "LV1_RESOURCE_SHORTAGE (-2)";
+       case LV1_NO_PRIVILEGE:
+               return "LV1_NO_PRIVILEGE (-3)";
+       case LV1_DENIED_BY_POLICY:
+               return "LV1_DENIED_BY_POLICY (-4)";
+       case LV1_ACCESS_VIOLATION:
+               return "LV1_ACCESS_VIOLATION (-5)";
+       case LV1_NO_ENTRY:
+               return "LV1_NO_ENTRY (-6)";
+       case LV1_DUPLICATE_ENTRY:
+               return "LV1_DUPLICATE_ENTRY (-7)";
+       case LV1_TYPE_MISMATCH:
+               return "LV1_TYPE_MISMATCH (-8)";
+       case LV1_BUSY:
+               return "LV1_BUSY (-9)";
+       case LV1_EMPTY:
+               return "LV1_EMPTY (-10)";
+       case LV1_WRONG_STATE:
+               return "LV1_WRONG_STATE (-11)";
+       case -12:
+               return "** unknown result ** (-12)";
+       case LV1_NO_MATCH:
+               return "LV1_NO_MATCH (-13)";
+       case LV1_ALREADY_CONNECTED:
+               return "LV1_ALREADY_CONNECTED (-14)";
+       case LV1_UNSUPPORTED_PARAMETER_VALUE:
+               return "LV1_UNSUPPORTED_PARAMETER_VALUE (-15)";
+       case LV1_CONDITION_NOT_SATISFIED:
+               return "LV1_CONDITION_NOT_SATISFIED (-16)";
+       case LV1_ILLEGAL_PARAMETER_VALUE:
+               return "LV1_ILLEGAL_PARAMETER_VALUE (-17)";
+       case LV1_BAD_OPTION:
+               return "LV1_BAD_OPTION (-18)";
+       case LV1_IMPLEMENTATION_LIMITATION:
+               return "LV1_IMPLEMENTATION_LIMITATION (-19)";
+       case LV1_NOT_IMPLEMENTED:
+               return "LV1_NOT_IMPLEMENTED (-20)";
+       case LV1_INVALID_CLASS_ID:
+               return "LV1_INVALID_CLASS_ID (-21)";
+       case LV1_CONSTRAINT_NOT_SATISFIED:
+               return "LV1_CONSTRAINT_NOT_SATISFIED (-22)";
+       case LV1_ALIGNMENT_ERROR:
+               return "LV1_ALIGNMENT_ERROR (-23)";
+       case LV1_INTERNAL_ERROR:
+               return "LV1_INTERNAL_ERROR (-32768)";
+       default:
+               BUG();
+               return "** unknown result **";
+       };
+#else
+       return "";
+#endif
+}
+
+/* repository bus info */
+
+enum ps3_bus_type {
+       PS3_BUS_TYPE_SB = 4,
+       PS3_BUS_TYPE_STORAGE = 5,
+};
+
+enum ps3_dev_type {
+       PS3_DEV_TYPE_SB_GELIC = 3,
+       PS3_DEV_TYPE_SB_USB = 4,
+       PS3_DEV_TYPE_SB_GPIO = 6,
+};
+
+int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
+       u64 *value);
+int ps3_repository_read_bus_id(unsigned int bus_index, unsigned int *bus_id);
+int ps3_repository_read_bus_type(unsigned int bus_index,
+       enum ps3_bus_type *bus_type);
+int ps3_repository_read_bus_num_dev(unsigned int bus_index,
+       unsigned int *num_dev);
+
+/* repository bus device info */
+
+enum ps3_interrupt_type {
+       PS3_INTERRUPT_TYPE_EVENT_PORT = 2,
+       PS3_INTERRUPT_TYPE_SB_OHCI = 3,
+       PS3_INTERRUPT_TYPE_SB_EHCI = 4,
+       PS3_INTERRUPT_TYPE_OTHER = 5,
+};
+
+enum ps3_region_type {
+       PS3_REGION_TYPE_SB_OHCI = 3,
+       PS3_REGION_TYPE_SB_EHCI = 4,
+       PS3_REGION_TYPE_SB_GPIO = 5,
+};
+
+int ps3_repository_read_dev_str(unsigned int bus_index,
+       unsigned int dev_index, const char *dev_str, u64 *value);
+int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
+       unsigned int *dev_id);
+int ps3_repository_read_dev_type(unsigned int bus_index,
+       unsigned int dev_index, enum ps3_dev_type *dev_type);
+int ps3_repository_read_dev_intr(unsigned int bus_index,
+       unsigned int dev_index, unsigned int intr_index,
+       enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id);
+int ps3_repository_read_dev_reg_type(unsigned int bus_index,
+       unsigned int dev_index, unsigned int reg_index,
+       enum ps3_region_type *reg_type);
+int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
+       unsigned int dev_index, unsigned int reg_index, u64 *bus_addr,
+       u64 *len);
+int ps3_repository_read_dev_reg(unsigned int bus_index,
+       unsigned int dev_index, unsigned int reg_index,
+       enum ps3_region_type *reg_type, u64 *bus_addr, u64 *len);
+
+/* repository bus enumerators */
+
+struct ps3_repository_device {
+       unsigned int bus_index;
+       unsigned int dev_index;
+       struct ps3_device_id did;
+};
+
+int ps3_repository_find_device(enum ps3_bus_type bus_type,
+       enum ps3_dev_type dev_type,
+       const struct ps3_repository_device *start_dev,
+       struct ps3_repository_device *dev);
+static inline int ps3_repository_find_first_device(
+       enum ps3_bus_type bus_type, enum ps3_dev_type dev_type,
+       struct ps3_repository_device *dev)
+{
+       return ps3_repository_find_device(bus_type, dev_type, NULL, dev);
+}
+int ps3_repository_find_interrupt(const struct ps3_repository_device *dev,
+       enum ps3_interrupt_type intr_type, unsigned int *interrupt_id);
+int ps3_repository_find_region(const struct ps3_repository_device *dev,
+       enum ps3_region_type reg_type, u64 *bus_addr, u64 *len);
+
+/* repository block device info */
+
+int ps3_repository_read_dev_port(unsigned int bus_index,
+       unsigned int dev_index, u64 *port);
+int ps3_repository_read_dev_blk_size(unsigned int bus_index,
+       unsigned int dev_index, u64 *blk_size);
+int ps3_repository_read_dev_num_blocks(unsigned int bus_index,
+       unsigned int dev_index, u64 *num_blocks);
+int ps3_repository_read_dev_num_regions(unsigned int bus_index,
+       unsigned int dev_index, unsigned int *num_regions);
+int ps3_repository_read_dev_region_id(unsigned int bus_index,
+       unsigned int dev_index, unsigned int region_index,
+       unsigned int *region_id);
+int ps3_repository_read_dev_region_size(unsigned int bus_index,
+       unsigned int dev_index, unsigned int region_index, u64 *region_size);
+int ps3_repository_read_dev_region_start(unsigned int bus_index,
+       unsigned int dev_index, unsigned int region_index, u64 *region_start);
+
+/* repository pu and memory info */
+
+int ps3_repository_read_num_pu(unsigned int *num_pu);
+int ps3_repository_read_ppe_id(unsigned int *pu_index, unsigned int *ppe_id);
+int ps3_repository_read_rm_base(unsigned int ppe_id, u64 *rm_base);
+int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size);
+int ps3_repository_read_region_total(u64 *region_total);
+int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size,
+       u64 *region_total);
+
+/* repository pme info */
+
+int ps3_repository_read_num_be(unsigned int *num_be);
+int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id);
+int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
+int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
+
+/* repository 'Other OS' area */
+
+int ps3_repository_read_boot_dat_addr(u64 *lpar_addr);
+int ps3_repository_read_boot_dat_size(unsigned int *size);
+int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size);
+
+/* repository spu info */
+
+/**
+ * enum ps3_spu_resource_type - Type of spu resource.
+ * @PS3_SPU_RESOURCE_TYPE_SHARED: Logical spu is shared with other partitions.
+ * @PS3_SPU_RESOURCE_TYPE_EXCLUSIVE: Logical spu is not shared with other partitions.
+ *
+ * Returned by ps3_repository_read_spu_resource_id().
+ */
+
+enum ps3_spu_resource_type {
+       PS3_SPU_RESOURCE_TYPE_SHARED = 0,
+       PS3_SPU_RESOURCE_TYPE_EXCLUSIVE = 0x8000000000000000UL,
+};
+
+int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved);
+int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id);
+int ps3_repository_read_spu_resource_id(unsigned int res_index,
+       enum ps3_spu_resource_type* resource_type, unsigned int *resource_id);
+
+
+/* system bus routines */
+
+enum ps3_match_id {
+       PS3_MATCH_ID_EHCI = 1,
+       PS3_MATCH_ID_OHCI,
+       PS3_MATCH_ID_GELIC,
+       PS3_MATCH_ID_AV_SETTINGS,
+       PS3_MATCH_ID_SYSTEM_MANAGER,
+};
+
+/**
+ * struct ps3_system_bus_device - a device on the system bus
+ */
+
+struct ps3_system_bus_device {
+       enum ps3_match_id match_id;
+       struct ps3_device_id did;
+       unsigned int interrupt_id;
+/*     struct iommu_table *iommu_table; -- waiting for Ben's cleanups */
+       struct ps3_dma_region *d_region;
+       struct ps3_mmio_region *m_region;
+       struct device core;
+};
+
+/**
+ * struct ps3_system_bus_driver - a driver for a device on the system bus
+ */
+
+struct ps3_system_bus_driver {
+       enum ps3_match_id match_id;
+       struct device_driver core;
+       int (*probe)(struct ps3_system_bus_device *);
+       int (*remove)(struct ps3_system_bus_device *);
+/*     int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
+/*     int (*resume)(struct ps3_system_bus_device *); */
+};
+
+int ps3_system_bus_device_register(struct ps3_system_bus_device *dev);
+int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv);
+void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv);
+static inline struct ps3_system_bus_driver *to_ps3_system_bus_driver(
+       struct device_driver *_drv)
+{
+       return container_of(_drv, struct ps3_system_bus_driver, core);
+}
+static inline struct ps3_system_bus_device *to_ps3_system_bus_device(
+       struct device *_dev)
+{
+       return container_of(_dev, struct ps3_system_bus_device, core);
+}
+
+/**
+ * ps3_system_bus_set_driver_data - set driver private data
+ * @dev: device structure
+ * @data: Data to set
+ */
+
+static inline void ps3_system_bus_set_driver_data(
+       struct ps3_system_bus_device *dev, void *data)
+{
+       dev->core.driver_data = data;
+}
+static inline void *ps3_system_bus_get_driver_data(
+       struct ps3_system_bus_device *dev)
+{
+       return dev->core.driver_data;
+}
+
+/* These two need global scope for get_dma_ops(). */
+
+extern struct bus_type ps3_system_bus_type;
+
+#endif
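Tying the pieces of this header together, a hedged sketch of a system-bus driver registration; the reuse of PS3_MATCH_ID_GELIC and the empty probe body are purely illustrative:

	static int example_ps3_probe(struct ps3_system_bus_device *dev)
	{
		ps3_system_bus_set_driver_data(dev, NULL);
		return 0;
	}

	static struct ps3_system_bus_driver example_ps3_driver = {
		.match_id	= PS3_MATCH_ID_GELIC,
		.core.name	= "example",
		.probe		= example_ps3_probe,
	};

	static int __init example_ps3_init(void)
	{
		return ps3_system_bus_driver_register(&example_ps3_driver);
	}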
index d34f9e1f242c0cf0009b5d1c15182b9d4abe179c..5a0c136c04168f94e8d235a6308702c4cf712c5e 100644 (file)
@@ -54,8 +54,6 @@ struct rtas_args {
        rtas_arg_t *rets;     /* Pointer to return values in args[]. */
 };  
 
-extern struct rtas_args rtas_stop_self_args;
-
 struct rtas_t {
        unsigned long entry;            /* physical address pointer */
        unsigned long base;             /* physical address pointer */
index 3d9740aae018d76b877ad935673568da1663bb81..817fac0a0714a8705d6b6d8ce1bd85ef85523822 100644 (file)
@@ -1,9 +1,6 @@
 #ifndef _ASM_POWERPC_SETUP_H
 #define _ASM_POWERPC_SETUP_H
 
-#ifdef __KERNEL__
-
 #define COMMAND_LINE_SIZE      512
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SETUP_H */
index 38b1ea3b58fd43ba801aeca6dbce186616b60ecd..48ad807a0b8a80ce49788e49935d081585166009 100644 (file)
@@ -9,8 +9,14 @@
  * MAX_PHYSMEM_BITS            2^N: how much memory we can have in that space
  */
 #define SECTION_SIZE_BITS       24
+
+#if defined(CONFIG_PS3_USE_LPAR_ADDR)
+#define MAX_PHYSADDR_BITS       47
+#define MAX_PHYSMEM_BITS        47
+#else
 #define MAX_PHYSADDR_BITS       44
 #define MAX_PHYSMEM_BITS        44
+#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern void create_section_mapping(unsigned long start, unsigned long end);
index e73ea00efd8b2ea7917a9820edbd94225b695cb6..fdad4267b447d10dbd48e9219ed45fa35b2475f3 100644 (file)
@@ -111,14 +111,12 @@ struct spu {
        u8 *local_store;
        unsigned long problem_phys;
        struct spu_problem __iomem *problem;
-       struct spu_priv1 __iomem *priv1;
        struct spu_priv2 __iomem *priv2;
        struct list_head list;
        struct list_head sched_list;
+       struct list_head full_list;
        int number;
-       int nid;
        unsigned int irqs[3];
-       u32 isrc;
        u32 node;
        u64 flags;
        u64 dar;
@@ -144,6 +142,7 @@ struct spu {
        char irq_c1[8];
        char irq_c2[8];
 
+       void* pdata; /* platform private data */
        struct sys_device sysdev;
 };
 
@@ -170,6 +169,13 @@ extern struct spufs_calls {
        struct module *owner;
 } spufs_calls;
 
+/* coredump calls implemented in spufs */
+struct spu_coredump_calls {
+       asmlinkage int (*arch_notes_size)(void);
+       asmlinkage void (*arch_write_notes)(struct file *file);
+       struct module *owner;
+};
+
 /* return status from spu_run, same as in libspe */
 #define SPE_EVENT_DMA_ALIGNMENT                0x0008  /*A DMA alignment error */
 #define SPE_EVENT_SPE_ERROR            0x0010  /*An illegal instruction error*/
@@ -182,8 +188,10 @@ extern struct spufs_calls {
  */
 #define SPU_CREATE_EVENTS_ENABLED      0x0001
 #define SPU_CREATE_GANG                        0x0002
+#define SPU_CREATE_NOSCHED             0x0004
+#define SPU_CREATE_ISOLATE             0x0008
 
-#define SPU_CREATE_FLAG_ALL            0x0003 /* mask of all valid flags */
+#define SPU_CREATE_FLAG_ALL            0x000f /* mask of all valid flags */
 
 
 #ifdef CONFIG_SPU_FS_MODULE
@@ -199,6 +207,15 @@ static inline void unregister_spu_syscalls(struct spufs_calls *calls)
 }
 #endif /* MODULE */
 
+int register_arch_coredump_calls(struct spu_coredump_calls *calls);
+void unregister_arch_coredump_calls(struct spu_coredump_calls *calls);
+
+int spu_add_sysdev_attr(struct sysdev_attribute *attr);
+void spu_remove_sysdev_attr(struct sysdev_attribute *attr);
+
+int spu_add_sysdev_attr_group(struct attribute_group *attrs);
+void spu_remove_sysdev_attr_group(struct attribute_group *attrs);
+
 
 /*
  * Notifier blocks:
@@ -277,6 +294,7 @@ struct spu_problem {
        u32 spu_runcntl_RW;                                     /* 0x401c */
 #define SPU_RUNCNTL_STOP       0L
 #define SPU_RUNCNTL_RUNNABLE   1L
+#define SPU_RUNCNTL_ISOLATE    2L
        u8  pad_0x4020_0x4024[0x4];                             /* 0x4020 */
        u32 spu_status_R;                                       /* 0x4024 */
 #define SPU_STOP_STATUS_SHIFT           16
@@ -289,8 +307,8 @@ struct spu_problem {
 #define SPU_STATUS_INVALID_INSTR        0x20
 #define SPU_STATUS_INVALID_CH           0x40
 #define SPU_STATUS_ISOLATED_STATE       0x80
-#define SPU_STATUS_ISOLATED_LOAD_STAUTUS 0x200
-#define SPU_STATUS_ISOLATED_EXIT_STAUTUS 0x400
+#define SPU_STATUS_ISOLATED_LOAD_STATUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STATUS 0x400
        u8  pad_0x4028_0x402c[0x4];                             /* 0x4028 */
        u32 spu_spe_R;                                          /* 0x402c */
        u8  pad_0x4030_0x4034[0x4];                             /* 0x4030 */
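The sysdev attribute helpers added above let platform or spufs code attach extra attributes to every SPU's sysdev. A hedged sketch exporting a hypothetical read-only attribute:

	static ssize_t example_show(struct sys_device *sysdev, char *buf)
	{
		struct spu *spu = container_of(sysdev, struct spu, sysdev);

		return sprintf(buf, "%d\n", spu->number);
	}

	static SYSDEV_ATTR(example, 0444, example_show, NULL);

	static int __init example_attr_init(void)
	{
		return spu_add_sysdev_attr(&attr_example);
	}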
index 964c2d38ccb75123421cde8a4716c7a86ba20102..bdbf906a767f0ef75201514681a387ad7f291dc9 100644 (file)
@@ -151,7 +151,6 @@ struct spu_priv1_collapsed {
        u64 mfc_fir_chkstp_enable_RW;
        u64 smf_sbi_signal_sel;
        u64 smf_ato_signal_sel;
-       u64 mfc_sdr_RW;
        u64 tlb_index_hint_RO;
        u64 tlb_index_W;
        u64 tlb_vpn_RW;
diff --git a/include/asm-powerpc/spu_info.h b/include/asm-powerpc/spu_info.h
new file mode 100644 (file)
index 0000000..3545efb
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * SPU info structures
+ *
+ * (C) Copyright 2006 IBM Corp.
+ *
+ * Author: Dwayne Grant McConnell <decimal@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.        See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_INFO_H
+#define _SPU_INFO_H
+
+#ifdef __KERNEL__
+#include <asm/spu.h>
+#include <linux/types.h>
+#else
+struct mfc_cq_sr {
+       __u64 mfc_cq_data0_RW;
+       __u64 mfc_cq_data1_RW;
+       __u64 mfc_cq_data2_RW;
+       __u64 mfc_cq_data3_RW;
+};
+#endif /* __KERNEL__ */
+
+struct spu_dma_info {
+       __u64 dma_info_type;
+       __u64 dma_info_mask;
+       __u64 dma_info_status;
+       __u64 dma_info_stall_and_notify;
+       __u64 dma_info_atomic_command_status;
+       struct mfc_cq_sr dma_info_command_data[16];
+};
+
+struct spu_proxydma_info {
+       __u64 proxydma_info_type;
+       __u64 proxydma_info_mask;
+       __u64 proxydma_info_status;
+       struct mfc_cq_sr proxydma_info_command_data[8];
+};
+
+#endif
index 300c458b6d06b1839550b146873a3e1c618e6115..69dcb0c53884f64f82960239d87462257cbc3e17 100644 (file)
 #define _SPU_PRIV1_H
 #if defined(__KERNEL__)
 
+#include <linux/types.h>
+
 struct spu;
 
 /* access to priv1 registers */
 
-struct spu_priv1_ops
-{
+struct spu_priv1_ops {
        void (*int_mask_and) (struct spu *spu, int class, u64 mask);
        void (*int_mask_or) (struct spu *spu, int class, u64 mask);
        void (*int_mask_set) (struct spu *spu, int class, u64 mask);
@@ -37,7 +38,7 @@ struct spu_priv1_ops
        u64 (*mfc_dar_get) (struct spu *spu);
        u64 (*mfc_dsisr_get) (struct spu *spu);
        void (*mfc_dsisr_set) (struct spu *spu, u64 dsisr);
-       void (*mfc_sdr_set) (struct spu *spu, u64 sdr);
+       void (*mfc_sdr_setup) (struct spu *spu);
        void (*mfc_sr1_set) (struct spu *spu, u64 sr1);
        u64 (*mfc_sr1_get) (struct spu *spu);
        void (*mfc_tclass_id_set) (struct spu *spu, u64 tclass_id);
@@ -112,9 +113,9 @@ spu_mfc_dsisr_set (struct spu *spu, u64 dsisr)
 }
 
 static inline void
-spu_mfc_sdr_set (struct spu *spu, u64 sdr)
+spu_mfc_sdr_setup (struct spu *spu)
 {
-       spu_priv1_ops->mfc_sdr_set(spu, sdr);
+       spu_priv1_ops->mfc_sdr_setup(spu);
 }
 
 static inline void
@@ -171,12 +172,41 @@ spu_resource_allocation_enable_get (struct spu *spu)
        return spu_priv1_ops->resource_allocation_enable_get(spu);
 }
 
-/* The declarations folowing are put here for convenience
- * and only intended to be used by the platform setup code
- * for initializing spu_priv1_ops.
+/* spu management abstraction */
+
+struct spu_management_ops {
+       int (*enumerate_spus)(int (*fn)(void *data));
+       int (*create_spu)(struct spu *spu, void *data);
+       int (*destroy_spu)(struct spu *spu);
+};
+
+extern const struct spu_management_ops* spu_management_ops;
+
+static inline int
+spu_enumerate_spus (int (*fn)(void *data))
+{
+       return spu_management_ops->enumerate_spus(fn);
+}
+
+static inline int
+spu_create_spu (struct spu *spu, void *data)
+{
+       return spu_management_ops->create_spu(spu, data);
+}
+
+static inline int
+spu_destroy_spu (struct spu *spu)
+{
+       return spu_management_ops->destroy_spu(spu);
+}
+
+/*
+ * The declarations following are put here for convenience
+ * and only intended to be used by the platform setup code.
  */
 
 extern const struct spu_priv1_ops spu_priv1_mmio_ops;
+extern const struct spu_management_ops spu_management_of_ops;
 
 #endif /* __KERNEL__ */
 #endif
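
The hunk above splits SPU management (enumeration, creation, destruction) out of the priv1 register ops so that platforms other than the device-tree based one can supply their own backend; spu_management_of_ops is the device-tree implementation exported at the end. A minimal sketch of what a platform backend could look like; the my_* names and function bodies are hypothetical, only the three hooks and the ops structure come from the header above.

#include <asm/spu.h>
#include <asm/spu_priv1.h>

static int my_enumerate_spus(int (*fn)(void *data))
{
        /* Walk the platform's SPU description, calling fn() once per SPU. */
        return 0;       /* number of SPUs found, or a negative errno */
}

static int my_create_spu(struct spu *spu, void *data)
{
        /* Map this SPU's register areas and fill in *spu. */
        return 0;
}

static int my_destroy_spu(struct spu *spu)
{
        /* Undo whatever my_create_spu() set up. */
        return 0;
}

const struct spu_management_ops my_spu_management_ops = {
        .enumerate_spus = my_enumerate_spus,
        .create_spu     = my_create_spu,
        .destroy_spu    = my_destroy_spu,
};
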
diff --git a/include/asm-powerpc/todc.h b/include/asm-powerpc/todc.h
deleted file mode 100644 (file)
index 60a8c39..0000000
--- a/include/asm-powerpc/todc.h
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Definitions for the M48Txx and mc146818 series of Time of day/Real Time
- * Clock chips.
- *
- * Author: Mark A. Greer <mgreer@mvista.com>
- *
- * 2001 (c) MontaVista, Software, Inc.  This file is licensed under
- * the terms of the GNU General Public License version 2.  This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/*
- * Support for the M48T37/M48T59/.../mc146818 Real Time Clock chips.
- * Purpose is to make one generic file that handles all of these chips instead
- * of every platform implementing the same code over & over again.
- */
-
-#ifndef __PPC_KERNEL_TODC_H
-#define __PPC_KERNEL_TODC_H
-
-typedef struct {
-       uint rtc_type;          /* your particular chip */
-
-       /*
-        * Following are the addresses of the AS0, AS1, and DATA registers
-        * of these chips.  Note that these are board-specific.
-        */
-       unsigned int nvram_as0;
-       unsigned int nvram_as1;
-       unsigned int nvram_data;
-
-       /*
-        * Define bits to stop external set of regs from changing so
-        * the chip can be read/written reliably.
-        */
-       unsigned char enable_read;
-       unsigned char enable_write;
-
-       /*
-        * Following is the number of AS0 address bits.  This is normally
-        * 8 but some bad hardware routes address lines incorrectly.
-        */
-       int as0_bits;
-
-       int nvram_size; /* Size of NVRAM on chip */
-       int sw_flags;   /* Software control flags */
-
-       /* Following are the register offsets for the particular chip */
-       int year;
-       int month;
-       int day_of_month;
-       int day_of_week;
-       int hours;
-       int minutes;
-       int seconds;
-       int control_b;
-       int control_a;
-       int watchdog;
-       int interrupts;
-       int alarm_date;
-       int alarm_hour;
-       int alarm_minutes;
-       int alarm_seconds;
-       int century;
-       int flags;
-
-       /*
-        * Some RTC chips have their NVRAM buried behind a addr/data pair of
-        * regs on the first level/clock registers.  The following fields
-        * are the addresses for those addr/data regs.
-        */
-       int nvram_addr_reg;
-       int nvram_data_reg;
-} todc_info_t;
-
-/*
- * Define the types of TODC/RTC variants that are supported in
- * arch/ppc/kernel/todc_time.c
- * Make a new one of these for any chip somehow differs from what's already
- * defined.  That way, if you ever need to put in code to touch those
- * bits/registers in todc_time.c, you can put it inside an
- * 'if (todc_info->rtc_type == TODC_TYPE_XXX)' so you won't break
- * anyone else.
- */
-#define TODC_TYPE_MK48T35              1
-#define TODC_TYPE_MK48T37              2
-#define TODC_TYPE_MK48T59              3
-#define TODC_TYPE_DS1693               4       /* Dallas DS1693 RTC */
-#define TODC_TYPE_DS1743               5       /* Dallas DS1743 RTC */
-#define TODC_TYPE_DS1746               6       /* Dallas DS1746 RTC */
-#define TODC_TYPE_DS1747               7       /* Dallas DS1747 RTC */
-#define TODC_TYPE_DS1501               8       /* Dallas DS1501 RTC */
-#define TODC_TYPE_DS1643               9       /* Dallas DS1643 RTC */
-#define TODC_TYPE_PC97307              10      /* PC97307 internal RTC */
-#define TODC_TYPE_DS1557               11      /* Dallas DS1557 RTC */
-#define TODC_TYPE_DS17285              12      /* Dallas DS17285 RTC */
-#define TODC_TYPE_DS1553               13      /* Dallas DS1553 RTC */
-#define TODC_TYPE_MC146818             100     /* Leave room for m48txx's */
-
-/*
- * Bit to clear/set to enable reads/writes to the chip
- */
-#define TODC_MK48TXX_CNTL_A_R          0x40
-#define TODC_MK48TXX_CNTL_A_W          0x80
-#define TODC_MK48TXX_DAY_CB            0x80
-
-#define TODC_DS1501_CNTL_B_TE          0x80
-
-/*
- * Define flag bits used by todc routines.
- */
-#define TODC_FLAG_2_LEVEL_NVRAM                0x00000001
-
-/*
- * Define the values for the various RTC's that should to into the todc_info
- * table.
- * Note: The XXX_NVRAM_SIZE, XXX_NVRAM_ADDR_REG, and XXX_NVRAM_DATA_REG only
- * matter if XXX_SW_FLAGS has TODC_FLAG_2_LEVEL_NVRAM set.
- */
-#define TODC_TYPE_MK48T35_NVRAM_SIZE           0x7ff8
-#define TODC_TYPE_MK48T35_SW_FLAGS             0
-#define TODC_TYPE_MK48T35_YEAR                 0x7fff
-#define TODC_TYPE_MK48T35_MONTH                        0x7ffe
-#define TODC_TYPE_MK48T35_DOM                  0x7ffd  /* Day of Month */
-#define TODC_TYPE_MK48T35_DOW                  0x7ffc  /* Day of Week */
-#define TODC_TYPE_MK48T35_HOURS                        0x7ffb
-#define TODC_TYPE_MK48T35_MINUTES              0x7ffa
-#define TODC_TYPE_MK48T35_SECONDS              0x7ff9
-#define TODC_TYPE_MK48T35_CNTL_B               0x7ff9
-#define TODC_TYPE_MK48T35_CNTL_A               0x7ff8
-#define TODC_TYPE_MK48T35_WATCHDOG             0x0000
-#define TODC_TYPE_MK48T35_INTERRUPTS           0x0000
-#define TODC_TYPE_MK48T35_ALARM_DATE           0x0000
-#define TODC_TYPE_MK48T35_ALARM_HOUR           0x0000
-#define TODC_TYPE_MK48T35_ALARM_MINUTES                0x0000
-#define TODC_TYPE_MK48T35_ALARM_SECONDS                0x0000
-#define TODC_TYPE_MK48T35_CENTURY              0x0000
-#define TODC_TYPE_MK48T35_FLAGS                        0x0000
-#define TODC_TYPE_MK48T35_NVRAM_ADDR_REG       0
-#define TODC_TYPE_MK48T35_NVRAM_DATA_REG       0
-
-#define TODC_TYPE_MK48T37_NVRAM_SIZE           0x7ff0
-#define TODC_TYPE_MK48T37_SW_FLAGS             0
-#define TODC_TYPE_MK48T37_YEAR                 0x7fff
-#define TODC_TYPE_MK48T37_MONTH                        0x7ffe
-#define TODC_TYPE_MK48T37_DOM                  0x7ffd  /* Day of Month */
-#define TODC_TYPE_MK48T37_DOW                  0x7ffc  /* Day of Week */
-#define TODC_TYPE_MK48T37_HOURS                        0x7ffb
-#define TODC_TYPE_MK48T37_MINUTES              0x7ffa
-#define TODC_TYPE_MK48T37_SECONDS              0x7ff9
-#define TODC_TYPE_MK48T37_CNTL_B               0x7ff9
-#define TODC_TYPE_MK48T37_CNTL_A               0x7ff8
-#define TODC_TYPE_MK48T37_WATCHDOG             0x7ff7
-#define TODC_TYPE_MK48T37_INTERRUPTS           0x7ff6
-#define TODC_TYPE_MK48T37_ALARM_DATE           0x7ff5
-#define TODC_TYPE_MK48T37_ALARM_HOUR           0x7ff4
-#define TODC_TYPE_MK48T37_ALARM_MINUTES                0x7ff3
-#define TODC_TYPE_MK48T37_ALARM_SECONDS                0x7ff2
-#define TODC_TYPE_MK48T37_CENTURY              0x7ff1
-#define TODC_TYPE_MK48T37_FLAGS                        0x7ff0
-#define TODC_TYPE_MK48T37_NVRAM_ADDR_REG       0
-#define TODC_TYPE_MK48T37_NVRAM_DATA_REG       0
-
-#define TODC_TYPE_MK48T59_NVRAM_SIZE           0x1ff0
-#define TODC_TYPE_MK48T59_SW_FLAGS             0
-#define TODC_TYPE_MK48T59_YEAR                 0x1fff
-#define TODC_TYPE_MK48T59_MONTH                        0x1ffe
-#define TODC_TYPE_MK48T59_DOM                  0x1ffd  /* Day of Month */
-#define TODC_TYPE_MK48T59_DOW                  0x1ffc  /* Day of Week */
-#define TODC_TYPE_MK48T59_HOURS                        0x1ffb
-#define TODC_TYPE_MK48T59_MINUTES              0x1ffa
-#define TODC_TYPE_MK48T59_SECONDS              0x1ff9
-#define TODC_TYPE_MK48T59_CNTL_B               0x1ff9
-#define TODC_TYPE_MK48T59_CNTL_A               0x1ff8
-#define TODC_TYPE_MK48T59_WATCHDOG             0x1fff
-#define TODC_TYPE_MK48T59_INTERRUPTS           0x1fff
-#define TODC_TYPE_MK48T59_ALARM_DATE           0x1fff
-#define TODC_TYPE_MK48T59_ALARM_HOUR           0x1fff
-#define TODC_TYPE_MK48T59_ALARM_MINUTES                0x1fff
-#define TODC_TYPE_MK48T59_ALARM_SECONDS                0x1fff
-#define TODC_TYPE_MK48T59_CENTURY              0x1fff
-#define TODC_TYPE_MK48T59_FLAGS                        0x1fff
-#define TODC_TYPE_MK48T59_NVRAM_ADDR_REG       0
-#define TODC_TYPE_MK48T59_NVRAM_DATA_REG       0
-
-#define TODC_TYPE_DS1501_NVRAM_SIZE    0x100
-#define TODC_TYPE_DS1501_SW_FLAGS      TODC_FLAG_2_LEVEL_NVRAM
-#define TODC_TYPE_DS1501_YEAR          (TODC_TYPE_DS1501_NVRAM_SIZE + 0x06)
-#define TODC_TYPE_DS1501_MONTH         (TODC_TYPE_DS1501_NVRAM_SIZE + 0x05)
-#define TODC_TYPE_DS1501_DOM           (TODC_TYPE_DS1501_NVRAM_SIZE + 0x04)
-#define TODC_TYPE_DS1501_DOW           (TODC_TYPE_DS1501_NVRAM_SIZE + 0x03)
-#define TODC_TYPE_DS1501_HOURS         (TODC_TYPE_DS1501_NVRAM_SIZE + 0x02)
-#define TODC_TYPE_DS1501_MINUTES       (TODC_TYPE_DS1501_NVRAM_SIZE + 0x01)
-#define TODC_TYPE_DS1501_SECONDS       (TODC_TYPE_DS1501_NVRAM_SIZE + 0x00)
-#define TODC_TYPE_DS1501_CNTL_B                (TODC_TYPE_DS1501_NVRAM_SIZE + 0x0f)
-#define TODC_TYPE_DS1501_CNTL_A                (TODC_TYPE_DS1501_NVRAM_SIZE + 0x0f)
-#define TODC_TYPE_DS1501_WATCHDOG      (TODC_TYPE_DS1501_NVRAM_SIZE + 0xff)
-#define TODC_TYPE_DS1501_INTERRUPTS    (TODC_TYPE_DS1501_NVRAM_SIZE + 0xff)
-#define TODC_TYPE_DS1501_ALARM_DATE    (TODC_TYPE_DS1501_NVRAM_SIZE + 0x0b)
-#define TODC_TYPE_DS1501_ALARM_HOUR    (TODC_TYPE_DS1501_NVRAM_SIZE + 0x0a)
-#define TODC_TYPE_DS1501_ALARM_MINUTES (TODC_TYPE_DS1501_NVRAM_SIZE + 0x09)
-#define TODC_TYPE_DS1501_ALARM_SECONDS (TODC_TYPE_DS1501_NVRAM_SIZE + 0x08)
-#define TODC_TYPE_DS1501_CENTURY       (TODC_TYPE_DS1501_NVRAM_SIZE + 0x07)
-#define TODC_TYPE_DS1501_FLAGS         (TODC_TYPE_DS1501_NVRAM_SIZE + 0xff)
-#define TODC_TYPE_DS1501_NVRAM_ADDR_REG        0x10
-#define TODC_TYPE_DS1501_NVRAM_DATA_REG        0x13
-
-#define TODC_TYPE_DS1553_NVRAM_SIZE            0x1ff0
-#define TODC_TYPE_DS1553_SW_FLAGS              0
-#define TODC_TYPE_DS1553_YEAR                  0x1fff
-#define TODC_TYPE_DS1553_MONTH                 0x1ffe
-#define TODC_TYPE_DS1553_DOM                   0x1ffd  /* Day of Month */
-#define TODC_TYPE_DS1553_DOW                   0x1ffc  /* Day of Week */
-#define TODC_TYPE_DS1553_HOURS                 0x1ffb
-#define TODC_TYPE_DS1553_MINUTES               0x1ffa
-#define TODC_TYPE_DS1553_SECONDS               0x1ff9
-#define TODC_TYPE_DS1553_CNTL_B                        0x1ff9
-#define TODC_TYPE_DS1553_CNTL_A                        0x1ff8  /* control_a R/W regs */
-#define TODC_TYPE_DS1553_WATCHDOG              0x1ff7
-#define TODC_TYPE_DS1553_INTERRUPTS            0x1ff6
-#define TODC_TYPE_DS1553_ALARM_DATE            0x1ff5
-#define TODC_TYPE_DS1553_ALARM_HOUR            0x1ff4
-#define TODC_TYPE_DS1553_ALARM_MINUTES         0x1ff3
-#define TODC_TYPE_DS1553_ALARM_SECONDS         0x1ff2
-#define TODC_TYPE_DS1553_CENTURY               0x1ff8
-#define TODC_TYPE_DS1553_FLAGS                 0x1ff0
-#define TODC_TYPE_DS1553_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1553_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS1557_NVRAM_SIZE            0x7fff0
-#define TODC_TYPE_DS1557_SW_FLAGS              0
-#define TODC_TYPE_DS1557_YEAR                  0x7ffff
-#define TODC_TYPE_DS1557_MONTH                 0x7fffe
-#define TODC_TYPE_DS1557_DOM                   0x7fffd /* Day of Month */
-#define TODC_TYPE_DS1557_DOW                   0x7fffc /* Day of Week */
-#define TODC_TYPE_DS1557_HOURS                 0x7fffb
-#define TODC_TYPE_DS1557_MINUTES               0x7fffa
-#define TODC_TYPE_DS1557_SECONDS               0x7fff9
-#define TODC_TYPE_DS1557_CNTL_B                        0x7fff9
-#define TODC_TYPE_DS1557_CNTL_A                        0x7fff8 /* control_a R/W regs */
-#define TODC_TYPE_DS1557_WATCHDOG              0x7fff7
-#define TODC_TYPE_DS1557_INTERRUPTS            0x7fff6
-#define TODC_TYPE_DS1557_ALARM_DATE            0x7fff5
-#define TODC_TYPE_DS1557_ALARM_HOUR            0x7fff4
-#define TODC_TYPE_DS1557_ALARM_MINUTES         0x7fff3
-#define TODC_TYPE_DS1557_ALARM_SECONDS         0x7fff2
-#define TODC_TYPE_DS1557_CENTURY               0x7fff8
-#define TODC_TYPE_DS1557_FLAGS                 0x7fff0
-#define TODC_TYPE_DS1557_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1557_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS1643_NVRAM_SIZE            0x1ff8
-#define TODC_TYPE_DS1643_SW_FLAGS              0
-#define TODC_TYPE_DS1643_YEAR                  0x1fff
-#define TODC_TYPE_DS1643_MONTH                 0x1ffe
-#define TODC_TYPE_DS1643_DOM                   0x1ffd  /* Day of Month */
-#define TODC_TYPE_DS1643_DOW                   0x1ffc  /* Day of Week */
-#define TODC_TYPE_DS1643_HOURS                 0x1ffb
-#define TODC_TYPE_DS1643_MINUTES               0x1ffa
-#define TODC_TYPE_DS1643_SECONDS               0x1ff9
-#define TODC_TYPE_DS1643_CNTL_B                        0x1ff9
-#define TODC_TYPE_DS1643_CNTL_A                        0x1ff8  /* control_a R/W regs */
-#define TODC_TYPE_DS1643_WATCHDOG              0x1fff
-#define TODC_TYPE_DS1643_INTERRUPTS            0x1fff
-#define TODC_TYPE_DS1643_ALARM_DATE            0x1fff
-#define TODC_TYPE_DS1643_ALARM_HOUR            0x1fff
-#define TODC_TYPE_DS1643_ALARM_MINUTES         0x1fff
-#define TODC_TYPE_DS1643_ALARM_SECONDS         0x1fff
-#define TODC_TYPE_DS1643_CENTURY               0x1ff8
-#define TODC_TYPE_DS1643_FLAGS                 0x1fff
-#define TODC_TYPE_DS1643_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1643_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS1693_NVRAM_SIZE            0 /* Not handled yet */
-#define TODC_TYPE_DS1693_SW_FLAGS              0
-#define TODC_TYPE_DS1693_YEAR                  0x09
-#define TODC_TYPE_DS1693_MONTH                 0x08
-#define TODC_TYPE_DS1693_DOM                   0x07    /* Day of Month */
-#define TODC_TYPE_DS1693_DOW                   0x06    /* Day of Week */
-#define TODC_TYPE_DS1693_HOURS                 0x04
-#define TODC_TYPE_DS1693_MINUTES               0x02
-#define TODC_TYPE_DS1693_SECONDS               0x00
-#define TODC_TYPE_DS1693_CNTL_B                        0x0b
-#define TODC_TYPE_DS1693_CNTL_A                        0x0a
-#define TODC_TYPE_DS1693_WATCHDOG              0xff
-#define TODC_TYPE_DS1693_INTERRUPTS            0xff
-#define TODC_TYPE_DS1693_ALARM_DATE            0x49
-#define TODC_TYPE_DS1693_ALARM_HOUR            0x05
-#define TODC_TYPE_DS1693_ALARM_MINUTES         0x03
-#define TODC_TYPE_DS1693_ALARM_SECONDS         0x01
-#define TODC_TYPE_DS1693_CENTURY               0x48
-#define TODC_TYPE_DS1693_FLAGS                 0xff
-#define TODC_TYPE_DS1693_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1693_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS1743_NVRAM_SIZE            0x1ff8
-#define TODC_TYPE_DS1743_SW_FLAGS              0
-#define TODC_TYPE_DS1743_YEAR                  0x1fff
-#define TODC_TYPE_DS1743_MONTH                 0x1ffe
-#define TODC_TYPE_DS1743_DOM                   0x1ffd  /* Day of Month */
-#define TODC_TYPE_DS1743_DOW                   0x1ffc  /* Day of Week */
-#define TODC_TYPE_DS1743_HOURS                 0x1ffb
-#define TODC_TYPE_DS1743_MINUTES               0x1ffa
-#define TODC_TYPE_DS1743_SECONDS               0x1ff9
-#define TODC_TYPE_DS1743_CNTL_B                        0x1ff9
-#define TODC_TYPE_DS1743_CNTL_A                        0x1ff8  /* control_a R/W regs */
-#define TODC_TYPE_DS1743_WATCHDOG              0x1fff
-#define TODC_TYPE_DS1743_INTERRUPTS            0x1fff
-#define TODC_TYPE_DS1743_ALARM_DATE            0x1fff
-#define TODC_TYPE_DS1743_ALARM_HOUR            0x1fff
-#define TODC_TYPE_DS1743_ALARM_MINUTES         0x1fff
-#define TODC_TYPE_DS1743_ALARM_SECONDS         0x1fff
-#define TODC_TYPE_DS1743_CENTURY               0x1ff8
-#define TODC_TYPE_DS1743_FLAGS                 0x1fff
-#define TODC_TYPE_DS1743_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1743_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS1746_NVRAM_SIZE            0x1fff8
-#define TODC_TYPE_DS1746_SW_FLAGS              0
-#define TODC_TYPE_DS1746_YEAR                  0x1ffff
-#define TODC_TYPE_DS1746_MONTH                 0x1fffe
-#define TODC_TYPE_DS1746_DOM                   0x1fffd /* Day of Month */
-#define TODC_TYPE_DS1746_DOW                   0x1fffc /* Day of Week */
-#define TODC_TYPE_DS1746_HOURS                 0x1fffb
-#define TODC_TYPE_DS1746_MINUTES               0x1fffa
-#define TODC_TYPE_DS1746_SECONDS               0x1fff9
-#define TODC_TYPE_DS1746_CNTL_B                        0x1fff9
-#define TODC_TYPE_DS1746_CNTL_A                        0x1fff8 /* control_a R/W regs */
-#define TODC_TYPE_DS1746_WATCHDOG              0x00000
-#define TODC_TYPE_DS1746_INTERRUPTS            0x00000
-#define TODC_TYPE_DS1746_ALARM_DATE            0x00000
-#define TODC_TYPE_DS1746_ALARM_HOUR            0x00000
-#define TODC_TYPE_DS1746_ALARM_MINUTES         0x00000
-#define TODC_TYPE_DS1746_ALARM_SECONDS         0x00000
-#define TODC_TYPE_DS1746_CENTURY               0x00000
-#define TODC_TYPE_DS1746_FLAGS                 0x00000
-#define TODC_TYPE_DS1746_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1746_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS1747_NVRAM_SIZE            0x7fff8
-#define TODC_TYPE_DS1747_SW_FLAGS              0
-#define TODC_TYPE_DS1747_YEAR                  0x7ffff
-#define TODC_TYPE_DS1747_MONTH                 0x7fffe
-#define TODC_TYPE_DS1747_DOM                   0x7fffd /* Day of Month */
-#define TODC_TYPE_DS1747_DOW                   0x7fffc /* Day of Week */
-#define TODC_TYPE_DS1747_HOURS                 0x7fffb
-#define TODC_TYPE_DS1747_MINUTES               0x7fffa
-#define TODC_TYPE_DS1747_SECONDS               0x7fff9
-#define TODC_TYPE_DS1747_CNTL_B                        0x7fff9
-#define TODC_TYPE_DS1747_CNTL_A                        0x7fff8 /* control_a R/W regs */
-#define TODC_TYPE_DS1747_WATCHDOG              0x00000
-#define TODC_TYPE_DS1747_INTERRUPTS            0x00000
-#define TODC_TYPE_DS1747_ALARM_DATE            0x00000
-#define TODC_TYPE_DS1747_ALARM_HOUR            0x00000
-#define TODC_TYPE_DS1747_ALARM_MINUTES         0x00000
-#define TODC_TYPE_DS1747_ALARM_SECONDS         0x00000
-#define TODC_TYPE_DS1747_CENTURY               0x00000
-#define TODC_TYPE_DS1747_FLAGS                 0x00000
-#define TODC_TYPE_DS1747_NVRAM_ADDR_REG                0
-#define TODC_TYPE_DS1747_NVRAM_DATA_REG                0
-
-#define TODC_TYPE_DS17285_NVRAM_SIZE           (0x1000-0x80) /* 4Kx8 NVRAM (minus RTC regs) */
-#define TODC_TYPE_DS17285_SW_FLAGS             TODC_FLAG_2_LEVEL_NVRAM
-#define TODC_TYPE_DS17285_SECONDS              (TODC_TYPE_DS17285_NVRAM_SIZE + 0x00)
-#define TODC_TYPE_DS17285_ALARM_SECONDS                (TODC_TYPE_DS17285_NVRAM_SIZE + 0x01)
-#define TODC_TYPE_DS17285_MINUTES              (TODC_TYPE_DS17285_NVRAM_SIZE + 0x02)
-#define TODC_TYPE_DS17285_ALARM_MINUTES                (TODC_TYPE_DS17285_NVRAM_SIZE + 0x03)
-#define TODC_TYPE_DS17285_HOURS                        (TODC_TYPE_DS17285_NVRAM_SIZE + 0x04)
-#define TODC_TYPE_DS17285_ALARM_HOUR           (TODC_TYPE_DS17285_NVRAM_SIZE + 0x05)
-#define TODC_TYPE_DS17285_DOW                  (TODC_TYPE_DS17285_NVRAM_SIZE + 0x06)
-#define TODC_TYPE_DS17285_DOM                  (TODC_TYPE_DS17285_NVRAM_SIZE + 0x07)
-#define TODC_TYPE_DS17285_MONTH                        (TODC_TYPE_DS17285_NVRAM_SIZE + 0x08)
-#define TODC_TYPE_DS17285_YEAR                 (TODC_TYPE_DS17285_NVRAM_SIZE + 0x09)
-#define TODC_TYPE_DS17285_CNTL_A               (TODC_TYPE_DS17285_NVRAM_SIZE + 0x0A)
-#define TODC_TYPE_DS17285_CNTL_B               (TODC_TYPE_DS17285_NVRAM_SIZE + 0x0B)
-#define TODC_TYPE_DS17285_CNTL_C               (TODC_TYPE_DS17285_NVRAM_SIZE + 0x0C)
-#define TODC_TYPE_DS17285_CNTL_D               (TODC_TYPE_DS17285_NVRAM_SIZE + 0x0D)
-#define TODC_TYPE_DS17285_WATCHDOG             0
-#define TODC_TYPE_DS17285_INTERRUPTS           0
-#define TODC_TYPE_DS17285_ALARM_DATE           0
-#define TODC_TYPE_DS17285_CENTURY              0
-#define TODC_TYPE_DS17285_FLAGS                        0
-#define TODC_TYPE_DS17285_NVRAM_ADDR_REG       0x50
-#define TODC_TYPE_DS17285_NVRAM_DATA_REG       0x53
-
-#define TODC_TYPE_MC146818_NVRAM_SIZE          0       /* XXXX */
-#define TODC_TYPE_MC146818_SW_FLAGS            0
-#define TODC_TYPE_MC146818_YEAR                        0x09
-#define TODC_TYPE_MC146818_MONTH               0x08
-#define TODC_TYPE_MC146818_DOM                 0x07    /* Day of Month */
-#define TODC_TYPE_MC146818_DOW                 0x06    /* Day of Week */
-#define TODC_TYPE_MC146818_HOURS               0x04
-#define TODC_TYPE_MC146818_MINUTES             0x02
-#define TODC_TYPE_MC146818_SECONDS             0x00
-#define TODC_TYPE_MC146818_CNTL_B              0x0a
-#define TODC_TYPE_MC146818_CNTL_A              0x0b    /* control_a R/W regs */
-#define TODC_TYPE_MC146818_WATCHDOG            0
-#define TODC_TYPE_MC146818_INTERRUPTS          0x0c
-#define TODC_TYPE_MC146818_ALARM_DATE          0xff
-#define TODC_TYPE_MC146818_ALARM_HOUR          0x05
-#define TODC_TYPE_MC146818_ALARM_MINUTES       0x03
-#define TODC_TYPE_MC146818_ALARM_SECONDS       0x01
-#define TODC_TYPE_MC146818_CENTURY             0xff
-#define TODC_TYPE_MC146818_FLAGS               0xff
-#define TODC_TYPE_MC146818_NVRAM_ADDR_REG      0
-#define TODC_TYPE_MC146818_NVRAM_DATA_REG      0
-
-#define TODC_TYPE_PC97307_NVRAM_SIZE           0       /* No NVRAM? */
-#define TODC_TYPE_PC97307_SW_FLAGS             0
-#define TODC_TYPE_PC97307_YEAR                 0x09
-#define TODC_TYPE_PC97307_MONTH                        0x08
-#define TODC_TYPE_PC97307_DOM                  0x07    /* Day of Month */
-#define TODC_TYPE_PC97307_DOW                  0x06    /* Day of Week */
-#define TODC_TYPE_PC97307_HOURS                        0x04
-#define TODC_TYPE_PC97307_MINUTES              0x02
-#define TODC_TYPE_PC97307_SECONDS              0x00
-#define TODC_TYPE_PC97307_CNTL_B               0x0a
-#define TODC_TYPE_PC97307_CNTL_A               0x0b    /* control_a R/W regs */
-#define TODC_TYPE_PC97307_WATCHDOG             0x0c
-#define TODC_TYPE_PC97307_INTERRUPTS           0x0d
-#define TODC_TYPE_PC97307_ALARM_DATE           0xff
-#define TODC_TYPE_PC97307_ALARM_HOUR           0x05
-#define TODC_TYPE_PC97307_ALARM_MINUTES                0x03
-#define TODC_TYPE_PC97307_ALARM_SECONDS                0x01
-#define TODC_TYPE_PC97307_CENTURY              0xff
-#define TODC_TYPE_PC97307_FLAGS                        0xff
-#define TODC_TYPE_PC97307_NVRAM_ADDR_REG       0
-#define TODC_TYPE_PC97307_NVRAM_DATA_REG       0
-
-/*
- * Define macros to allocate and init the todc_info_t table that will
- * be used by the todc_time.c routines.
- */
-#define TODC_ALLOC()                                                   \
-       static todc_info_t todc_info_alloc;                             \
-       todc_info_t *todc_info = &todc_info_alloc;
-
-#define TODC_INIT(clock_type, as0, as1, data, bits) {                  \
-       todc_info->rtc_type = clock_type;                               \
-                                                                       \
-       todc_info->nvram_as0 = (unsigned int)(as0);                     \
-       todc_info->nvram_as1 = (unsigned int)(as1);                     \
-       todc_info->nvram_data = (unsigned int)(data);                   \
-                                                                       \
-       todc_info->as0_bits = (bits);                                   \
-                                                                       \
-       todc_info->nvram_size = clock_type ##_NVRAM_SIZE;               \
-       todc_info->sw_flags = clock_type ##_SW_FLAGS;                   \
-                                                                       \
-       todc_info->year = clock_type ##_YEAR;                           \
-       todc_info->month = clock_type ##_MONTH;                         \
-       todc_info->day_of_month = clock_type ##_DOM;                    \
-       todc_info->day_of_week = clock_type ##_DOW;                     \
-       todc_info->hours = clock_type ##_HOURS;                         \
-       todc_info->minutes = clock_type ##_MINUTES;                     \
-       todc_info->seconds = clock_type ##_SECONDS;                     \
-       todc_info->control_b = clock_type ##_CNTL_B;                    \
-       todc_info->control_a = clock_type ##_CNTL_A;                    \
-       todc_info->watchdog = clock_type ##_WATCHDOG;                   \
-       todc_info->interrupts = clock_type ##_INTERRUPTS;               \
-       todc_info->alarm_date = clock_type ##_ALARM_DATE;               \
-       todc_info->alarm_hour = clock_type ##_ALARM_HOUR;               \
-       todc_info->alarm_minutes = clock_type ##_ALARM_MINUTES;         \
-       todc_info->alarm_seconds = clock_type ##_ALARM_SECONDS;         \
-       todc_info->century = clock_type ##_CENTURY;                     \
-       todc_info->flags = clock_type ##_FLAGS;                         \
-                                                                       \
-       todc_info->nvram_addr_reg = clock_type ##_NVRAM_ADDR_REG;       \
-       todc_info->nvram_data_reg = clock_type ##_NVRAM_DATA_REG;       \
-}
-
-extern todc_info_t *todc_info;
-
-unsigned char todc_direct_read_val(int addr);
-void todc_direct_write_val(int addr, unsigned char val);
-unsigned char todc_m48txx_read_val(int addr);
-void todc_m48txx_write_val(int addr, unsigned char val);
-unsigned char todc_mc146818_read_val(int addr);
-void todc_mc146818_write_val(int addr, unsigned char val);
-
-long todc_time_init(void);
-void todc_get_rtc_time(struct rtc_time *);
-int todc_set_rtc_time(struct rtc_time *);
-void todc_calibrate_decr(void);
-
-#endif                         /* __PPC_KERNEL_TODC_H */
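
For context on the removal: board ports that used this header typically instantiated the table with TODC_ALLOC()/TODC_INIT() and pointed the machine-dependent RTC hooks at the todc_* routines. A reconstructed sketch of that usage, with a made-up board name and register address, under the assumption of the legacy ppc_md machdep hooks:

#include <asm/machdep.h>
#include <asm/todc.h>

TODC_ALLOC();

static void __init myboard_setup_rtc(void)
{
        /* Direct-mapped MK48T59: AS0/AS1 unused, data window at 0xfe000000. */
        TODC_INIT(TODC_TYPE_MK48T59, 0, 0, 0xfe000000, 8);

        ppc_md.time_init      = todc_time_init;
        ppc_md.get_rtc_time   = todc_get_rtc_time;
        ppc_md.set_rtc_time   = todc_set_rtc_time;
        ppc_md.calibrate_decr = todc_calibrate_decr;
}
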
index 9fe7894ee035648f9c1678b496f4dfa723a63b5a..50c014007de754c3019fa7f838160a515b6f38f1 100644 (file)
@@ -32,7 +32,14 @@ static inline int node_to_first_cpu(int node)
 int of_node_to_nid(struct device_node *device);
 
 struct pci_bus;
+#ifdef CONFIG_PCI
 extern int pcibus_to_node(struct pci_bus *bus);
+#else
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+       return -1;
+}
+#endif
 
 #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
                                        CPU_MASK_ALL : \
index 2c702d35a7cfc0f461386a6c514bed4ff968ee6e..4e95d153be8445b4ec1f379d712a402422add1ff 100644 (file)
@@ -98,12 +98,12 @@ typedef struct {
 extern u32 get_vir_csrbase(void);
 extern u32 tsi108_csr_vir_base;
 
-extern inline u32 tsi108_read_reg(u32 reg_offset)
+static inline u32 tsi108_read_reg(u32 reg_offset)
 {
        return in_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset));
 }
 
-extern inline void tsi108_write_reg(u32 reg_offset, u32 val)
+static inline void tsi108_write_reg(u32 reg_offset, u32 val)
 {
        out_be32((volatile u32 *)(tsi108_csr_vir_base + reg_offset), val);
 }
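
Replacing "extern inline" with "static inline" here is more than style: under gnu89 semantics an extern inline function that the compiler declines to inline has no out-of-line definition at all (link failure), while under C99 semantics it would emit an external definition in every includer (multiple-definition failure). "static inline" avoids both by giving each translation unit its own local fallback copy. A minimal sketch of the safe pattern, with a made-up register helper:

/* myio.h -- hypothetical header helper using the pattern adopted above */
#include <stdint.h>

static inline uint32_t my_read_reg(const volatile uint32_t *base,
                                   unsigned int word_offset)
{
        /* Each .c file that includes this header gets its own local copy,
         * so there is never a missing or duplicated out-of-line symbol. */
        return base[word_offset];
}
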
index d6fb56b80453c44d5ad922d25cfc25060655748c..3b363757a2bbf7370cdcff220becf11313f2fd8f 100644 (file)
@@ -97,16 +97,6 @@ typedef struct {
        unsigned long env;
 } func_descr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index d83fc29c2bbf13ed8deed9244e7f71c78dd4c78c..adbf16b8cfbbbf8e52ece9bf71a089845f8ca71a 100644 (file)
@@ -304,7 +304,7 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 
 #ifndef __powerpc64__
 
-extern inline unsigned long copy_from_user(void *to,
+static inline unsigned long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
        unsigned long over;
@@ -319,7 +319,7 @@ extern inline unsigned long copy_from_user(void *to,
        return n;
 }
 
-extern inline unsigned long copy_to_user(void __user *to,
+static inline unsigned long copy_to_user(void __user *to,
                const void *from, unsigned long n)
 {
        unsigned long over;
index 0e4ea37f646602f0ca1908ecd2109d1ffae53fb3..0ae954e3d2584e86a95ae717bdc8956e049b3d46 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-/* On powerpc a system call basically clobbers the same registers like a
- * function call, with the exception of LR (which is needed for the
- * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
- * an error return status).
- */
-
-#define __syscall_nr(nr, type, name, args...)                          \
-       unsigned long __sc_ret, __sc_err;                               \
-       {                                                               \
-               register unsigned long __sc_0  __asm__ ("r0");          \
-               register unsigned long __sc_3  __asm__ ("r3");          \
-               register unsigned long __sc_4  __asm__ ("r4");          \
-               register unsigned long __sc_5  __asm__ ("r5");          \
-               register unsigned long __sc_6  __asm__ ("r6");          \
-               register unsigned long __sc_7  __asm__ ("r7");          \
-               register unsigned long __sc_8  __asm__ ("r8");          \
-                                                                       \
-               __sc_loadargs_##nr(name, args);                         \
-               __asm__ __volatile__                                    \
-                       ("sc           \n\t"                            \
-                        "mfcr %0      "                                \
-                       : "=&r" (__sc_0),                               \
-                         "=&r" (__sc_3),  "=&r" (__sc_4),              \
-                         "=&r" (__sc_5),  "=&r" (__sc_6),              \
-                         "=&r" (__sc_7),  "=&r" (__sc_8)               \
-                       : __sc_asm_input_##nr                           \
-                       : "cr0", "ctr", "memory",                       \
-                         "r9", "r10","r11", "r12");                    \
-               __sc_ret = __sc_3;                                      \
-               __sc_err = __sc_0;                                      \
-       }                                                               \
-       if (__sc_err & 0x10000000)                                      \
-       {                                                               \
-               errno = __sc_ret;                                       \
-               __sc_ret = -1;                                          \
-       }                                                               \
-       return (type) __sc_ret
-
-#define __sc_loadargs_0(name, dummy...)                                        \
-       __sc_0 = __NR_##name
-#define __sc_loadargs_1(name, arg1)                                    \
-       __sc_loadargs_0(name);                                          \
-       __sc_3 = (unsigned long) (arg1)
-#define __sc_loadargs_2(name, arg1, arg2)                              \
-       __sc_loadargs_1(name, arg1);                                    \
-       __sc_4 = (unsigned long) (arg2)
-#define __sc_loadargs_3(name, arg1, arg2, arg3)                                \
-       __sc_loadargs_2(name, arg1, arg2);                              \
-       __sc_5 = (unsigned long) (arg3)
-#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4)                  \
-       __sc_loadargs_3(name, arg1, arg2, arg3);                        \
-       __sc_6 = (unsigned long) (arg4)
-#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5)            \
-       __sc_loadargs_4(name, arg1, arg2, arg3, arg4);                  \
-       __sc_7 = (unsigned long) (arg5)
-#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6)      \
-       __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5);            \
-       __sc_8 = (unsigned long) (arg6)
-
-#define __sc_asm_input_0 "0" (__sc_0)
-#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
-#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
-#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
-#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
-#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
-#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
-
-#define _syscall0(type,name)                                           \
-type name(void)                                                                \
-{                                                                      \
-       __syscall_nr(0, type, name);                                    \
-}
-
-#define _syscall1(type,name,type1,arg1)                                        \
-type name(type1 arg1)                                                  \
-{                                                                      \
-       __syscall_nr(1, type, name, arg1);                              \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)                     \
-type name(type1 arg1, type2 arg2)                                      \
-{                                                                      \
-       __syscall_nr(2, type, name, arg1, arg2);                        \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)          \
-type name(type1 arg1, type2 arg2, type3 arg3)                          \
-{                                                                      \
-       __syscall_nr(3, type, name, arg1, arg2, arg3);                  \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4)              \
-{                                                                      \
-       __syscall_nr(4, type, name, arg1, arg2, arg3, arg4);            \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5)  \
-{                                                                      \
-       __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5);      \
-}
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{                                                                      \
-       __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
-}
-
-
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/linkage.h>
-#include <asm/syscalls.h>
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
@@ -481,16 +371,9 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
 
 /*
  * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
  */
-#ifdef CONFIG_PPC32
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#else
-#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
-#endif
-
+#define cond_syscall(x) \
+       asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall")))
 
 #endif         /* __ASSEMBLY__ */
 #endif         /* __KERNEL__ */
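
The PowerPC-specific ".weak/.set" assembler hack (with its separate ppc32 and ppc64 dot-symbol variants) is replaced by a plain weak alias expressed in C. An illustration of what the new form does, using a made-up syscall name:

/* Illustration only; sys_example is a hypothetical syscall name. */
#include <linux/linkage.h>

cond_syscall(sys_example);
/* expands to:
 *   asmlinkage long sys_example(void)
 *           __attribute__((weak, alias("sys_ni_syscall")));
 * i.e. sys_example is a weak alias for sys_ni_syscall, so it returns
 * -ENOSYS unless a real (strong) definition is linked in, in which case
 * the strong symbol overrides the weak alias.
 */
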
index 4b51d42e1419d2cf0f27d2c40f448f474005e912..0117b544ecbce1ff1c7741f3e55938bd31f34081 100644 (file)
@@ -45,7 +45,6 @@ struct iommu_table;
  * The vio_dev structure is used to describe virtual I/O devices.
  */
 struct vio_dev {
-       struct iommu_table *iommu_table;     /* vio_map_* uses this */
        const char *name;
        const char *type;
        uint32_t unit_address;
index f1d337ed68d5bfdaf16ee9eaf6823ffa2ead81d7..88320a05f0a8738615bc8ea62c5cc606edf9ff15 100644 (file)
 
 #ifdef CONFIG_XMON
 extern void xmon_setup(void);
+extern void xmon_register_spus(struct list_head *list);
 #else
 static inline void xmon_setup(void) { };
+static inline void xmon_register_spus(struct list_head *list) { };
 #endif
 
 #endif /* __KERNEL __ */
index 1d2c4ef81c2233673fa64e8569e78b08ed923121..f7b21ee302b4f1b7fcd1e923fa14c3da0e708f6e 100644 (file)
@@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
        unsigned long vaddr;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-       inc_preempt_count();
+       pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
@@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
        unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
        if (vaddr < KMAP_FIX_BEGIN) { // FIXME
-               dec_preempt_count();
-               preempt_check_resched();
+               pagefault_enable();
                return;
        }
 
@@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
        pte_clear(&init_mm, vaddr, kmap_pte+idx);
        flush_tlb_page(NULL, vaddr);
 #endif
-       dec_preempt_count();
-       preempt_check_resched();
+       pagefault_enable();
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
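
The hand-rolled inc_preempt_count()/dec_preempt_count() pairs are replaced by pagefault_disable()/pagefault_enable(), which have the same preempt-count effect but also make the "inside an atomic kmap" state explicit to the page-fault path. A sketch of the usual caller discipline; copy_one_page() is a made-up helper, the API calls are the ones touched by this hunk:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper showing the usual kmap_atomic() pattern. */
static void copy_one_page(struct page *dst, struct page *src)
{
        void *vto   = kmap_atomic(dst, KM_USER0);
        void *vfrom = kmap_atomic(src, KM_USER1);

        /* No sleeping in here: page faults (and preemption) are disabled. */
        memcpy(vto, vfrom, PAGE_SIZE);

        kunmap_atomic(vfrom, KM_USER1);
        kunmap_atomic(vto, KM_USER0);
}
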
index a4c411b753efe945416a3b6e12d9725b8eb35c42..ccf1a9bb2e435a819eba069c2ddb2ab387e5aa95 100644 (file)
 
 #if defined(CONFIG_4xx)
 #include <asm/ibm4xx.h>
-#elif defined(CONFIG_PPC_MPC52xx)
-#include <asm/mpc52xx.h>
 #elif defined(CONFIG_8xx)
 #include <asm/mpc8xx.h>
 #elif defined(CONFIG_8260)
 #include <asm/mpc8260.h>
-#elif defined(CONFIG_83xx)
-#include <asm/mpc83xx.h>
-#elif defined(CONFIG_85xx)
-#include <asm/mpc85xx.h>
-#elif defined(CONFIG_APUS)
+#elif defined(CONFIG_APUS) || !defined(CONFIG_PCI)
 #define _IO_BASE       0
 #define _ISA_MEM_BASE  0
 #define PCI_DRAM_OFFSET 0
@@ -237,6 +231,14 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
 #define insl(port, buf, nl)    _insl_ns((port)+___IO_BASE, (buf), (nl))
 #define outsl(port, buf, nl)   _outsl_ns((port)+___IO_BASE, (buf), (nl))
 
+#define readsb(a, b, n)                _insb((a), (b), (n))
+#define readsw(a, b, n)                _insw_ns((a), (b), (n))
+#define readsl(a, b, n)                _insl_ns((a), (b), (n))
+#define writesb(a, b, n)       _outsb((a),(b),(n))
+#define writesw(a, b, n)       _outsw_ns((a),(b),(n))
+#define writesl(a, b, n)       _outsl_ns((a),(b),(n))
+
+
 /*
  * On powermacs and 8xx we will get a machine check exception 
  * if we try to read data from a non-existent I/O port. Because
@@ -327,12 +329,12 @@ __do_out_asm(outl, "stwbrx")
 #define inl_p(port)            inl((port))
 #define outl_p(val, port)      outl((val), (port))
 
-extern void _insb(volatile u8 __iomem *port, void *buf, long count);
-extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
-extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
-extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
-extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
-extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
+extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
+extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
 
 
 #define IO_SPACE_LIMIT ~0
index f3c5e5dfa986021c70b4c8faadeeb4f300af11c0..a5277ea4b1944189dcf67a3b310ebdb553198b7a 100644 (file)
@@ -39,7 +39,7 @@
 #define M48T35_RTC_WATCHDOG_RB         0x03
 #define M48T35_RTC_WATCHDOG_BMB        0x7c
 #define M48T35_RTC_WATCHDOG_WDS        0x80
-#define M48T35_RTC_WATCHDOG_ALL        (M48T35_RTC_WATCHDOG_RB|M48T35_RTC_WATCHDOG_BMB|M48T35_RTC_W
+#define M48T35_RTC_WATCHDOG_ALL        (M48T35_RTC_WATCHDOG_RB|M48T35_RTC_WATCHDOG_BMB|M48T35_RTC_W)
 
 #define M48T35_RTC_CONTROL_WRITE       0x80
 #define M48T35_RTC_CONTROL_READ        0x40
index 64c8874618dc01c422f9edd0c0462bae3299f49b..d9d21aa68ba37ffb1cfa9ab549b665b83a60e187 100644 (file)
@@ -29,17 +29,6 @@ struct pt_regs;
 #endif /* __ASSEMBLY__ */
 
 
-#ifdef CONFIG_PCI
-#define _IO_BASE       isa_io_base
-#define _ISA_MEM_BASE  isa_mem_base
-#define PCI_DRAM_OFFSET        pci_dram_offset
-#else
-#define _IO_BASE       0
-#define _ISA_MEM_BASE  0
-#define PCI_DRAM_OFFSET        0
-#endif
-
-
 /* ======================================================================== */
 /* PPC Sys devices definition                                               */
 /* ======================================================================== */
index 02ed2c3257149fe53badc6c58477377457fbe2d8..c3061972309b827f81f69733870b650376ac9779 100644 (file)
 #include <platforms/83xx/mpc834x_sys.h>
 #endif
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
-
 /*
  * The "residual" board information structure the boot loader passes
  * into the kernel.
index 9b4851199c769c3b3553657cc8f0bef22ae331e3..d7e4a79d77fb414a4d524f26f5044511a9739cb8 100644 (file)
 #include <platforms/85xx/tqm85xx.h>
 #endif
 
-#define _IO_BASE        isa_io_base
-#define _ISA_MEM_BASE   isa_mem_base
-#ifdef CONFIG_PCI
-#define PCI_DRAM_OFFSET pci_dram_offset
-#else
-#define PCI_DRAM_OFFSET 0
-#endif
-
 /*
  * The "residual" board information structure the boot loader passes
  * into the kernel.
index 9d5230689b31dbf3d2e4b549301c82c9cca21dd2..6c955d0c1ef0d217199f30ea358dd1f03cf45e31 100644 (file)
@@ -43,6 +43,7 @@ struct pci_controller {
        struct pci_controller *next;
         struct pci_bus *bus;
        void *arch_data;
+       struct device *parent;
 
        int first_busno;
        int last_busno;
index 7664bacdd832f0ce1eacbb20cc406ba946f9bb49..9574fe80a04667d89c951adfd8c226084f0e242f 100644 (file)
@@ -8,12 +8,13 @@
 #ifndef _ASM_S390_SETUP_H
 #define _ASM_S390_SETUP_H
 
+#define COMMAND_LINE_SIZE      896
+
 #ifdef __KERNEL__
 
 #include <asm/types.h>
 
 #define PARMAREA               0x10400
-#define COMMAND_LINE_SIZE      896
 #define MEMORY_CHUNKS          16      /* max 0x7fff */
 #define IPL_PARMBLOCK_ORIGIN   0x2000
 
index ae2951cc83aca1cc634d4b6ab7d56694779da41d..fc5d7cf19324be9160352b7ce0675619278ec23e 100644 (file)
@@ -87,16 +87,6 @@ typedef union {
        } subreg;
 } register_pair;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* ! __s390x__   */
 #endif /* __ASSEMBLY__  */
 #endif /* __KERNEL__    */
index 71d3c21b84f0a4169202680053c3f51cb028ec74..fb6fef97d739c55e5ff32bf1eca477c4697a35e8 100644 (file)
 
 #ifdef __KERNEL__
 
-#include <linux/err.h>
-
-#define __syscall_return(type, res)                         \
-do {                                                        \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-               errno = -(res);                              \
-               res = -1;                                    \
-       }                                                    \
-       return (type) (res);                                 \
-} while (0)
-
-#define _svc_clobber "1", "cc", "memory"
-
-#define _syscall0(type,name)                                   \
-type name(void) {                                              \
-       register long __svcres asm("2");                        \
-       long __res;                                             \
-       asm volatile(                                           \
-               "       .if     %1 < 256\n"                     \
-               "       svc     %b1\n"                          \
-               "       .else\n"                                \
-               "       la      %%r1,%1\n"                      \
-               "       svc     0\n"                            \
-               "       .endif"                                 \
-               : "=d" (__svcres)                               \
-               : "i" (__NR_##name)                             \
-               : _svc_clobber);                                \
-       __res = __svcres;                                       \
-       __syscall_return(type,__res);                           \
-}
-
-#define _syscall1(type,name,type1,arg1)                                \
-type name(type1 arg1) {                                                \
-       register type1 __arg1 asm("2") = arg1;                  \
-       register long __svcres asm("2");                        \
-       long __res;                                             \
-       asm volatile(                                           \
-               "       .if     %1 < 256\n"                     \
-               "       svc     %b1\n"                          \
-               "       .else\n"                                \
-               "       la      %%r1,%1\n"                      \
-               "       svc     0\n"                            \
-               "       .endif"                                 \
-               : "=d" (__svcres)                               \
-               : "i" (__NR_##name),                            \
-                 "0" (__arg1)                                  \
-               : _svc_clobber);                                \
-       __res = __svcres;                                       \
-       __syscall_return(type,__res);                           \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2)             \
-type name(type1 arg1, type2 arg2) {                            \
-       register type1 __arg1 asm("2") = arg1;                  \
-       register type2 __arg2 asm("3") = arg2;                  \
-       register long __svcres asm("2");                        \
-       long __res;                                             \
-       asm volatile(                                           \
-               "       .if     %1 < 256\n"                     \
-               "       svc     %b1\n"                          \
-               "       .else\n"                                \
-               "       la      %%r1,%1\n"                      \
-               "       svc     0\n"                            \
-               "       .endif"                                 \
-               : "=d" (__svcres)                               \
-               : "i" (__NR_##name),                            \
-                 "0" (__arg1),                                 \
-                 "d" (__arg2)                                  \
-               : _svc_clobber );                               \
-       __res = __svcres;                                       \
-       __syscall_return(type,__res);                           \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)  \
-type name(type1 arg1, type2 arg2, type3 arg3) {                        \
-       register type1 __arg1 asm("2") = arg1;                  \
-       register type2 __arg2 asm("3") = arg2;                  \
-       register type3 __arg3 asm("4") = arg3;                  \
-       register long __svcres asm("2");                        \
-       long __res;                                             \
-       asm volatile(                                           \
-               "       .if     %1 < 256\n"                     \
-               "       svc     %b1\n"                          \
-               "       .else\n"                                \
-               "       la      %%r1,%1\n"                      \
-               "       svc     0\n"                            \
-               "       .endif"                                 \
-               : "=d" (__svcres)                               \
-               : "i" (__NR_##name),                            \
-                 "0" (__arg1),                                 \
-                 "d" (__arg2),                                 \
-                 "d" (__arg3)                                  \
-               : _svc_clobber);                                \
-       __res = __svcres;                                       \
-       __syscall_return(type,__res);                           \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,  \
-                 type4,name4)                                  \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {    \
-       register type1 __arg1 asm("2") = arg1;                  \
-       register type2 __arg2 asm("3") = arg2;                  \
-       register type3 __arg3 asm("4") = arg3;                  \
-       register type4 __arg4 asm("5") = arg4;                  \
-       register long __svcres asm("2");                        \
-       long __res;                                             \
-       asm volatile(                                           \
-               "       .if     %1 < 256\n"                     \
-               "       svc     %b1\n"                          \
-               "       .else\n"                                \
-               "       la      %%r1,%1\n"                      \
-               "       svc     0\n"                            \
-               "       .endif"                                 \
-               : "=d" (__svcres)                               \
-               : "i" (__NR_##name),                            \
-                 "0" (__arg1),                                 \
-                 "d" (__arg2),                                 \
-                 "d" (__arg3),                                 \
-                 "d" (__arg4)                                  \
-               : _svc_clobber);                                \
-       __res = __svcres;                                       \
-       __syscall_return(type,__res);                           \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,  \
-                 type4,name4,type5,name5)                      \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4,      \
-         type5 arg5) {                                         \
-       register type1 __arg1 asm("2") = arg1;                  \
-       register type2 __arg2 asm("3") = arg2;                  \
-       register type3 __arg3 asm("4") = arg3;                  \
-       register type4 __arg4 asm("5") = arg4;                  \
-       register type5 __arg5 asm("6") = arg5;                  \
-       register long __svcres asm("2");                        \
-       long __res;                                             \
-       asm volatile(                                           \
-               "       .if     %1 < 256\n"                     \
-               "       svc     %b1\n"                          \
-               "       .else\n"                                \
-               "       la      %%r1,%1\n"                      \
-               "       svc     0\n"                            \
-               "       .endif"                                 \
-               : "=d" (__svcres)                               \
-               : "i" (__NR_##name),                            \
-                 "0" (__arg1),                                 \
-                 "d" (__arg2),                                 \
-                 "d" (__arg3),                                 \
-                 "d" (__arg4),                                 \
-                 "d" (__arg5)                                  \
-               : _svc_clobber);                                \
-       __res = __svcres;                                       \
-       __syscall_return(type,__res);                           \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
index 8bdc1ba56f736570788c445bf5ef4f4dc1bde5a8..28305c3cbddf88d770f7c22020bec367121e74a2 100644 (file)
@@ -28,11 +28,11 @@ static inline void atomic_add(int i, atomic_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__ (
-"1:    movli.l @%3, %0         ! atomic_add    \n"
-"      add     %2, %0                          \n"
-"      movco.l %0, @%3                         \n"
+"1:    movli.l @%2, %0         ! atomic_add    \n"
+"      add     %1, %0                          \n"
+"      movco.l %0, @%2                         \n"
 "      bf      1b                              \n"
-       : "=&z" (tmp), "=r" (&v->counter)
+       : "=&z" (tmp)
        : "r" (i), "r" (&v->counter)
        : "t");
 #else
@@ -50,11 +50,11 @@ static inline void atomic_sub(int i, atomic_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__ (
-"1:    movli.l @%3, %0         ! atomic_sub    \n"
-"      sub     %2, %0                          \n"
-"      movco.l %0, @%3                         \n"
+"1:    movli.l @%2, %0         ! atomic_sub    \n"
+"      sub     %1, %0                          \n"
+"      movco.l %0, @%2                         \n"
 "      bf      1b                              \n"
-       : "=&z" (tmp), "=r" (&v->counter)
+       : "=&z" (tmp)
        : "r" (i), "r" (&v->counter)
        : "t");
 #else
@@ -80,12 +80,12 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 #ifdef CONFIG_CPU_SH4A
        __asm__ __volatile__ (
-"1:    movli.l @%3, %0         ! atomic_add_return     \n"
-"      add     %2, %0                                  \n"
-"      movco.l %0, @%3                                 \n"
+"1:    movli.l @%2, %0         ! atomic_add_return     \n"
+"      add     %1, %0                                  \n"
+"      movco.l %0, @%2                                 \n"
 "      bf      1b                                      \n"
 "      synco                                           \n"
-       : "=&z" (temp), "=r" (&v->counter)
+       : "=&z" (temp)
        : "r" (i), "r" (&v->counter)
        : "t");
 #else
@@ -109,12 +109,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 #ifdef CONFIG_CPU_SH4A
        __asm__ __volatile__ (
-"1:    movli.l @%3, %0         ! atomic_sub_return     \n"
-"      sub     %2, %0                                  \n"
-"      movco.l %0, @%3                                 \n"
+"1:    movli.l @%2, %0         ! atomic_sub_return     \n"
+"      sub     %1, %0                                  \n"
+"      movco.l %0, @%2                                 \n"
 "      bf      1b                                      \n"
 "      synco                                           \n"
-       : "=&z" (temp), "=r" (&v->counter)
+       : "=&z" (temp)
        : "r" (i), "r" (&v->counter)
        : "t");
 #else
@@ -186,11 +186,11 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__ (
-"1:    movli.l @%3, %0         ! atomic_clear_mask     \n"
-"      and     %2, %0                                  \n"
-"      movco.l %0, @%3                                 \n"
+"1:    movli.l @%2, %0         ! atomic_clear_mask     \n"
+"      and     %1, %0                                  \n"
+"      movco.l %0, @%2                                 \n"
 "      bf      1b                                      \n"
-       : "=&z" (tmp), "=r" (&v->counter)
+       : "=&z" (tmp)
        : "r" (~mask), "r" (&v->counter)
        : "t");
 #else
@@ -208,11 +208,11 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
        unsigned long tmp;
 
        __asm__ __volatile__ (
-"1:    movli.l @%3, %0         ! atomic_set_mask       \n"
-"      or      %2, %0                                  \n"
-"      movco.l %0, @%3                                 \n"
+"1:    movli.l @%2, %0         ! atomic_set_mask       \n"
+"      or      %1, %0                                  \n"
+"      movco.l %0, @%2                                 \n"
 "      bf      1b                                      \n"
-       : "=&z" (tmp), "=r" (&v->counter)
+       : "=&z" (tmp)
        : "r" (mask), "r" (&v->counter)
        : "t");
 #else
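
Read back in full, these atomic.h hunks drop the unneeded second output operand ("=r" (&v->counter)) and renumber the remaining %n references down by one. As a sketch only, reassembled from the atomic_sub hunk above (the function prologue and the surrounding #ifdef/#else fallback are inferred, not shown in this excerpt), the movli.l/movco.l path ends up reading:

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

Only %0 (tmp) is written by the template, so the address of v->counter belongs in the input list, as the corrected constraints show.
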
index beeea40f549ec391248d0a0673851f478c3dad17..795047da5e17e28cc0f433209a4d5afc48284ca2 100644 (file)
@@ -23,16 +23,20 @@ static void __init check_bugs(void)
        cpu_data->loops_per_jiffy = loops_per_jiffy;
 
        switch (cpu_data->type) {
-       case CPU_SH7604:
+       case CPU_SH7604 ... CPU_SH7619:
                *p++ = '2';
                break;
+       case CPU_SH7206:
+               *p++ = '2';
+               *p++ = 'a';
+               break;
        case CPU_SH7705 ... CPU_SH7300:
                *p++ = '3';
                break;
        case CPU_SH7750 ... CPU_SH4_501:
                *p++ = '4';
                break;
-       case CPU_SH7770 ... CPU_SH7781:
+       case CPU_SH7770 ... CPU_SH7785:
                *p++ = '4';
                *p++ = 'a';
                break;
index fdfb75b30f0d86e935b66a2166cbeccc23da1806..1df92807f8c5b1bd67092d9c52e9338d79d7ab98 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/seq_file.h>
+#include <linux/clk.h>
 
 struct clk;
 
@@ -18,7 +19,7 @@ struct clk_ops {
 struct clk {
        struct list_head        node;
        const char              *name;
-
+       int                     id;
        struct module           *owner;
 
        struct clk              *parent;
@@ -40,22 +41,13 @@ void arch_init_clk_ops(struct clk_ops **, int type);
 int clk_init(void);
 
 int __clk_enable(struct clk *);
-int clk_enable(struct clk *);
-
 void __clk_disable(struct clk *);
-void clk_disable(struct clk *);
 
-int clk_set_rate(struct clk *, unsigned long rate);
-unsigned long clk_get_rate(struct clk *);
 void clk_recalc_rate(struct clk *);
 
-struct clk *clk_get(const char *id);
-void clk_put(struct clk *);
-
 int clk_register(struct clk *);
 void clk_unregister(struct clk *);
 
 int show_clocks(struct seq_file *m);
 
 #endif /* __ASM_SH_CLOCK_H */
-
index cd96402e8562d33cc0d948396c84937c56edac69..20b9796842dcf9a875cd0ed7747b54a518471028 100644 (file)
@@ -12,6 +12,7 @@
 
 #define L1_CACHE_SHIFT 4
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7604)
 #define CCR            0xfffffe92      /* Address of Cache Control Register */
 
 #define CCR_CACHE_CE   0x01    /* Cache enable */
 #define CCR_CACHE_ORA          CCR_CACHE_TW
 #define CCR_CACHE_WT           0x00    /* SH-2 is _always_ write-through */
 
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define CCR1           0xffffffec
+#define CCR            CCR1
+
+#define CCR_CACHE_CE   0x01    /* Cache enable */
+#define CCR_CACHE_WT   0x06    /* CCR[bit1=1,bit2=1] */
+                               /* 0x00000000-0x7fffffff: Write-through  */
+                               /* 0x80000000-0x9fffffff: Write-back     */
+                                /* 0xc0000000-0xdfffffff: Write-through  */
+#define CCR_CACHE_CB   0x00    /* CCR[bit1=0,bit2=0] */
+                               /* 0x00000000-0x7fffffff: Write-back     */
+                               /* 0x80000000-0x9fffffff: Write-through  */
+                                /* 0xc0000000-0xdfffffff: Write-back     */
+#define CCR_CACHE_CF   0x08    /* Cache invalidate */
+
+#define CACHE_OC_ADDRESS_ARRAY 0xf0000000
+#define CACHE_OC_DATA_ARRAY    0xf1000000
+
+#define CCR_CACHE_ENABLE       CCR_CACHE_CE
+#define CCR_CACHE_INVALIDATE   CCR_CACHE_CF
+#endif
 #endif /* __ASM_CPU_SH2_CACHE_H */
 
diff --git a/include/asm-sh/cpu-sh2/freq.h b/include/asm-sh/cpu-sh2/freq.h
new file mode 100644 (file)
index 0000000..31de475
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * include/asm-sh/cpu-sh2/freq.h
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2_FREQ_H
+#define __ASM_CPU_SH2_FREQ_H
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7619)
+#define FREQCR 0xf815ff80
+#endif
+
+#endif /* __ASM_CPU_SH2_FREQ_H */
+
diff --git a/include/asm-sh/cpu-sh2/mmu_context.h b/include/asm-sh/cpu-sh2/mmu_context.h
new file mode 100644 (file)
index 0000000..beeb299
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * include/asm-sh/cpu-sh2/mmu_context.h
+ *
+ * Copyright (C) 2003  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2_MMU_CONTEXT_H
+#define __ASM_CPU_SH2_MMU_CONTEXT_H
+
+/* No MMU */
+
+#endif /* __ASM_CPU_SH2_MMU_CONTEXT_H */
+
diff --git a/include/asm-sh/cpu-sh2/timer.h b/include/asm-sh/cpu-sh2/timer.h
new file mode 100644 (file)
index 0000000..a39c241
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ASM_CPU_SH2_TIMER_H
+#define __ASM_CPU_SH2_TIMER_H
+
+/* Nothing needed yet */
+
+#endif /* __ASM_CPU_SH2_TIMER_H */
diff --git a/include/asm-sh/cpu-sh2a/addrspace.h b/include/asm-sh/cpu-sh2a/addrspace.h
new file mode 100644 (file)
index 0000000..3d2e9aa
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/addrspace.h>
diff --git a/include/asm-sh/cpu-sh2a/cache.h b/include/asm-sh/cpu-sh2a/cache.h
new file mode 100644 (file)
index 0000000..3e4b9e4
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * include/asm-sh/cpu-sh2a/cache.h
+ *
+ * Copyright (C) 2004 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2A_CACHE_H
+#define __ASM_CPU_SH2A_CACHE_H
+
+#define L1_CACHE_SHIFT 4
+
+#define CCR1           0xfffc1000
+#define CCR2           0xfffc1004
+
+/* CCR1 behaves more like the traditional CCR */
+#define CCR            CCR1
+
+/*
+ * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
+ * listed here are reserved.
+ */
+#define CCR_CACHE_CB   0x0000  /* Hack */
+#define CCR_CACHE_OCE  0x0001
+#define CCR_CACHE_WT   0x0002
+#define CCR_CACHE_OCI  0x0008  /* OCF */
+#define CCR_CACHE_ICE  0x0100
+#define CCR_CACHE_ICI  0x0800  /* ICF */
+
+#define CACHE_IC_ADDRESS_ARRAY 0xf0000000
+#define CACHE_OC_ADDRESS_ARRAY 0xf0800000
+
+#define CCR_CACHE_ENABLE       (CCR_CACHE_OCE | CCR_CACHE_ICE)
+#define CCR_CACHE_INVALIDATE   (CCR_CACHE_OCI | CCR_CACHE_ICI)
+
+#endif /* __ASM_CPU_SH2A_CACHE_H */
+
diff --git a/include/asm-sh/cpu-sh2a/cacheflush.h b/include/asm-sh/cpu-sh2a/cacheflush.h
new file mode 100644 (file)
index 0000000..fa3186c
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/cacheflush.h>
diff --git a/include/asm-sh/cpu-sh2a/dma.h b/include/asm-sh/cpu-sh2a/dma.h
new file mode 100644 (file)
index 0000000..0d5ad85
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/dma.h>
diff --git a/include/asm-sh/cpu-sh2a/freq.h b/include/asm-sh/cpu-sh2a/freq.h
new file mode 100644 (file)
index 0000000..e518fff
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * include/asm-sh/cpu-sh2a/freq.h
+ *
+ * Copyright (C) 2006  Yoshinori Sato
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_CPU_SH2A_FREQ_H
+#define __ASM_CPU_SH2A_FREQ_H
+
+#if defined(CONFIG_CPU_SUBTYPE_SH7206)
+#define FREQCR 0xfffe0010
+#endif
+
+#endif /* __ASM_CPU_SH2A_FREQ_H */
+
diff --git a/include/asm-sh/cpu-sh2a/mmu_context.h b/include/asm-sh/cpu-sh2a/mmu_context.h
new file mode 100644 (file)
index 0000000..cd2387f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/mmu_context.h>
diff --git a/include/asm-sh/cpu-sh2a/timer.h b/include/asm-sh/cpu-sh2a/timer.h
new file mode 100644 (file)
index 0000000..fee504a
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/timer.h>
diff --git a/include/asm-sh/cpu-sh2a/ubc.h b/include/asm-sh/cpu-sh2a/ubc.h
new file mode 100644 (file)
index 0000000..cf28062
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/ubc.h>
diff --git a/include/asm-sh/cpu-sh2a/watchdog.h b/include/asm-sh/cpu-sh2a/watchdog.h
new file mode 100644 (file)
index 0000000..c1b3e24
--- /dev/null
@@ -0,0 +1 @@
+#include <asm/cpu-sh2/watchdog.h>
index 56cd4b97723212bd2a3cd988464f202b34911f91..37ab0c131a4d9913036ab1997db036877bec3fda 100644 (file)
@@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
        consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        consistent_sync(vaddr, size, (int)dir);
index d9daa028689f91b675583c892c00aa48925b4534..faf3051cd4290fd942c0c67d00fe3035607694dc 100644 (file)
@@ -14,9 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/sysdev.h>
-#include <linux/device.h>
 #include <asm/cpu/dma.h>
-#include <asm/semaphore.h>
 
 /* The maximum address that we can perform a DMA transfer to on this platform */
 /* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any
  * DMAC (dma_info) flags
  */
 enum {
-       DMAC_CHANNELS_CONFIGURED        = 0x00,
-       DMAC_CHANNELS_TEI_CAPABLE       = 0x01,
+       DMAC_CHANNELS_CONFIGURED        = 0x01,
+       DMAC_CHANNELS_TEI_CAPABLE       = 0x02, /* Transfer end interrupt */
 };
 
 /*
  * DMA channel capabilities / flags
  */
 enum {
-       DMA_TEI_CAPABLE                 = 0x01,
-       DMA_CONFIGURED                  = 0x02,
+       DMA_CONFIGURED                  = 0x01,
+
+       /*
+        * Transfer end interrupt, inherited from DMAC.
+        * wait_queue used in dma_wait_for_completion.
+        */
+       DMA_TEI_CAPABLE                 = 0x02,
 };
 
 extern spinlock_t dma_spin_lock;
@@ -68,28 +71,31 @@ struct dma_ops {
 
        int (*get_residue)(struct dma_channel *chan);
        int (*xfer)(struct dma_channel *chan);
-       void (*configure)(struct dma_channel *chan, unsigned long flags);
+       int (*configure)(struct dma_channel *chan, unsigned long flags);
+       int (*extend)(struct dma_channel *chan, unsigned long op, void *param);
 };
 
 struct dma_channel {
-       char dev_id[16];
+       char dev_id[16];                /* unique per-DMAC channel name */
 
-       unsigned int chan;              /* Physical channel number */
+       unsigned int chan;              /* DMAC channel number */
        unsigned int vchan;             /* Virtual channel number */
+
        unsigned int mode;
        unsigned int count;
 
        unsigned long sar;
        unsigned long dar;
 
+       const char **caps;
+
        unsigned long flags;
        atomic_t busy;
 
-       struct semaphore sem;
        wait_queue_head_t wait_queue;
 
        struct sys_device dev;
-       char *name;
+       void *priv_data;
 };
 
 struct dma_info {
@@ -103,6 +109,12 @@ struct dma_info {
        struct dma_channel *channels;
 
        struct list_head list;
+       int first_channel_nr;
+};
+
+struct dma_chan_caps {
+       int ch_num;
+       const char **caplist;
 };
 
 #define to_dma_channel(channel) container_of(channel, struct dma_channel, dev)
@@ -121,6 +133,8 @@ extern int dma_xfer(unsigned int chan, unsigned long from,
 #define dma_read_page(chan, from, to)  \
        dma_read(chan, from, to, PAGE_SIZE)
 
+extern int request_dma_bycap(const char **dmac, const char **caps,
+                            const char *dev_id);
 extern int request_dma(unsigned int chan, const char *dev_id);
 extern void free_dma(unsigned int chan);
 extern int get_dma_residue(unsigned int chan);
@@ -131,6 +145,10 @@ extern void dma_configure_channel(unsigned int chan, unsigned long flags);
 
 extern int register_dmac(struct dma_info *info);
 extern void unregister_dmac(struct dma_info *info);
+extern struct dma_info *get_dma_info_by_name(const char *dmac_name);
+
+extern int dma_extend(unsigned int chan, unsigned long op, void *param);
+extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist);
 
 #ifdef CONFIG_SYSFS
 /* arch/sh/drivers/dma/dma-sysfs.c */
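
The dma.h changes add capability-based channel lookup alongside the existing request_dma(). A minimal sketch of how a driver might use the new request_dma_bycap() prototype above; the DMAC name, capability string and device id are illustrative only, the double-pointer arguments are assumed to be NULL-terminated lists, and the return value is assumed to be a channel number or a negative errno:

static int example_claim_channel(void)
{
	const char *dmac_list[] = { "example-dmac", NULL };	/* hypothetical DMAC name */
	const char *cap_list[]  = { "example-cap", NULL };	/* hypothetical capability */
	int chan;

	chan = request_dma_bycap(dmac_list, cap_list, "example-dev");
	if (chan < 0)
		return chan;		/* no matching channel found */

	/* ... program and start the transfer on 'chan' ... */

	free_dma(chan);
	return 0;
}
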
index fc050fd7645e0e11c29fcf63cf7e1554dca148e7..43ca244564b104a6d3438a7c3d86225fc5e12290 100644 (file)
@@ -74,7 +74,7 @@ typedef struct user_fpu_struct elf_fpregset_t;
 #define ELF_ARCH       EM_SH
 
 #define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE      4096
+#define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
diff --git a/include/asm-sh/entry-macros.S b/include/asm-sh/entry-macros.S
new file mode 100644 (file)
index 0000000..500030e
--- /dev/null
@@ -0,0 +1,33 @@
+! entry.S macro define
+       
+       .macro  cli
+       stc     sr, r0
+       or      #0xf0, r0
+       ldc     r0, sr
+       .endm
+
+       .macro  sti
+       mov     #0xf0, r11
+       extu.b  r11, r11
+       not     r11, r11
+       stc     sr, r10
+       and     r11, r10
+#ifdef CONFIG_HAS_SR_RB
+       stc     k_g_imask, r11
+       or      r11, r10
+#endif
+       ldc     r10, sr
+       .endm
+
+       .macro  get_current_thread_info, ti, tmp
+#ifdef CONFIG_HAS_SR_RB
+       stc     r7_bank, \ti
+#else
+       mov     #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp
+       shll8   \tmp
+       shll2   \tmp
+       mov     r15, \ti
+       and     \tmp, \ti
+#endif 
+       .endm
+
diff --git a/include/asm-sh/irq-sh73180.h b/include/asm-sh/irq-sh73180.h
deleted file mode 100644 (file)
index b28af9a..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH73180_H
-#define __ASM_SH_IRQ_SH73180_H
-
-/*
- * linux/include/asm-sh/irq-sh73180.h
- *
- * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp>
- */
-
-#undef INTC_IPRA
-#undef INTC_IPRB
-#undef INTC_IPRC
-#undef INTC_IPRD
-
-#undef DMTE0_IRQ
-#undef DMTE1_IRQ
-#undef DMTE2_IRQ
-#undef DMTE3_IRQ
-#undef DMTE4_IRQ
-#undef DMTE5_IRQ
-#undef DMTE6_IRQ
-#undef DMTE7_IRQ
-#undef DMAE_IRQ
-#undef DMA_IPR_ADDR
-#undef DMA_IPR_POS
-#undef DMA_PRIORITY
-
-#undef INTC_IMCR0
-#undef INTC_IMCR1
-#undef INTC_IMCR2
-#undef INTC_IMCR3
-#undef INTC_IMCR4
-#undef INTC_IMCR5
-#undef INTC_IMCR6
-#undef INTC_IMCR7
-#undef INTC_IMCR8
-#undef INTC_IMCR9
-#undef INTC_IMCR10
-
-
-#define INTC_IPRA      0xA4080000UL
-#define INTC_IPRB      0xA4080004UL
-#define INTC_IPRC      0xA4080008UL
-#define INTC_IPRD      0xA408000CUL
-#define INTC_IPRE      0xA4080010UL
-#define INTC_IPRF      0xA4080014UL
-#define INTC_IPRG      0xA4080018UL
-#define INTC_IPRH      0xA408001CUL
-#define INTC_IPRI      0xA4080020UL
-#define INTC_IPRJ      0xA4080024UL
-#define INTC_IPRK      0xA4080028UL
-
-#define INTC_IMR0      0xA4080080UL
-#define INTC_IMR1      0xA4080084UL
-#define INTC_IMR2      0xA4080088UL
-#define INTC_IMR3      0xA408008CUL
-#define INTC_IMR4      0xA4080090UL
-#define INTC_IMR5      0xA4080094UL
-#define INTC_IMR6      0xA4080098UL
-#define INTC_IMR7      0xA408009CUL
-#define INTC_IMR8      0xA40800A0UL
-#define INTC_IMR9      0xA40800A4UL
-#define INTC_IMR10     0xA40800A8UL
-#define INTC_IMR11     0xA40800ACUL
-
-#define INTC_IMCR0     0xA40800C0UL
-#define INTC_IMCR1     0xA40800C4UL
-#define INTC_IMCR2     0xA40800C8UL
-#define INTC_IMCR3     0xA40800CCUL
-#define INTC_IMCR4     0xA40800D0UL
-#define INTC_IMCR5     0xA40800D4UL
-#define INTC_IMCR6     0xA40800D8UL
-#define INTC_IMCR7     0xA40800DCUL
-#define INTC_IMCR8     0xA40800E0UL
-#define INTC_IMCR9     0xA40800E4UL
-#define INTC_IMCR10    0xA40800E8UL
-#define INTC_IMCR11    0xA40800ECUL
-
-#define INTC_ICR0      0xA4140000UL
-#define INTC_ICR1      0xA414001CUL
-
-#define INTMSK0                0xa4140044
-#define INTMSKCLR0     0xa4140064
-#define INTC_INTPRI0   0xa4140010
-
-/*
-  NOTE:
-
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-
-/* TMU0 */
-#define TMU0_IRQ       16
-#define TMU0_IPR_ADDR  INTC_IPRA
-#define TMU0_IPR_POS    3
-#define TMU0_PRIORITY   2
-
-#define TIMER_IRQ       16
-#define TIMER_IPR_ADDR  INTC_IPRA
-#define TIMER_IPR_POS    3
-#define TIMER_PRIORITY   2
-
-/* TMU1 */
-#define TMU1_IRQ       17
-#define TMU1_IPR_ADDR  INTC_IPRA
-#define TMU1_IPR_POS    2
-#define TMU1_PRIORITY   2
-
-/* TMU2 */
-#define TMU2_IRQ       18
-#define TMU2_IPR_ADDR  INTC_IPRA
-#define TMU2_IPR_POS    1
-#define TMU2_PRIORITY   2
-
-/* LCDC */
-#define LCDC_IRQ       28
-#define LCDC_IPR_ADDR  INTC_IPRB
-#define LCDC_IPR_POS    2
-#define LCDC_PRIORITY   2
-
-/* VIO (Video I/O) */
-#define CEU_IRQ                52
-#define BEU_IRQ                53
-#define VEU_IRQ                54
-#define VOU_IRQ                55
-#define VIO_IPR_ADDR   INTC_IPRE
-#define VIO_IPR_POS     2
-#define VIO_PRIORITY    2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ                56
-#define MFI_IPR_ADDR   INTC_IPRE
-#define MFI_IPR_POS     1
-#define MFI_PRIORITY    2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ                60
-#define VPU_IPR_ADDR   INTC_IPRE
-#define VPU_IPR_POS     0
-#define VPU_PRIORITY    2
-
-/* 3DG */
-#define TDG_IRQ                63
-#define TDG_IPR_ADDR   INTC_IPRJ
-#define TDG_IPR_POS     2
-#define TDG_PRIORITY    2
-
-/* DMAC(1) */
-#define DMTE0_IRQ      48
-#define DMTE1_IRQ      49
-#define DMTE2_IRQ      50
-#define DMTE3_IRQ      51
-#define DMA1_IPR_ADDR  INTC_IPRE
-#define DMA1_IPR_POS   3
-#define DMA1_PRIORITY  7
-
-/* DMAC(2) */
-#define DMTE4_IRQ      76
-#define DMTE5_IRQ      77
-#define DMA2_IPR_ADDR  INTC_IPRF
-#define DMA2_IPR_POS   2
-#define DMA2_PRIORITY  7
-
-/* SCIF0 */
-#define SCIF_ERI_IRQ   80
-#define SCIF_RXI_IRQ   81
-#define SCIF_BRI_IRQ   82
-#define SCIF_TXI_IRQ   83
-#define SCIF_IPR_ADDR  INTC_IPRG
-#define SCIF_IPR_POS   3
-#define SCIF_PRIORITY  3
-
-/* SIOF0 */
-#define SIOF0_IRQ      84
-#define SIOF0_IPR_ADDR INTC_IPRH
-#define SIOF0_IPR_POS  3
-#define SIOF0_PRIORITY 3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ      92
-#define FLTEND_IRQ     93
-#define FLTRQ0_IRQ     94
-#define FLTRQ1_IRQ     95
-#define FLCTL_IPR_ADDR INTC_IPRH
-#define FLCTL_IPR_POS  1
-#define FLCTL_PRIORITY 3
-
-/* IIC(0) (IIC Bus Interface) */
-#define IIC0_ALI_IRQ   96
-#define IIC0_TACKI_IRQ 97
-#define IIC0_WAITI_IRQ 98
-#define IIC0_DTEI_IRQ  99
-#define IIC0_IPR_ADDR  INTC_IPRH
-#define IIC0_IPR_POS   0
-#define IIC0_PRIORITY  3
-
-/* IIC(1) (IIC Bus Interface) */
-#define IIC1_ALI_IRQ   44
-#define IIC1_TACKI_IRQ 45
-#define IIC1_WAITI_IRQ 46
-#define IIC1_DTEI_IRQ  47
-#define IIC1_IPR_ADDR  INTC_IPRG
-#define IIC1_IPR_POS   0
-#define IIC1_PRIORITY  3
-
-/* SIO0 */
-#define SIO0_IRQ       88
-#define SIO0_IPR_ADDR  INTC_IPRI
-#define SIO0_IPR_POS   3
-#define SIO0_PRIORITY  3
-
-/* SDHI */
-#define SDHI_SDHII0_IRQ        100
-#define SDHI_SDHII1_IRQ        101
-#define SDHI_SDHII2_IRQ        102
-#define SDHI_SDHII3_IRQ        103
-#define SDHI_IPR_ADDR  INTC_IPRK
-#define SDHI_IPR_POS   0
-#define SDHI_PRIORITY  3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ                108
-#define SIU_IPR_ADDR   INTC_IPRJ
-#define SIU_IPR_POS    1
-#define SIU_PRIORITY   3
-
-#define PORT_PACR      0xA4050100UL
-#define PORT_PBCR      0xA4050102UL
-#define PORT_PCCR      0xA4050104UL
-#define PORT_PDCR      0xA4050106UL
-#define PORT_PECR      0xA4050108UL
-#define PORT_PFCR      0xA405010AUL
-#define PORT_PGCR      0xA405010CUL
-#define PORT_PHCR      0xA405010EUL
-#define PORT_PJCR      0xA4050110UL
-#define PORT_PKCR      0xA4050112UL
-#define PORT_PLCR      0xA4050114UL
-#define PORT_SCPCR     0xA4050116UL
-#define PORT_PMCR      0xA4050118UL
-#define PORT_PNCR      0xA405011AUL
-#define PORT_PQCR      0xA405011CUL
-#define PORT_PRCR      0xA405011EUL
-#define PORT_PTCR      0xA405014CUL
-#define PORT_PUCR      0xA405014EUL
-#define PORT_PVCR      0xA4050150UL
-
-#define PORT_PSELA     0xA4050140UL
-#define PORT_PSELB     0xA4050142UL
-#define PORT_PSELC     0xA4050144UL
-#define PORT_PSELE     0xA4050158UL
-
-#define PORT_HIZCRA    0xA4050146UL
-#define PORT_HIZCRB    0xA4050148UL
-#define PORT_DRVCR     0xA405014AUL
-
-#define PORT_PADR      0xA4050120UL
-#define PORT_PBDR      0xA4050122UL
-#define PORT_PCDR      0xA4050124UL
-#define PORT_PDDR      0xA4050126UL
-#define PORT_PEDR      0xA4050128UL
-#define PORT_PFDR      0xA405012AUL
-#define PORT_PGDR      0xA405012CUL
-#define PORT_PHDR      0xA405012EUL
-#define PORT_PJDR      0xA4050130UL
-#define PORT_PKDR      0xA4050132UL
-#define PORT_PLDR      0xA4050134UL
-#define PORT_SCPDR     0xA4050136UL
-#define PORT_PMDR      0xA4050138UL
-#define PORT_PNDR      0xA405013AUL
-#define PORT_PQDR      0xA405013CUL
-#define PORT_PRDR      0xA405013EUL
-#define PORT_PTDR      0xA405016CUL
-#define PORT_PUDR      0xA405016EUL
-#define PORT_PVDR      0xA4050170UL
-
-#define IRQ0_IRQ       32
-#define IRQ1_IRQ       33
-#define IRQ2_IRQ       34
-#define IRQ3_IRQ       35
-#define IRQ4_IRQ       36
-#define IRQ5_IRQ       37
-#define IRQ6_IRQ       38
-#define IRQ7_IRQ       39
-
-#define INTPRI00       0xA4140010UL
-
-#define IRQ0_IPR_ADDR  INTPRI00
-#define IRQ1_IPR_ADDR  INTPRI00
-#define IRQ2_IPR_ADDR  INTPRI00
-#define IRQ3_IPR_ADDR  INTPRI00
-#define IRQ4_IPR_ADDR  INTPRI00
-#define IRQ5_IPR_ADDR  INTPRI00
-#define IRQ6_IPR_ADDR  INTPRI00
-#define IRQ7_IPR_ADDR  INTPRI00
-
-#define IRQ0_IPR_POS   7
-#define IRQ1_IPR_POS   6
-#define IRQ2_IPR_POS   5
-#define IRQ3_IPR_POS   4
-#define IRQ4_IPR_POS   3
-#define IRQ5_IPR_POS   2
-#define IRQ6_IPR_POS   1
-#define IRQ7_IPR_POS   0
-
-#define IRQ0_PRIORITY  1
-#define IRQ1_PRIORITY  1
-#define IRQ2_PRIORITY  1
-#define IRQ3_PRIORITY  1
-#define IRQ4_PRIORITY  1
-#define IRQ5_PRIORITY  1
-#define IRQ6_PRIORITY  1
-#define IRQ7_PRIORITY  1
-
-#endif /* __ASM_SH_IRQ_SH73180_H */
diff --git a/include/asm-sh/irq-sh7343.h b/include/asm-sh/irq-sh7343.h
deleted file mode 100644 (file)
index 5d15419..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH7343_H
-#define __ASM_SH_IRQ_SH7343_H
-
-/*
- * linux/include/asm-sh/irq-sh7343.h
- *
- * Copyright (C) 2006 Kenati Technologies Inc.
- * Andre Mccurdy <andre@kenati.com>
- * Ranjit Deshpande <ranjit@kenati.com>
- */
-
-#undef INTC_IPRA
-#undef INTC_IPRB
-#undef INTC_IPRC
-#undef INTC_IPRD
-
-#undef DMTE0_IRQ
-#undef DMTE1_IRQ
-#undef DMTE2_IRQ
-#undef DMTE3_IRQ
-#undef DMTE4_IRQ
-#undef DMTE5_IRQ
-#undef DMTE6_IRQ
-#undef DMTE7_IRQ
-#undef DMAE_IRQ
-#undef DMA_IPR_ADDR
-#undef DMA_IPR_POS
-#undef DMA_PRIORITY
-
-#undef INTC_IMCR0
-#undef INTC_IMCR1
-#undef INTC_IMCR2
-#undef INTC_IMCR3
-#undef INTC_IMCR4
-#undef INTC_IMCR5
-#undef INTC_IMCR6
-#undef INTC_IMCR7
-#undef INTC_IMCR8
-#undef INTC_IMCR9
-#undef INTC_IMCR10
-
-
-#define INTC_IPRA      0xA4080000UL
-#define INTC_IPRB      0xA4080004UL
-#define INTC_IPRC      0xA4080008UL
-#define INTC_IPRD      0xA408000CUL
-#define INTC_IPRE      0xA4080010UL
-#define INTC_IPRF      0xA4080014UL
-#define INTC_IPRG      0xA4080018UL
-#define INTC_IPRH      0xA408001CUL
-#define INTC_IPRI      0xA4080020UL
-#define INTC_IPRJ      0xA4080024UL
-#define INTC_IPRK      0xA4080028UL
-#define INTC_IPRL      0xA408002CUL
-
-#define INTC_IMR0      0xA4080080UL
-#define INTC_IMR1      0xA4080084UL
-#define INTC_IMR2      0xA4080088UL
-#define INTC_IMR3      0xA408008CUL
-#define INTC_IMR4      0xA4080090UL
-#define INTC_IMR5      0xA4080094UL
-#define INTC_IMR6      0xA4080098UL
-#define INTC_IMR7      0xA408009CUL
-#define INTC_IMR8      0xA40800A0UL
-#define INTC_IMR9      0xA40800A4UL
-#define INTC_IMR10     0xA40800A8UL
-#define INTC_IMR11     0xA40800ACUL
-
-#define INTC_IMCR0     0xA40800C0UL
-#define INTC_IMCR1     0xA40800C4UL
-#define INTC_IMCR2     0xA40800C8UL
-#define INTC_IMCR3     0xA40800CCUL
-#define INTC_IMCR4     0xA40800D0UL
-#define INTC_IMCR5     0xA40800D4UL
-#define INTC_IMCR6     0xA40800D8UL
-#define INTC_IMCR7     0xA40800DCUL
-#define INTC_IMCR8     0xA40800E0UL
-#define INTC_IMCR9     0xA40800E4UL
-#define INTC_IMCR10    0xA40800E8UL
-#define INTC_IMCR11    0xA40800ECUL
-
-#define INTC_ICR0      0xA4140000UL
-#define INTC_ICR1      0xA414001CUL
-
-#define INTMSK0                0xa4140044
-#define INTMSKCLR0     0xa4140064
-#define INTC_INTPRI0   0xa4140010
-
-/*
-  NOTE:
-
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-
-/* TMU0 */
-#define TMU0_IRQ       16
-#define TMU0_IPR_ADDR  INTC_IPRA
-#define TMU0_IPR_POS    3
-#define TMU0_PRIORITY   2
-
-#define TIMER_IRQ       16
-#define TIMER_IPR_ADDR  INTC_IPRA
-#define TIMER_IPR_POS    3
-#define TIMER_PRIORITY   2
-
-/* TMU1 */
-#define TMU1_IRQ       17
-#define TMU1_IPR_ADDR  INTC_IPRA
-#define TMU1_IPR_POS    2
-#define TMU1_PRIORITY   2
-
-/* TMU2 */
-#define TMU2_IRQ       18
-#define TMU2_IPR_ADDR  INTC_IPRA
-#define TMU2_IPR_POS    1
-#define TMU2_PRIORITY   2
-
-/* LCDC */
-#define LCDC_IRQ       28
-#define LCDC_IPR_ADDR  INTC_IPRB
-#define LCDC_IPR_POS    2
-#define LCDC_PRIORITY   2
-
-/* VIO (Video I/O) */
-#define CEU_IRQ                52
-#define BEU_IRQ                53
-#define VEU_IRQ                54
-#define VOU_IRQ                55
-#define VIO_IPR_ADDR   INTC_IPRE
-#define VIO_IPR_POS     2
-#define VIO_PRIORITY    2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ                56
-#define MFI_IPR_ADDR   INTC_IPRE
-#define MFI_IPR_POS     1
-#define MFI_PRIORITY    2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ                60
-#define VPU_IPR_ADDR   INTC_IPRE
-#define VPU_IPR_POS     0
-#define VPU_PRIORITY    2
-
-/* 3DG */
-#define TDG_IRQ                63
-#define TDG_IPR_ADDR   INTC_IPRJ
-#define TDG_IPR_POS     2
-#define TDG_PRIORITY    2
-
-/* DMAC(1) */
-#define DMTE0_IRQ      48
-#define DMTE1_IRQ      49
-#define DMTE2_IRQ      50
-#define DMTE3_IRQ      51
-#define DMA1_IPR_ADDR  INTC_IPRE
-#define DMA1_IPR_POS   3
-#define DMA1_PRIORITY  7
-
-/* DMAC(2) */
-#define DMTE4_IRQ      76
-#define DMTE5_IRQ      77
-#define DMA2_IPR_ADDR  INTC_IPRF
-#define DMA2_IPR_POS   2
-#define DMA2_PRIORITY  7
-
-/* SCIF0 */
-#define SCIF_ERI_IRQ   80
-#define SCIF_RXI_IRQ   81
-#define SCIF_BRI_IRQ   82
-#define SCIF_TXI_IRQ   83
-#define SCIF_IPR_ADDR  INTC_IPRG
-#define SCIF_IPR_POS   3
-#define SCIF_PRIORITY  3
-
-/* SIOF0 */
-#define SIOF0_IRQ      84
-#define SIOF0_IPR_ADDR INTC_IPRH
-#define SIOF0_IPR_POS  3
-#define SIOF0_PRIORITY 3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ      92
-#define FLTEND_IRQ     93
-#define FLTRQ0_IRQ     94
-#define FLTRQ1_IRQ     95
-#define FLCTL_IPR_ADDR INTC_IPRH
-#define FLCTL_IPR_POS  1
-#define FLCTL_PRIORITY 3
-
-/* IIC(0) (IIC Bus Interface) */
-#define IIC0_ALI_IRQ   96
-#define IIC0_TACKI_IRQ 97
-#define IIC0_WAITI_IRQ 98
-#define IIC0_DTEI_IRQ  99
-#define IIC0_IPR_ADDR  INTC_IPRH
-#define IIC0_IPR_POS   0
-#define IIC0_PRIORITY  3
-
-/* IIC(1) (IIC Bus Interface) */
-#define IIC1_ALI_IRQ   44
-#define IIC1_TACKI_IRQ 45
-#define IIC1_WAITI_IRQ 46
-#define IIC1_DTEI_IRQ  47
-#define IIC1_IPR_ADDR  INTC_IPRI
-#define IIC1_IPR_POS   0
-#define IIC1_PRIORITY  3
-
-/* SIO0 */
-#define SIO0_IRQ       88
-#define SIO0_IPR_ADDR  INTC_IPRI
-#define SIO0_IPR_POS   3
-#define SIO0_PRIORITY  3
-
-/* SDHI */
-#define SDHI_SDHII0_IRQ        100
-#define SDHI_SDHII1_IRQ        101
-#define SDHI_SDHII2_IRQ        102
-#define SDHI_SDHII3_IRQ        103
-#define SDHI_IPR_ADDR  INTC_IPRK
-#define SDHI_IPR_POS   0
-#define SDHI_PRIORITY  3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ                108
-#define SIU_IPR_ADDR   INTC_IPRJ
-#define SIU_IPR_POS    1
-#define SIU_PRIORITY   3
-
-#define PORT_PACR      0xA4050100UL
-#define PORT_PBCR      0xA4050102UL
-#define PORT_PCCR      0xA4050104UL
-#define PORT_PDCR      0xA4050106UL
-#define PORT_PECR      0xA4050108UL
-#define PORT_PFCR      0xA405010AUL
-#define PORT_PGCR      0xA405010CUL
-#define PORT_PHCR      0xA405010EUL
-#define PORT_PJCR      0xA4050110UL
-#define PORT_PKCR      0xA4050112UL
-#define PORT_PLCR      0xA4050114UL
-#define PORT_SCPCR     0xA4050116UL
-#define PORT_PMCR      0xA4050118UL
-#define PORT_PNCR      0xA405011AUL
-#define PORT_PQCR      0xA405011CUL
-#define PORT_PRCR      0xA405011EUL
-#define PORT_PTCR      0xA405014CUL
-#define PORT_PUCR      0xA405014EUL
-#define PORT_PVCR      0xA4050150UL
-
-#define PORT_PSELA     0xA4050140UL
-#define PORT_PSELB     0xA4050142UL
-#define PORT_PSELC     0xA4050144UL
-#define PORT_PSELE     0xA4050158UL
-
-#define PORT_HIZCRA    0xA4050146UL
-#define PORT_HIZCRB    0xA4050148UL
-#define PORT_DRVCR     0xA405014AUL
-
-#define PORT_PADR      0xA4050120UL
-#define PORT_PBDR      0xA4050122UL
-#define PORT_PCDR      0xA4050124UL
-#define PORT_PDDR      0xA4050126UL
-#define PORT_PEDR      0xA4050128UL
-#define PORT_PFDR      0xA405012AUL
-#define PORT_PGDR      0xA405012CUL
-#define PORT_PHDR      0xA405012EUL
-#define PORT_PJDR      0xA4050130UL
-#define PORT_PKDR      0xA4050132UL
-#define PORT_PLDR      0xA4050134UL
-#define PORT_SCPDR     0xA4050136UL
-#define PORT_PMDR      0xA4050138UL
-#define PORT_PNDR      0xA405013AUL
-#define PORT_PQDR      0xA405013CUL
-#define PORT_PRDR      0xA405013EUL
-#define PORT_PTDR      0xA405016CUL
-#define PORT_PUDR      0xA405016EUL
-#define PORT_PVDR      0xA4050170UL
-
-#define IRQ0_IRQ       32
-#define IRQ1_IRQ       33
-#define IRQ2_IRQ       34
-#define IRQ3_IRQ       35
-#define IRQ4_IRQ       36
-#define IRQ5_IRQ       37
-#define IRQ6_IRQ       38
-#define IRQ7_IRQ       39
-
-#define INTPRI00       0xA4140010UL
-
-#define IRQ0_IPR_ADDR  INTPRI00
-#define IRQ1_IPR_ADDR  INTPRI00
-#define IRQ2_IPR_ADDR  INTPRI00
-#define IRQ3_IPR_ADDR  INTPRI00
-#define IRQ4_IPR_ADDR  INTPRI00
-#define IRQ5_IPR_ADDR  INTPRI00
-#define IRQ6_IPR_ADDR  INTPRI00
-#define IRQ7_IPR_ADDR  INTPRI00
-
-#define IRQ0_IPR_POS   7
-#define IRQ1_IPR_POS   6
-#define IRQ2_IPR_POS   5
-#define IRQ3_IPR_POS   4
-#define IRQ4_IPR_POS   3
-#define IRQ5_IPR_POS   2
-#define IRQ6_IPR_POS   1
-#define IRQ7_IPR_POS   0
-
-#define IRQ0_PRIORITY  1
-#define IRQ1_PRIORITY  1
-#define IRQ2_PRIORITY  1
-#define IRQ3_PRIORITY  1
-#define IRQ4_PRIORITY  1
-#define IRQ5_PRIORITY  1
-#define IRQ6_PRIORITY  1
-#define IRQ7_PRIORITY  1
-
-#endif /* __ASM_SH_IRQ_SH7343_H */
diff --git a/include/asm-sh/irq-sh7780.h b/include/asm-sh/irq-sh7780.h
deleted file mode 100644 (file)
index 19912ae..0000000
+++ /dev/null
@@ -1,311 +0,0 @@
-#ifndef __ASM_SH_IRQ_SH7780_H
-#define __ASM_SH_IRQ_SH7780_H
-
-/*
- * linux/include/asm-sh/irq-sh7780.h
- *
- * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp>
- */
-#define INTC_BASE      0xffd00000
-#define INTC_ICR0      (INTC_BASE+0x0)
-#define INTC_ICR1      (INTC_BASE+0x1c)
-#define INTC_INTPRI    (INTC_BASE+0x10)
-#define INTC_INTREQ    (INTC_BASE+0x24)
-#define INTC_INTMSK0   (INTC_BASE+0x44)
-#define INTC_INTMSK1   (INTC_BASE+0x48)
-#define INTC_INTMSK2   (INTC_BASE+0x40080)
-#define INTC_INTMSKCLR0        (INTC_BASE+0x64)
-#define INTC_INTMSKCLR1        (INTC_BASE+0x68)
-#define INTC_INTMSKCLR2        (INTC_BASE+0x40084)
-#define INTC_NMIFCR    (INTC_BASE+0xc0)
-#define INTC_USERIMASK (INTC_BASE+0x30000)
-
-#define        INTC_INT2PRI0   (INTC_BASE+0x40000)
-#define        INTC_INT2PRI1   (INTC_BASE+0x40004)
-#define        INTC_INT2PRI2   (INTC_BASE+0x40008)
-#define        INTC_INT2PRI3   (INTC_BASE+0x4000c)
-#define        INTC_INT2PRI4   (INTC_BASE+0x40010)
-#define        INTC_INT2PRI5   (INTC_BASE+0x40014)
-#define        INTC_INT2PRI6   (INTC_BASE+0x40018)
-#define        INTC_INT2PRI7   (INTC_BASE+0x4001c)
-#define        INTC_INT2A0     (INTC_BASE+0x40030)
-#define        INTC_INT2A1     (INTC_BASE+0x40034)
-#define        INTC_INT2MSKR   (INTC_BASE+0x40038)
-#define        INTC_INT2MSKCR  (INTC_BASE+0x4003c)
-#define        INTC_INT2B0     (INTC_BASE+0x40040)
-#define        INTC_INT2B1     (INTC_BASE+0x40044)
-#define        INTC_INT2B2     (INTC_BASE+0x40048)
-#define        INTC_INT2B3     (INTC_BASE+0x4004c)
-#define        INTC_INT2B4     (INTC_BASE+0x40050)
-#define        INTC_INT2B5     (INTC_BASE+0x40054)
-#define        INTC_INT2B6     (INTC_BASE+0x40058)
-#define        INTC_INT2B7     (INTC_BASE+0x4005c)
-#define        INTC_INT2GPIC   (INTC_BASE+0x40090)
-/*
-  NOTE:
-  *_IRQ = (INTEVT2 - 0x200)/0x20
-*/
-/* IRQ 0-7 line external int*/
-#define IRQ0_IRQ       2
-#define IRQ0_IPR_ADDR  INTC_INTPRI
-#define IRQ0_IPR_POS   7
-#define IRQ0_PRIORITY  2
-
-#define IRQ1_IRQ       4
-#define IRQ1_IPR_ADDR  INTC_INTPRI
-#define IRQ1_IPR_POS   6
-#define IRQ1_PRIORITY  2
-
-#define IRQ2_IRQ       6
-#define IRQ2_IPR_ADDR  INTC_INTPRI
-#define IRQ2_IPR_POS   5
-#define IRQ2_PRIORITY  2
-
-#define IRQ3_IRQ       8
-#define IRQ3_IPR_ADDR  INTC_INTPRI
-#define IRQ3_IPR_POS   4
-#define IRQ3_PRIORITY  2
-
-#define IRQ4_IRQ       10
-#define IRQ4_IPR_ADDR  INTC_INTPRI
-#define IRQ4_IPR_POS   3
-#define IRQ4_PRIORITY  2
-
-#define IRQ5_IRQ       12
-#define IRQ5_IPR_ADDR  INTC_INTPRI
-#define IRQ5_IPR_POS   2
-#define IRQ5_PRIORITY  2
-
-#define IRQ6_IRQ       14
-#define IRQ6_IPR_ADDR  INTC_INTPRI
-#define IRQ6_IPR_POS   1
-#define IRQ6_PRIORITY  2
-
-#define IRQ7_IRQ       0
-#define IRQ7_IPR_ADDR  INTC_INTPRI
-#define IRQ7_IPR_POS   0
-#define IRQ7_PRIORITY  2
-
-/* TMU */
-/* ch0 */
-#define TMU_IRQ                28
-#define        TMU_IPR_ADDR    INTC_INT2PRI0
-#define        TMU_IPR_POS     3
-#define TMU_PRIORITY   2
-
-#define TIMER_IRQ      28
-#define        TIMER_IPR_ADDR  INTC_INT2PRI0
-#define        TIMER_IPR_POS   3
-#define TIMER_PRIORITY 2
-
-/* ch 1*/
-#define TMU_CH1_IRQ            29
-#define        TMU_CH1_IPR_ADDR        INTC_INT2PRI0
-#define        TMU_CH1_IPR_POS         2
-#define TMU_CH1_PRIORITY       2
-
-#define TIMER1_IRQ     29
-#define        TIMER1_IPR_ADDR INTC_INT2PRI0
-#define        TIMER1_IPR_POS  2
-#define TIMER1_PRIORITY        2
-
-/* ch 2*/
-#define TMU_CH2_IRQ            30
-#define        TMU_CH2_IPR_ADDR        INTC_INT2PRI0
-#define        TMU_CH2_IPR_POS         1
-#define TMU_CH2_PRIORITY       2
-/* ch 2 Input capture */
-#define TMU_CH2IC_IRQ          31
-#define        TMU_CH2IC_IPR_ADDR      INTC_INT2PRI0
-#define        TMU_CH2IC_IPR_POS       0
-#define TMU_CH2IC_PRIORITY     2
-/* ch 3 */
-#define TMU_CH3_IRQ            96
-#define        TMU_CH3_IPR_ADDR        INTC_INT2PRI1
-#define        TMU_CH3_IPR_POS         3
-#define TMU_CH3_PRIORITY       2
-/* ch 4 */
-#define TMU_CH4_IRQ            97
-#define        TMU_CH4_IPR_ADDR        INTC_INT2PRI1
-#define        TMU_CH4_IPR_POS         2
-#define TMU_CH4_PRIORITY       2
-/* ch 5*/
-#define TMU_CH5_IRQ            98
-#define        TMU_CH5_IPR_ADDR        INTC_INT2PRI1
-#define        TMU_CH5_IPR_POS         1
-#define TMU_CH5_PRIORITY       2
-
-/* SCIF0 */
-#define SCIF0_ERI_IRQ  40
-#define SCIF0_RXI_IRQ  41
-#define SCIF0_BRI_IRQ  42
-#define SCIF0_TXI_IRQ  43
-#define        SCIF0_IPR_ADDR  INTC_INT2PRI2
-#define        SCIF0_IPR_POS   3
-#define SCIF0_PRIORITY 3
-
-/* SCIF1 */
-#define SCIF1_ERI_IRQ  76
-#define SCIF1_RXI_IRQ  77
-#define SCIF1_BRI_IRQ  78
-#define SCIF1_TXI_IRQ  79
-#define        SCIF1_IPR_ADDR  INTC_INT2PRI2
-#define        SCIF1_IPR_POS   2
-#define SCIF1_PRIORITY 3
-
-#define        WDT_IRQ         27
-#define        WDT_IPR_ADDR    INTC_INT2PRI2
-#define        WDT_IPR_POS     1
-#define        WDT_PRIORITY    2
-
-/* DMAC(0) */
-#define        DMINT0_IRQ      34
-#define        DMINT1_IRQ      35
-#define        DMINT2_IRQ      36
-#define        DMINT3_IRQ      37
-#define        DMINT4_IRQ      44
-#define        DMINT5_IRQ      45
-#define        DMINT6_IRQ      46
-#define        DMINT7_IRQ      47
-#define        DMAE_IRQ        38
-#define        DMA0_IPR_ADDR   INTC_INT2PRI3
-#define        DMA0_IPR_POS    2
-#define        DMA0_PRIORITY   7
-
-/* DMAC(1) */
-#define        DMINT8_IRQ      92
-#define        DMINT9_IRQ      93
-#define        DMINT10_IRQ     94
-#define        DMINT11_IRQ     95
-#define        DMA1_IPR_ADDR   INTC_INT2PRI3
-#define        DMA1_IPR_POS    1
-#define        DMA1_PRIORITY   7
-
-#define        DMTE0_IRQ       DMINT0_IRQ
-#define        DMTE4_IRQ       DMINT4_IRQ
-#define        DMA_IPR_ADDR    DMA0_IPR_ADDR
-#define        DMA_IPR_POS     DMA0_IPR_POS
-#define        DMA_PRIORITY    DMA0_PRIORITY
-
-/* CMT */
-#define        CMT_IRQ         56
-#define        CMT_IPR_ADDR    INTC_INT2PRI4
-#define        CMT_IPR_POS     3
-#define        CMT_PRIORITY    0
-
-/* HAC */
-#define        HAC_IRQ         60
-#define        HAC_IPR_ADDR    INTC_INT2PRI4
-#define        HAC_IPR_POS     2
-#define        CMT_PRIORITY    0
-
-/* PCIC(0) */
-#define        PCIC0_IRQ       64
-#define        PCIC0_IPR_ADDR  INTC_INT2PRI4
-#define        PCIC0_IPR_POS   1
-#define        PCIC0_PRIORITY  2
-
-/* PCIC(1) */
-#define        PCIC1_IRQ       65
-#define        PCIC1_IPR_ADDR  INTC_INT2PRI4
-#define        PCIC1_IPR_POS   0
-#define        PCIC1_PRIORITY  2
-
-/* PCIC(2) */
-#define        PCIC2_IRQ       66
-#define        PCIC2_IPR_ADDR  INTC_INT2PRI5
-#define        PCIC2_IPR_POS   3
-#define        PCIC2_PRIORITY  2
-
-/* PCIC(3) */
-#define        PCIC3_IRQ       67
-#define        PCIC3_IPR_ADDR  INTC_INT2PRI5
-#define        PCIC3_IPR_POS   2
-#define        PCIC3_PRIORITY  2
-
-/* PCIC(4) */
-#define        PCIC4_IRQ       68
-#define        PCIC4_IPR_ADDR  INTC_INT2PRI5
-#define        PCIC4_IPR_POS   1
-#define        PCIC4_PRIORITY  2
-
-/* PCIC(5) */
-#define        PCICERR_IRQ     69
-#define        PCICPWD3_IRQ    70
-#define        PCICPWD2_IRQ    71
-#define        PCICPWD1_IRQ    72
-#define        PCICPWD0_IRQ    73
-#define        PCIC5_IPR_ADDR  INTC_INT2PRI5
-#define        PCIC5_IPR_POS   0
-#define        PCIC5_PRIORITY  2
-
-/* SIOF */
-#define        SIOF_IRQ        80
-#define        SIOF_IPR_ADDR   INTC_INT2PRI6
-#define        SIOF_IPR_POS    3
-#define        SIOF_PRIORITY   3
-
-/* HSPI */
-#define        HSPI_IRQ        84
-#define        HSPI_IPR_ADDR   INTC_INT2PRI6
-#define        HSPI_IPR_POS    2
-#define        HSPI_PRIORITY   3
-
-/* MMCIF */
-#define        MMCIF_FSTAT_IRQ 88
-#define        MMCIF_TRAN_IRQ  89
-#define        MMCIF_ERR_IRQ   90
-#define        MMCIF_FRDY_IRQ  91
-#define        MMCIF_IPR_ADDR  INTC_INT2PRI6
-#define        MMCIF_IPR_POS   1
-#define        HSPI_PRIORITY   3
-
-/* SSI */
-#define        SSI_IRQ         100
-#define        SSI_IPR_ADDR    INTC_INT2PRI6
-#define        SSI_IPR_POS     0
-#define        SSI_PRIORITY    3
-
-/* FLCTL */
-#define        FLCTL_FLSTE_IRQ         104
-#define        FLCTL_FLTEND_IRQ        105
-#define        FLCTL_FLTRQ0_IRQ        106
-#define        FLCTL_FLTRQ1_IRQ        107
-#define        FLCTL_IPR_ADDR          INTC_INT2PRI7
-#define        FLCTL_IPR_POS           3
-#define        FLCTL_PRIORITY          3
-
-/* GPIO */
-#define        GPIO0_IRQ       108
-#define        GPIO1_IRQ       109
-#define        GPIO2_IRQ       110
-#define        GPIO3_IRQ       111
-#define        GPIO_IPR_ADDR   INTC_INT2PRI7
-#define        GPIO_IPR_POS    2
-#define        GPIO_PRIORITY   3
-
-#define        INTC_TMU0_MSK   0
-#define        INTC_TMU3_MSK   1
-#define        INTC_RTC_MSK    2
-#define        INTC_SCIF0_MSK  3
-#define        INTC_SCIF1_MSK  4
-#define        INTC_WDT_MSK    5
-#define        INTC_HUID_MSK   7
-#define        INTC_DMAC0_MSK  8
-#define        INTC_DMAC1_MSK  9
-#define        INTC_CMT_MSK    12
-#define        INTC_HAC_MSK    13
-#define        INTC_PCIC0_MSK  14
-#define        INTC_PCIC1_MSK  15
-#define        INTC_PCIC2_MSK  16
-#define        INTC_PCIC3_MSK  17
-#define        INTC_PCIC4_MSK  18
-#define        INTC_PCIC5_MSK  19
-#define        INTC_SIOF_MSK   20
-#define        INTC_HSPI_MSK   21
-#define        INTC_MMCIF_MSK  22
-#define        INTC_SSI_MSK    23
-#define        INTC_FLCTL_MSK  24
-#define        INTC_GPIO_MSK   25
-
-#endif /* __ASM_SH_IRQ_SH7780_H */
index 6cd3e9e2a76ac62334f28a379e610fc9a0599df4..fd576088e47edd0085ba8aa1f702a3c1b44dffd0 100644 (file)
@@ -1,233 +1,9 @@
 #ifndef __ASM_SH_IRQ_H
 #define __ASM_SH_IRQ_H
 
-/*
- *
- * linux/include/asm-sh/irq.h
- *
- * Copyright (C) 1999  Niibe Yutaka & Takeshi Yaegashi
- * Copyright (C) 2000  Kazumoto Kojima
- * Copyright (C) 2003  Paul Mundt
- *
- */
-
 #include <asm/machvec.h>
 #include <asm/ptrace.h>                /* for pt_regs */
 
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-
-#define INTC_DMAC0_MSK 0
-
-#if defined(CONFIG_CPU_SH3)
-#define INTC_IPRA      0xfffffee2UL
-#define INTC_IPRB      0xfffffee4UL
-#elif defined(CONFIG_CPU_SH4)
-#define INTC_IPRA      0xffd00004UL
-#define INTC_IPRB      0xffd00008UL
-#define INTC_IPRC      0xffd0000cUL
-#define INTC_IPRD      0xffd00010UL
-#endif
-
-#define TIMER_IRQ      16
-#define TIMER_IPR_ADDR INTC_IPRA
-#define TIMER_IPR_POS   3
-#define TIMER_PRIORITY  2
-
-#define TIMER1_IRQ     17
-#define TIMER1_IPR_ADDR        INTC_IPRA
-#define TIMER1_IPR_POS  2
-#define TIMER1_PRIORITY         4
-
-#define RTC_IRQ                22
-#define RTC_IPR_ADDR   INTC_IPRA
-#define RTC_IPR_POS     0
-#define RTC_PRIORITY   TIMER_PRIORITY
-
-#if defined(CONFIG_CPU_SH3)
-#define DMTE0_IRQ      48
-#define DMTE1_IRQ      49
-#define DMTE2_IRQ      50
-#define DMTE3_IRQ      51
-#define DMA_IPR_ADDR   INTC_IPRE
-#define DMA_IPR_POS    3
-#define DMA_PRIORITY   7
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-/* TMU2 */
-#define TIMER2_IRQ      18
-#define TIMER2_IPR_ADDR INTC_IPRA
-#define TIMER2_IPR_POS   1
-#define TIMER2_PRIORITY  2
-
-/* WDT */
-#define WDT_IRQ                27
-#define WDT_IPR_ADDR   INTC_IPRB
-#define WDT_IPR_POS     3
-#define WDT_PRIORITY    2
-
-/* SIM (SIM Card Module) */
-#define SIM_ERI_IRQ    23
-#define SIM_RXI_IRQ    24
-#define SIM_TXI_IRQ    25
-#define SIM_TEND_IRQ   26
-#define SIM_IPR_ADDR   INTC_IPRB
-#define SIM_IPR_POS     1
-#define SIM_PRIORITY    2
-
-/* VIO (Video I/O) */
-#define VIO_IRQ                52
-#define VIO_IPR_ADDR   INTC_IPRE
-#define VIO_IPR_POS     2
-#define VIO_PRIORITY    2
-
-/* MFI (Multi Functional Interface) */
-#define MFI_IRQ                56
-#define MFI_IPR_ADDR   INTC_IPRE
-#define MFI_IPR_POS     1
-#define MFI_PRIORITY    2
-
-/* VPU (Video Processing Unit) */
-#define VPU_IRQ                60
-#define VPU_IPR_ADDR   INTC_IPRE
-#define VPU_IPR_POS     0
-#define VPU_PRIORITY    2
-
-/* KEY (Key Scan Interface) */
-#define KEY_IRQ                79
-#define KEY_IPR_ADDR   INTC_IPRF
-#define KEY_IPR_POS     3
-#define KEY_PRIORITY    2
-
-/* CMT (Compare Match Timer) */
-#define CMT_IRQ                104
-#define CMT_IPR_ADDR   INTC_IPRF
-#define CMT_IPR_POS     0
-#define CMT_PRIORITY    2
-
-/* DMAC(1) */
-#define DMTE0_IRQ      48
-#define DMTE1_IRQ      49
-#define DMTE2_IRQ      50
-#define DMTE3_IRQ      51
-#define DMA1_IPR_ADDR  INTC_IPRE
-#define DMA1_IPR_POS   3
-#define DMA1_PRIORITY  7
-
-/* DMAC(2) */
-#define DMTE4_IRQ      76
-#define DMTE5_IRQ      77
-#define DMA2_IPR_ADDR  INTC_IPRF
-#define DMA2_IPR_POS   2
-#define DMA2_PRIORITY  7
-
-/* SIOF0 */
-#define SIOF0_IRQ      84
-#define SIOF0_IPR_ADDR INTC_IPRH
-#define SIOF0_IPR_POS  3
-#define SIOF0_PRIORITY 3
-
-/* FLCTL (Flash Memory Controller) */
-#define FLSTE_IRQ      92
-#define FLTEND_IRQ     93
-#define FLTRQ0_IRQ     94
-#define FLTRQ1_IRQ     95
-#define FLCTL_IPR_ADDR INTC_IPRH
-#define FLCTL_IPR_POS  1
-#define FLCTL_PRIORITY 3
-
-/* IIC (IIC Bus Interface) */
-#define IIC_ALI_IRQ    96
-#define IIC_TACKI_IRQ  97
-#define IIC_WAITI_IRQ  98
-#define IIC_DTEI_IRQ   99
-#define IIC_IPR_ADDR   INTC_IPRH
-#define IIC_IPR_POS    0
-#define IIC_PRIORITY   3
-
-/* SIO0 */
-#define SIO0_IRQ       88
-#define SIO0_IPR_ADDR  INTC_IPRI
-#define SIO0_IPR_POS   3
-#define SIO0_PRIORITY  3
-
-/* SIU (Sound Interface Unit) */
-#define SIU_IRQ                108
-#define SIU_IPR_ADDR   INTC_IPRJ
-#define SIU_IPR_POS    1
-#define SIU_PRIORITY   3
-
-#endif
-#elif defined(CONFIG_CPU_SH4)
-#define DMTE0_IRQ      34
-#define DMTE1_IRQ      35
-#define DMTE2_IRQ      36
-#define DMTE3_IRQ      37
-#define DMTE4_IRQ      44      /* 7751R only */
-#define DMTE5_IRQ      45      /* 7751R only */
-#define DMTE6_IRQ      46      /* 7751R only */
-#define DMTE7_IRQ      47      /* 7751R only */
-#define DMAE_IRQ       38
-#define DMA_IPR_ADDR   INTC_IPRC
-#define DMA_IPR_POS    2
-#define DMA_PRIORITY   7
-#endif
-
-#if defined (CONFIG_CPU_SUBTYPE_SH7707) || defined (CONFIG_CPU_SUBTYPE_SH7708) || \
-    defined (CONFIG_CPU_SUBTYPE_SH7709) || defined (CONFIG_CPU_SUBTYPE_SH7750) || \
-    defined (CONFIG_CPU_SUBTYPE_SH7751) || defined (CONFIG_CPU_SUBTYPE_SH7706)
-#define SCI_ERI_IRQ    23
-#define SCI_RXI_IRQ    24
-#define SCI_TXI_IRQ    25
-#define SCI_IPR_ADDR   INTC_IPRB
-#define SCI_IPR_POS    1
-#define SCI_PRIORITY   3
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-#define SCIF0_IRQ      80
-#define SCIF0_IPR_ADDR INTC_IPRG
-#define SCIF0_IPR_POS  3
-#define SCIF0_PRIORITY 3
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7709)
-#define SCIF_ERI_IRQ   56
-#define SCIF_RXI_IRQ   57
-#define SCIF_BRI_IRQ   58
-#define SCIF_TXI_IRQ   59
-#define SCIF_IPR_ADDR  INTC_IPRE
-#define SCIF_IPR_POS   1
-#define SCIF_PRIORITY  3
-
-#define IRDA_ERI_IRQ   52
-#define IRDA_RXI_IRQ   53
-#define IRDA_BRI_IRQ   54
-#define IRDA_TXI_IRQ   55
-#define IRDA_IPR_ADDR  INTC_IPRE
-#define IRDA_IPR_POS   2
-#define IRDA_PRIORITY  3
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \
-      defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
-#define SCIF_ERI_IRQ   40
-#define SCIF_RXI_IRQ   41
-#define SCIF_BRI_IRQ   42
-#define SCIF_TXI_IRQ   43
-#define SCIF_IPR_ADDR  INTC_IPRC
-#define SCIF_IPR_POS   1
-#define SCIF_PRIORITY  3
-#if defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-#define SCIF1_ERI_IRQ  23
-#define SCIF1_RXI_IRQ  24
-#define SCIF1_BRI_IRQ  25
-#define SCIF1_TXI_IRQ  26
-#define SCIF1_IPR_ADDR INTC_IPRB
-#define SCIF1_IPR_POS  1
-#define SCIF1_PRIORITY 3
-#endif /* ST40STB1 */
-
-#endif /* 775x / SH4-202 / ST40STB1 */
-#endif /* 7780 */
-
 /* NR_IRQS is made from three components:
  *   1. ONCHIP_NR_IRQS - number of IRLs + on-chip peripheral modules
  *   2. PINT_NR_IRQS   - number of PINT interrupts
 # define ONCHIP_NR_IRQS 109
 #elif defined(CONFIG_CPU_SUBTYPE_SH7780)
 # define ONCHIP_NR_IRQS 111
+#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
+# define ONCHIP_NR_IRQS 256
+#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
+# define ONCHIP_NR_IRQS 128
 #elif defined(CONFIG_SH_UNKNOWN)       /* Must be last */
 # define ONCHIP_NR_IRQS 144
 #endif
 /* NR_IRQS. 1+2+3 */
 #define NR_IRQS (ONCHIP_NR_IRQS + PINT_NR_IRQS + OFFCHIP_NR_IRQS)
 
-extern void disable_irq(unsigned int);
-extern void disable_irq_nosync(unsigned int);
-extern void enable_irq(unsigned int);
+/*
+ * Convert back and forth between INTEVT and IRQ values.
+ */
+#define evt2irq(evt)           (((evt) >> 5) - 16)
+#define irq2evt(irq)           (((irq) + 16) << 5)
 
 /*
  * Simple Mask Register Support
@@ -327,362 +109,36 @@ extern unsigned short *irq_mask_register;
  */
 void init_IRQ_pint(void);
 
+/*
+ * The shift value is now the number of bits to shift, not the number of
+ * bits/4. This is to make it easier to read the value directly from the
+ * datasheets. The IPR address, addr, will be set from ipr_idx via the
+ * map_ipridx_to_addr function.
+ */
 struct ipr_data {
        unsigned int irq;
-       unsigned int addr;      /* Address of Interrupt Priority Register */
-       int shift;              /* Shifts of the 16-bit data */
+       int ipr_idx;            /* Index for the IPR register */
+       int shift;              /* Number of bits to shift the data */
        int priority;           /* The priority */
+       unsigned int addr;      /* Address of Interrupt Priority Register */
 };
 
 /*
- * Function for "on chip support modules".
+ * Given an IPR IDX, map the value to an IPR register address.
  */
-extern void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
-extern void make_imask_irq(unsigned int irq);
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-#undef INTC_IPRA
-#undef INTC_IPRB
-#define INTC_IPRA      0xA414FEE2UL
-#define INTC_IPRB      0xA414FEE4UL
-#define INTC_IPRC      0xA4140016UL
-#define INTC_IPRD      0xA4140018UL
-#define INTC_IPRE      0xA414001AUL
-#define INTC_IPRF      0xA4080000UL
-#define INTC_IPRG      0xA4080002UL
-#define INTC_IPRH      0xA4080004UL
-#define INTC_IPRI      0xA4080006UL
-#define INTC_IPRJ      0xA4080008UL
-
-#define INTC_IMR0      0xA4080040UL
-#define INTC_IMR1      0xA4080042UL
-#define INTC_IMR2      0xA4080044UL
-#define INTC_IMR3      0xA4080046UL
-#define INTC_IMR4      0xA4080048UL
-#define INTC_IMR5      0xA408004AUL
-#define INTC_IMR6      0xA408004CUL
-#define INTC_IMR7      0xA408004EUL
-#define INTC_IMR8      0xA4080050UL
-#define INTC_IMR9      0xA4080052UL
-#define INTC_IMR10     0xA4080054UL
-
-#define INTC_IMCR0     0xA4080060UL
-#define INTC_IMCR1     0xA4080062UL
-#define INTC_IMCR2     0xA4080064UL
-#define INTC_IMCR3     0xA4080066UL
-#define INTC_IMCR4     0xA4080068UL
-#define INTC_IMCR5     0xA408006AUL
-#define INTC_IMCR6     0xA408006CUL
-#define INTC_IMCR7     0xA408006EUL
-#define INTC_IMCR8     0xA4080070UL
-#define INTC_IMCR9     0xA4080072UL
-#define INTC_IMCR10    0xA4080074UL
-
-#define INTC_ICR0      0xA414FEE0UL
-#define INTC_ICR1      0xA4140010UL
-
-#define INTC_IRR0      0xA4140004UL
-
-#define PORT_PACR      0xA4050100UL
-#define PORT_PBCR      0xA4050102UL
-#define PORT_PCCR      0xA4050104UL
-#define PORT_PDCR      0xA4050106UL
-#define PORT_PECR      0xA4050108UL
-#define PORT_PFCR      0xA405010AUL
-#define PORT_PGCR      0xA405010CUL
-#define PORT_PHCR      0xA405010EUL
-#define PORT_PJCR      0xA4050110UL
-#define PORT_PKCR      0xA4050112UL
-#define PORT_PLCR      0xA4050114UL
-#define PORT_SCPCR     0xA4050116UL
-#define PORT_PMCR      0xA4050118UL
-#define PORT_PNCR      0xA405011AUL
-#define PORT_PQCR      0xA405011CUL
-
-#define PORT_PSELA     0xA4050140UL
-#define PORT_PSELB     0xA4050142UL
-#define PORT_PSELC     0xA4050144UL
-
-#define PORT_HIZCRA    0xA4050146UL
-#define PORT_HIZCRB    0xA4050148UL
-#define PORT_DRVCR     0xA4050150UL
-
-#define PORT_PADR      0xA4050120UL
-#define PORT_PBDR      0xA4050122UL
-#define PORT_PCDR      0xA4050124UL
-#define PORT_PDDR      0xA4050126UL
-#define PORT_PEDR      0xA4050128UL
-#define PORT_PFDR      0xA405012AUL
-#define PORT_PGDR      0xA405012CUL
-#define PORT_PHDR      0xA405012EUL
-#define PORT_PJDR      0xA4050130UL
-#define PORT_PKDR      0xA4050132UL
-#define PORT_PLDR      0xA4050134UL
-#define PORT_SCPDR     0xA4050136UL
-#define PORT_PMDR      0xA4050138UL
-#define PORT_PNDR      0xA405013AUL
-#define PORT_PQDR      0xA405013CUL
-
-#define IRQ0_IRQ       32
-#define IRQ1_IRQ       33
-#define IRQ2_IRQ       34
-#define IRQ3_IRQ       35
-#define IRQ4_IRQ       36
-#define IRQ5_IRQ       37
-
-#define IRQ0_IPR_ADDR  INTC_IPRC
-#define IRQ1_IPR_ADDR  INTC_IPRC
-#define IRQ2_IPR_ADDR  INTC_IPRC
-#define IRQ3_IPR_ADDR  INTC_IPRC
-#define IRQ4_IPR_ADDR  INTC_IPRD
-#define IRQ5_IPR_ADDR  INTC_IPRD
-
-#define IRQ0_IPR_POS   0
-#define IRQ1_IPR_POS   1
-#define IRQ2_IPR_POS   2
-#define IRQ3_IPR_POS   3
-#define IRQ4_IPR_POS   0
-#define IRQ5_IPR_POS   1
+unsigned int map_ipridx_to_addr(int idx);
 
-#define IRQ0_PRIORITY  1
-#define IRQ1_PRIORITY  1
-#define IRQ2_PRIORITY  1
-#define IRQ3_PRIORITY  1
-#define IRQ4_PRIORITY  1
-#define IRQ5_PRIORITY  1
-
-extern int ipr_irq_demux(int irq);
-#define __irq_demux(irq) ipr_irq_demux(irq)
-
-#elif defined(CONFIG_CPU_SUBTYPE_SH7604)
-#define INTC_IPRA      0xfffffee2UL
-#define INTC_IPRB      0xfffffe60UL
-
-#define INTC_VCRA      0xfffffe62UL
-#define INTC_VCRB      0xfffffe64UL
-#define INTC_VCRC      0xfffffe66UL
-#define INTC_VCRD      0xfffffe68UL
-
-#define INTC_VCRWDT    0xfffffee4UL
-#define INTC_VCRDIV    0xffffff0cUL
-#define INTC_VCRDMA0   0xffffffa0UL
-#define INTC_VCRDMA1   0xffffffa8UL
-
-#define INTC_ICR       0xfffffee0UL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-      defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define INTC_IRR0      0xa4000004UL
-#define INTC_IRR1      0xa4000006UL
-#define INTC_IRR2      0xa4000008UL
-
-#define INTC_ICR0      0xfffffee0UL
-#define INTC_ICR1      0xa4000010UL
-#define INTC_ICR2      0xa4000012UL
-#define INTC_INTER     0xa4000014UL
-
-#define INTC_IPRC      0xa4000016UL
-#define INTC_IPRD      0xa4000018UL
-#define INTC_IPRE      0xa400001aUL
-#if defined(CONFIG_CPU_SUBTYPE_SH7707)
-#define INTC_IPRF      0xa400001cUL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-#define INTC_IPRF      0xa4080000UL
-#define INTC_IPRG      0xa4080002UL
-#define INTC_IPRH      0xa4080004UL
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710)
-/* Interrupt Controller Registers */
-#undef INTC_IPRA
-#undef INTC_IPRB
-#define INTC_IPRA      0xA414FEE2UL
-#define INTC_IPRB      0xA414FEE4UL
-#define INTC_IPRF      0xA4080000UL
-#define INTC_IPRG      0xA4080002UL
-#define INTC_IPRH      0xA4080004UL
-#define INTC_IPRI      0xA4080006UL
-
-#undef INTC_ICR0
-#undef INTC_ICR1
-#define INTC_ICR0      0xA414FEE0UL
-#define INTC_ICR1      0xA4140010UL
-
-#define INTC_IRR0      0xa4000004UL
-#define INTC_IRR1      0xa4000006UL
-#define INTC_IRR2      0xa4000008UL
-#define INTC_IRR3      0xa400000AUL
-#define INTC_IRR4      0xa400000CUL
-#define INTC_IRR5      0xa4080020UL
-#define INTC_IRR7      0xa4080024UL
-#define INTC_IRR8      0xa4080026UL
-
-/* Interrupt numbers */
-#define TIMER2_IRQ      18
-#define TIMER2_IPR_ADDR INTC_IPRA
-#define TIMER2_IPR_POS   1
-#define TIMER2_PRIORITY  2
-
-/* WDT */
-#define WDT_IRQ                27
-#define WDT_IPR_ADDR   INTC_IPRB
-#define WDT_IPR_POS     3
-#define WDT_PRIORITY    2
-
-#define SCIF0_ERI_IRQ  52
-#define SCIF0_RXI_IRQ  53
-#define SCIF0_BRI_IRQ  54
-#define SCIF0_TXI_IRQ  55
-#define SCIF0_IPR_ADDR INTC_IPRE
-#define SCIF0_IPR_POS  2
-#define SCIF0_PRIORITY 3
-
-#define DMTE4_IRQ      76
-#define DMTE5_IRQ      77
-#define DMA2_IPR_ADDR  INTC_IPRF
-#define DMA2_IPR_POS   2
-#define DMA2_PRIORITY  7
-
-#define IPSEC_IRQ      79
-#define IPSEC_IPR_ADDR INTC_IPRF
-#define IPSEC_IPR_POS  3
-#define IPSEC_PRIORITY 3
-
-/* EDMAC */
-#define EDMAC0_IRQ     80
-#define EDMAC0_IPR_ADDR        INTC_IPRG
-#define EDMAC0_IPR_POS 3
-#define EDMAC0_PRIORITY        3
-
-#define EDMAC1_IRQ     81
-#define EDMAC1_IPR_ADDR        INTC_IPRG
-#define EDMAC1_IPR_POS 2
-#define EDMAC1_PRIORITY        3
-
-#define EDMAC2_IRQ     82
-#define EDMAC2_IPR_ADDR        INTC_IPRG
-#define EDMAC2_IPR_POS 1
-#define EDMAC2_PRIORITY        3
-
-/* SIOF */
-#define SIOF0_ERI_IRQ  96
-#define SIOF0_TXI_IRQ  97
-#define SIOF0_RXI_IRQ  98
-#define SIOF0_CCI_IRQ  99
-#define SIOF0_IPR_ADDR INTC_IPRH
-#define SIOF0_IPR_POS  0
-#define SIOF0_PRIORITY 7
-
-#define SIOF1_ERI_IRQ  100
-#define SIOF1_TXI_IRQ  101
-#define SIOF1_RXI_IRQ  102
-#define SIOF1_CCI_IRQ  103
-#define SIOF1_IPR_ADDR INTC_IPRI
-#define SIOF1_IPR_POS  1
-#define SIOF1_PRIORITY 7
-#endif /* CONFIG_CPU_SUBTYPE_SH7710 */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
-#define PORT_PACR      0xa4050100UL
-#define PORT_PBCR      0xa4050102UL
-#define PORT_PCCR      0xa4050104UL
-#define PORT_PETCR     0xa4050106UL
-#define PORT_PADR      0xa4050120UL
-#define PORT_PBDR      0xa4050122UL
-#define PORT_PCDR      0xa4050124UL
-#else
-#define PORT_PACR      0xa4000100UL
-#define PORT_PBCR      0xa4000102UL
-#define PORT_PCCR      0xa4000104UL
-#define PORT_PFCR      0xa400010aUL
-#define PORT_PADR      0xa4000120UL
-#define PORT_PBDR      0xa4000122UL
-#define PORT_PCDR      0xa4000124UL
-#define PORT_PFDR      0xa400012aUL
-#endif
-
-#define IRQ0_IRQ       32
-#define IRQ1_IRQ       33
-#define IRQ2_IRQ       34
-#define IRQ3_IRQ       35
-#define IRQ4_IRQ       36
-#define IRQ5_IRQ       37
-
-#define IRQ0_IPR_ADDR  INTC_IPRC
-#define IRQ1_IPR_ADDR  INTC_IPRC
-#define IRQ2_IPR_ADDR  INTC_IPRC
-#define IRQ3_IPR_ADDR  INTC_IPRC
-#define IRQ4_IPR_ADDR  INTC_IPRD
-#define IRQ5_IPR_ADDR  INTC_IPRD
-
-#define IRQ0_IPR_POS   0
-#define IRQ1_IPR_POS   1
-#define IRQ2_IPR_POS   2
-#define IRQ3_IPR_POS   3
-#define IRQ4_IPR_POS   0
-#define IRQ5_IPR_POS   1
-
-#define IRQ0_PRIORITY  1
-#define IRQ1_PRIORITY  1
-#define IRQ2_PRIORITY  1
-#define IRQ3_PRIORITY  1
-#define IRQ4_PRIORITY  1
-#define IRQ5_PRIORITY  1
-
-#define PINT0_IRQ      40
-#define PINT8_IRQ      41
-
-#define PINT0_IPR_ADDR INTC_IPRD
-#define PINT8_IPR_ADDR INTC_IPRD
-
-#define PINT0_IPR_POS  3
-#define PINT8_IPR_POS  2
-#define PINT0_PRIORITY 2
-#define PINT8_PRIORITY 2
-
-extern int ipr_irq_demux(int irq);
-#define __irq_demux(irq) ipr_irq_demux(irq)
-#endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \
-    defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202)
-#define INTC_ICR        0xffd00000
-#define INTC_ICR_NMIL  (1<<15)
-#define INTC_ICR_MAI   (1<<14)
-#define INTC_ICR_NMIB  (1<<9)
-#define INTC_ICR_NMIE  (1<<8)
-#define INTC_ICR_IRLM  (1<<7)
-#endif
-
-#ifdef CONFIG_CPU_SUBTYPE_SH7780
-#include <asm/irq-sh7780.h>
-#endif
-
-/* SH with INTC2-style interrupts */
-#ifdef CONFIG_CPU_HAS_INTC2_IRQ
-#if defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-#define INTC2_BASE     0xfe080000
-#define INTC2_FIRST_IRQ 64
-#define INTC2_INTREQ_OFFSET    0x20
-#define INTC2_INTMSK_OFFSET    0x40
-#define INTC2_INTMSKCLR_OFFSET 0x60
-#define NR_INTC2_IRQS  25
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-#define INTC2_BASE     0xfe080000
-#define INTC2_FIRST_IRQ 48     /* INTEVT 0x800 */
-#define INTC2_INTREQ_OFFSET    0x20
-#define INTC2_INTMSK_OFFSET    0x40
-#define INTC2_INTMSKCLR_OFFSET 0x60
-#define NR_INTC2_IRQS  64
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-#define INTC2_BASE     0xffd40000
-#define INTC2_FIRST_IRQ        21
-#define INTC2_INTMSK_OFFSET    (0x38)
-#define INTC2_INTMSKCLR_OFFSET (0x3c)
-#define NR_INTC2_IRQS  60
-#endif
+/*
+ * Enable individual interrupt mode for external IPR IRQs.
+ */
+void ipr_irq_enable_irlm(void);
 
-#define INTC2_INTPRI_OFFSET    0x00
+/*
+ * Functions for "on chip support modules".
+ */
+void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs);
+void make_imask_irq(unsigned int irq);
+void init_IRQ_ipr(void);
 
 struct intc2_data {
        unsigned short irq;
@@ -693,20 +149,14 @@ struct intc2_data {
 
 void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs);
 void init_IRQ_intc2(void);
-#endif
-
-extern int shmse_irq_demux(int irq);
 
 static inline int generic_irq_demux(int irq)
 {
        return irq;
 }
 
-#ifndef __irq_demux
-#define __irq_demux(irq)       (irq)
-#endif
 #define irq_canonicalize(irq)  (irq)
-#define irq_demux(irq)         __irq_demux(sh_mv.mv_irq_demux(irq))
+#define irq_demux(irq)         sh_mv.mv_irq_demux(irq)
 
 #ifdef CONFIG_4KSTACKS
 extern void irq_ctx_init(int cpu);
@@ -717,12 +167,4 @@ extern void irq_ctx_exit(int cpu);
 # define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
-#if defined(CONFIG_CPU_SUBTYPE_SH73180)
-#include <asm/irq-sh73180.h>
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7343)
-#include <asm/irq-sh7343.h>
-#endif
-
 #endif /* __ASM_SH_IRQ_H */
diff --git a/include/asm-sh/irqflags.h b/include/asm-sh/irqflags.h
new file mode 100644 (file)
index 0000000..9dedc1b
--- /dev/null
@@ -0,0 +1,123 @@
+#ifndef __ASM_SH_IRQFLAGS_H
+#define __ASM_SH_IRQFLAGS_H
+
+static inline void raw_local_irq_enable(void)
+{
+       unsigned long __dummy0, __dummy1;
+
+       __asm__ __volatile__ (
+               "stc    sr, %0\n\t"
+               "and    %1, %0\n\t"
+#ifdef CONFIG_CPU_HAS_SR_RB
+               "stc    r6_bank, %1\n\t"
+               "or     %1, %0\n\t"
+#endif
+               "ldc    %0, sr\n\t"
+               : "=&r" (__dummy0), "=r" (__dummy1)
+               : "1" (~0x000000f0)
+               : "memory"
+       );
+}
+
+static inline void raw_local_irq_disable(void)
+{
+       unsigned long flags;
+
+       __asm__ __volatile__ (
+               "stc    sr, %0\n\t"
+               "or     #0xf0, %0\n\t"
+               "ldc    %0, sr\n\t"
+               : "=&z" (flags)
+               : /* no inputs */
+               : "memory"
+       );
+}
+
+static inline void set_bl_bit(void)
+{
+       unsigned long __dummy0, __dummy1;
+
+       __asm__ __volatile__ (
+               "stc    sr, %0\n\t"
+               "or     %2, %0\n\t"
+               "and    %3, %0\n\t"
+               "ldc    %0, sr\n\t"
+               : "=&r" (__dummy0), "=r" (__dummy1)
+               : "r" (0x10000000), "r" (0xffffff0f)
+               : "memory"
+       );
+}
+
+static inline void clear_bl_bit(void)
+{
+       unsigned long __dummy0, __dummy1;
+
+       __asm__ __volatile__ (
+               "stc    sr, %0\n\t"
+               "and    %2, %0\n\t"
+               "ldc    %0, sr\n\t"
+               : "=&r" (__dummy0), "=r" (__dummy1)
+               : "1" (~0x10000000)
+               : "memory"
+       );
+}
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+       unsigned long flags;
+
+       __asm__ __volatile__ (
+               "stc    sr, %0\n\t"
+               "and    #0xf0, %0\n\t"
+               : "=&z" (flags)
+               : /* no inputs */
+               : "memory"
+       );
+
+       return flags;
+}
+
+#define raw_local_save_flags(flags) \
+               do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+       return (flags != 0);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+       unsigned long flags = __raw_local_save_flags();
+
+       return raw_irqs_disabled_flags(flags);
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+       unsigned long flags, __dummy;
+
+       __asm__ __volatile__ (
+               "stc    sr, %1\n\t"
+               "mov    %1, %0\n\t"
+               "or     #0xf0, %0\n\t"
+               "ldc    %0, sr\n\t"
+               "mov    %1, %0\n\t"
+               "and    #0xf0, %0\n\t"
+               : "=&z" (flags), "=&r" (__dummy)
+               : /* no inputs */
+               : "memory"
+       );
+
+       return flags;
+}
+
+#define raw_local_irq_save(flags) \
+               do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+       if ((flags & 0xf0) != 0xf0)
+               raw_local_irq_enable();
+}
+
+#endif /* __ASM_SH_IRQFLAGS_H */
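The helpers above are written so that save/restore nests correctly: __raw_local_irq_save() returns only the four IMASK bits (SR[7:4]), and raw_local_irq_restore() re-enables interrupts only when those bits were not already fully masked. A minimal usage sketch, relying only on the macros defined in this header (the caller name is hypothetical):

/* Illustrative only: nesting the raw flag helpers from irqflags.h. */
static void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* flags now holds the previous IMASK field */
	/* ... work that must not be interrupted ... */
	raw_local_irq_restore(flags);	/* re-enables only if (flags & 0xf0) != 0xf0 */
}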
index c7088efe579add81dae62e2a3dd54134b06fef3b..46f04e23bd45ae9133c90c811ec1808f7832a14c 100644 (file)
@@ -10,7 +10,6 @@
 
 #include <asm/cpu/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -42,10 +41,8 @@ extern unsigned long mmu_context_cache;
 /*
  * Get MMU context if needed.
  */
-static __inline__ void
-get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm)
 {
-       extern void flush_tlb_all(void);
        unsigned long mc = mmu_context_cache;
 
        /* Check if we have old version of context. */
@@ -61,6 +58,7 @@ get_mmu_context(struct mm_struct *mm)
                 * Flush all TLB and start new cycle.
                 */
                flush_tlb_all();
+
                /*
                 * Fix version; Note that we avoid version #0
                 * to distinguish NO_CONTEXT.
@@ -75,11 +73,10 @@ get_mmu_context(struct mm_struct *mm)
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-static __inline__ int init_new_context(struct task_struct *tsk,
+static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
 {
        mm->context.id = NO_CONTEXT;
-
        return 0;
 }
 
@@ -87,12 +84,12 @@ static __inline__ int init_new_context(struct task_struct *tsk,
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-static __inline__ void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
        /* Do nothing */
 }
 
-static __inline__ void set_asid(unsigned long asid)
+static inline void set_asid(unsigned long asid)
 {
        unsigned long __dummy;
 
@@ -105,7 +102,7 @@ static __inline__ void set_asid(unsigned long asid)
                                "r" (0xffffff00));
 }
 
-static __inline__ unsigned long get_asid(void)
+static inline unsigned long get_asid(void)
 {
        unsigned long asid;
 
@@ -120,24 +117,29 @@ static __inline__ unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static __inline__ void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm)
 {
        get_mmu_context(mm);
        set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
 }
 
-/* MMU_TTB can be used for optimizing the fault handling.
-   (Currently not used) */
-static __inline__ void switch_mm(struct mm_struct *prev,
-                                struct mm_struct *next,
-                                struct task_struct *tsk)
+/* MMU_TTB is used for optimizing the fault handling. */
+static inline void set_TTB(pgd_t *pgd)
 {
-       if (likely(prev != next)) {
-               unsigned long __pgdir = (unsigned long)next->pgd;
+       ctrl_outl((unsigned long)pgd, MMU_TTB);
+}
 
-               __asm__ __volatile__("mov.l     %0, %1"
-                                    : /* no output */
-                                    : "r" (__pgdir), "m" (__m(MMU_TTB)));
+static inline pgd_t *get_TTB(void)
+{
+       return (pgd_t *)ctrl_inl(MMU_TTB);
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+                            struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       if (likely(prev != next)) {
+               set_TTB(next->pgd);
                activate_context(next);
        }
 }
@@ -147,7 +149,7 @@ static __inline__ void switch_mm(struct mm_struct *prev,
 #define activate_mm(prev, next) \
        switch_mm((prev),(next),NULL)
 
-static __inline__ void
+static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
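The context handling above packs an 8-bit hardware ASID into the low byte of mm->context.id (set_asid() preserves the upper PTEH bits by masking with 0xffffff00) and treats the remaining bits as a generation counter, so a stale context can be detected without touching the TLB. A small sketch of that layout; the EXAMPLE_* masks are hypothetical stand-ins for the MMU_CONTEXT_* constants defined elsewhere in this header:

/* Illustrative only: how a context id splits into generation + ASID. */
#define EXAMPLE_ASID_MASK	0x000000ffUL	/* stand-in for MMU_CONTEXT_ASID_MASK */
#define EXAMPLE_VERSION_MASK	0xffffff00UL	/* stand-in for the generation field */

static inline unsigned long example_asid(unsigned long context_id)
{
	/* e.g. context_id 0x00000203 -> generation 0x2, hardware ASID 0x03 */
	return context_id & EXAMPLE_ASID_MASK;
}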
index ca8b26d90475937f0d80dde07cd2b2c12e44127c..380fd62dd05ade8ac5806711c985ee96f94adf93 100644 (file)
    [ P4 control   ]            0xE0000000
  */
 
-
 /* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT     12
+#if defined(CONFIG_PAGE_SIZE_4KB)
+# define PAGE_SHIFT    12
+#elif defined(CONFIG_PAGE_SIZE_8KB)
+# define PAGE_SHIFT    13
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define PAGE_SHIFT    16
+#else
+# error "Bogus kernel page size?"
+#endif
 
 #ifdef __ASSEMBLY__
 #define PAGE_SIZE      (1 << PAGE_SHIFT)
 
 #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
 #define HPAGE_SHIFT    16
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+#define HPAGE_SHIFT    18
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
 #define HPAGE_SHIFT    20
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT    22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
+#define HPAGE_SHIFT    26
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -69,15 +82,25 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
 /*
  * These are used to make use of C type-checking..
  */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pgd; } pgd_t;
+#ifdef CONFIG_X2TLB
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pte_val(x) \
+       ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pte(x) \
+       ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+#else
+typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
+#define pte_val(x)     ((x).pte_low)
+#define __pte(x) ((pte_t) { (x) } )
+#endif
+
+typedef struct { unsigned long pgd; } pgd_t;
 
-#define pte_val(x)     ((x).pte)
 #define pgd_val(x)     ((x).pgd)
 #define pgprot_val(x)  ((x).pgprot)
 
-#define __pte(x) ((pte_t) { (x) } )
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)    ((pgprot_t) { (x) } )
 
index e841465ab4d24214955704852dde0b99a3da35e8..888e4529e6fe3c4bb2108f134b8b7674435836c3 100644 (file)
@@ -1,13 +1,16 @@
 #ifndef __ASM_SH_PGALLOC_H
 #define __ASM_SH_PGALLOC_H
 
-#define pmd_populate_kernel(mm, pmd, pte) \
-               set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+{
+       set_pmd(pmd, __pmd((unsigned long)pte));
+}
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                struct page *pte)
 {
-       set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
+       set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }
 
 /*
@@ -15,7 +18,16 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+
+       if (pgd) {
+               memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+               memcpy(pgd + USER_PTRS_PER_PGD,
+                      swapper_pg_dir + USER_PTRS_PER_PGD,
+                      (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+       }
+
+       return pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h
deleted file mode 100644 (file)
index b525db6..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef __ASM_SH_PGTABLE_2LEVEL_H
-#define __ASM_SH_PGTABLE_2LEVEL_H
-
-/*
- * traditional two-level paging structure:
- */
-
-#define PGDIR_SHIFT    22
-#define PTRS_PER_PGD   1024
-
-/*
- * this is two-level, so we don't really have any
- * PMD directory physically.
- */
-#define PMD_SHIFT      22
-#define PTRS_PER_PMD   1
-
-#define PTRS_PER_PTE   1024
-
-#ifndef __ASSEMBLY__
-#define pte_ERROR(e) \
-       printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
-       printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
-       printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)          { return 0; }
-static inline int pgd_bad(pgd_t pgd)           { return 0; }
-static inline int pgd_present(pgd_t pgd)       { return 1; }
-static inline void pgd_clear (pgd_t * pgdp)    { }
-
-/*
- * Certain architectures need to do special things when PTEs
- * within a page table are directly modified.  Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * (pmds are folded into pgds so this doesn't get actually called,
- * but the define is needed for a generic inline function.)
- */
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-#define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval)
-
-#define pgd_page_vaddr(pgd) \
-((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
-
-#define pgd_page(pgd) \
-       (phys_to_page(pgd_val(pgd)))
-
-static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
-{
-       return (pmd_t *) dir;
-}
-
-#define pte_pfn(x)             ((unsigned long)(((x).pte >> PAGE_SHIFT)))
-#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_SH_PGTABLE_2LEVEL_H */
index 2c8682ad1012531dcaaf54644865fea6b4a52ea0..c84901dbd8e51cf2097e591c6c439c232443d611 100644 (file)
 #include <asm-generic/pgtable-nopmd.h>
 #include <asm/page.h>
 
-#define PTRS_PER_PGD           1024
-
 #ifndef __ASSEMBLY__
 #include <asm/addrspace.h>
 #include <asm/fixmap.h>
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void paging_init(void);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -33,15 +28,28 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 #endif /* !__ASSEMBLY__ */
 
-/* traditional two-level paging structure */
-#define PGDIR_SHIFT    22
-#define PTRS_PER_PMD   1
-#define PTRS_PER_PTE   1024
-#define PMD_SIZE       (1UL << PMD_SHIFT)
-#define PMD_MASK       (~(PMD_SIZE-1))
-#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+/*
+ * traditional two-level paging structure
+ */
+/* PTE bits */
+#ifdef CONFIG_X2TLB
+# define PTE_MAGNITUDE 3       /* 64-bit PTEs on extended mode SH-X2 TLB */
+#else
+# define PTE_MAGNITUDE 2       /* 32-bit PTEs */
+#endif
+#define PTE_SHIFT      PAGE_SHIFT
+#define PTE_BITS       (PTE_SHIFT - PTE_MAGNITUDE)
+
+/* PGD bits */
+#define PGDIR_SHIFT    (PTE_SHIFT + PTE_BITS)
+#define PGDIR_BITS     (32 - PGDIR_SHIFT)
+#define PGDIR_SIZE     (1 << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
+/* Entries per level */
+#define PTRS_PER_PTE   (PAGE_SIZE / 4)
+#define PTRS_PER_PGD   (PAGE_SIZE / 4)
+
 #define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS     0
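Worked through for the common configuration (CONFIG_PAGE_SIZE_4KB with 32-bit PTEs, so PTE_MAGNITUDE is 2), the definitions above reproduce the traditional two-level geometry. The arithmetic below is simply those macros evaluated by hand:

/* PAGE_SHIFT  = 12, PTE_MAGNITUDE = 2                              */
/* PTE_BITS    = 12 - 2  = 10   ->  PTRS_PER_PTE = 4096 / 4 = 1024  */
/* PGDIR_SHIFT = 12 + 10 = 22   ->  PGDIR_SIZE   = 4 MB             */
/* PGDIR_BITS  = 32 - 22 = 10   ->  PTRS_PER_PGD = 4096 / 4 = 1024  */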
 
@@ -49,7 +57,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 /*
  * First 1MB map is used by fixed purpose.
- * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
+ * Currently only 4-entry (16kB) is used (see arch/sh/mm/cache.c)
  */
 #define VMALLOC_START  (P3SEG+0x00100000)
 #define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
@@ -57,7 +65,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 /*
  * Linux PTEL encoding.
  *
- * Hardware and software bit definitions for the PTEL value:
+ * Hardware and software bit definitions for the PTEL value (see below for
+ * notes on SH-X2 MMUs and 64-bit PTEs):
  *
  * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
  *
@@ -76,20 +85,57 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  *
  * - Bits 31, 30, and 29 remain unused by everyone and can be used for future
  *   software flags, although care must be taken to update _PAGE_CLEAR_FLAGS.
+ *
+ * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
+ *
+ * SH-X2 MMUs and extended PTEs
+ *
+ * SH-X2 supports an extended mode TLB with split data arrays due to the
+ * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
+ * SZ bit placeholders still exist in data array 1, but are implemented as
+ * reserved bits, with the real logic existing in data array 2.
+ *
+ * The downside to this is that we can no longer fit everything in to a 32-bit
+ * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
+ * side, this gives us quite a few spare bits to play with for future usage.
  */
+/* Legacy and compat mode bits */
 #define        _PAGE_WT        0x001           /* WT-bit on SH-4, 0 on SH-3 */
 #define _PAGE_HW_SHARED        0x002           /* SH-bit  : shared among processes */
 #define _PAGE_DIRTY    0x004           /* D-bit   : page changed */
 #define _PAGE_CACHABLE 0x008           /* C-bit   : cachable */
-#define _PAGE_SZ0      0x010           /* SZ0-bit : Size of page */
-#define _PAGE_RW       0x020           /* PR0-bit : write access allowed */
-#define _PAGE_USER     0x040           /* PR1-bit : user space access allowed */
-#define _PAGE_SZ1      0x080           /* SZ1-bit : Size of page (on SH-4) */
+#ifndef CONFIG_X2TLB
+# define _PAGE_SZ0     0x010           /* SZ0-bit : Size of page */
+# define _PAGE_RW      0x020           /* PR0-bit : write access allowed */
+# define _PAGE_USER    0x040           /* PR1-bit : user space access allowed*/
+# define _PAGE_SZ1     0x080           /* SZ1-bit : Size of page (on SH-4) */
+#endif
 #define _PAGE_PRESENT  0x100           /* V-bit   : page is valid */
 #define _PAGE_PROTNONE 0x200           /* software: if not present  */
 #define _PAGE_ACCESSED 0x400           /* software: page referenced */
 #define _PAGE_FILE     _PAGE_WT        /* software: pagecache or swap? */
 
+/* Extended mode bits */
+#define _PAGE_EXT_ESZ0         0x0010  /* ESZ0-bit: Size of page */
+#define _PAGE_EXT_ESZ1         0x0020  /* ESZ1-bit: Size of page */
+#define _PAGE_EXT_ESZ2         0x0040  /* ESZ2-bit: Size of page */
+#define _PAGE_EXT_ESZ3         0x0080  /* ESZ3-bit: Size of page */
+
+#define _PAGE_EXT_USER_EXEC    0x0100  /* EPR0-bit: User space executable */
+#define _PAGE_EXT_USER_WRITE   0x0200  /* EPR1-bit: User space writable */
+#define _PAGE_EXT_USER_READ    0x0400  /* EPR2-bit: User space readable */
+
+#define _PAGE_EXT_KERN_EXEC    0x0800  /* EPR3-bit: Kernel space executable */
+#define _PAGE_EXT_KERN_WRITE   0x1000  /* EPR4-bit: Kernel space writable */
+#define _PAGE_EXT_KERN_READ    0x2000  /* EPR5-bit: Kernel space readable */
+
+/* Wrapper for extended mode pgprot twiddling */
+#ifdef CONFIG_X2TLB
+# define _PAGE_EXT(x)          ((unsigned long long)(x) << 32)
+#else
+# define _PAGE_EXT(x)          (0)
+#endif
+
 /* software: moves to PTEA.TC (Timing Control) */
 #define _PAGE_PCC_AREA5        0x00000000      /* use BSC registers for area5 */
 #define _PAGE_PCC_AREA6        0x80000000      /* use BSC registers for area6 */
@@ -114,37 +160,160 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 #define _PAGE_FLAGS_HARDWARE_MASK      (0x1fffffff & ~(_PAGE_CLEAR_FLAGS))
 
-/* Hardware flags: SZ0=1 (4k-byte) */
-#define _PAGE_FLAGS_HARD       _PAGE_SZ0
+/* Hardware flags, page size encoding */
+#if defined(CONFIG_X2TLB)
+# if defined(CONFIG_PAGE_SIZE_4KB)
+#  define _PAGE_FLAGS_HARD     _PAGE_EXT(_PAGE_EXT_ESZ0)
+# elif defined(CONFIG_PAGE_SIZE_8KB)
+#  define _PAGE_FLAGS_HARD     _PAGE_EXT(_PAGE_EXT_ESZ1)
+# elif defined(CONFIG_PAGE_SIZE_64KB)
+#  define _PAGE_FLAGS_HARD     _PAGE_EXT(_PAGE_EXT_ESZ2)
+# endif
+#else
+# if defined(CONFIG_PAGE_SIZE_4KB)
+#  define _PAGE_FLAGS_HARD     _PAGE_SZ0
+# elif defined(CONFIG_PAGE_SIZE_64KB)
+#  define _PAGE_FLAGS_HARD     _PAGE_SZ1
+# endif
+#endif
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE   (_PAGE_SZ1)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#define _PAGE_SZHUGE   (_PAGE_SZ0 | _PAGE_SZ1)
+#if defined(CONFIG_X2TLB)
+# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#  define _PAGE_SZHUGE (_PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+#  define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#  define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#  define _PAGE_SZHUGE (_PAGE_EXT_ESZ3)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
+#  define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
+# endif
+#else
+# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#  define _PAGE_SZHUGE (_PAGE_SZ1)
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
+#  define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1)
+# endif
+#endif
+
+/*
+ * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
+ * to make pte_mkhuge() happy.
+ */
+#ifndef _PAGE_SZHUGE
+# define _PAGE_SZHUGE  (_PAGE_FLAGS_HARD)
 #endif
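Taken together with the HPAGE_SHIFT selection added to page.h earlier in this patch, the huge-page size encodings stay consistent; for example, under CONFIG_X2TLB with CONFIG_HUGETLB_PAGE_SIZE_64K:

/* HPAGE_SHIFT  = 16              ->  HPAGE_SIZE = 64 kB                 */
/* _PAGE_SZHUGE = _PAGE_EXT_ESZ2  ->  pte_mkhuge() sets ESZ2 in pte_high */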
 
-#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK \
+       (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
-#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
-#define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
+#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
+                                _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_USER_READ | \
+                                          _PAGE_EXT_USER_WRITE))
+
+#define PAGE_EXECREAD  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
+                                          _PAGE_EXT_USER_READ))
+
+#define PAGE_COPY      PAGE_EXECREAD
+
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_USER_READ))
+
+#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_USER_WRITE))
+
+#define PAGE_RWX       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+                                _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
+                                          _PAGE_EXT_USER_READ  | \
+                                          _PAGE_EXT_USER_EXEC))
+
+#define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+                                _PAGE_DIRTY | _PAGE_ACCESSED | \
+                                _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_EXEC))
+
 #define PAGE_KERNEL_NOCACHE \
-                       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+                       __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+                                _PAGE_ACCESSED | _PAGE_HW_SHARED | \
+                                _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_EXEC))
+
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+                                _PAGE_DIRTY | _PAGE_ACCESSED | \
+                                _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+                                          _PAGE_EXT_KERN_EXEC))
+
+#define PAGE_KERNEL_PCC(slot, type) \
+                       __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
+                                _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
+                                _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_EXEC) | \
+                                (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
+                                (type))
+
+#elif defined(CONFIG_MMU) /* SH-X TLB */
+#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
+                                _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+                                _PAGE_CACHABLE | _PAGE_ACCESSED | \
+                                _PAGE_FLAGS_HARD)
+
+#define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
+                                _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
+                                _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
+
+#define PAGE_EXECREAD  PAGE_READONLY
+#define PAGE_RWX       PAGE_SHARED
+#define PAGE_WRITEONLY PAGE_SHARED
+
+#define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
+                                _PAGE_DIRTY | _PAGE_ACCESSED | \
+                                _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_NOCACHE \
+                       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+                                _PAGE_ACCESSED | _PAGE_HW_SHARED | \
+                                _PAGE_FLAGS_HARD)
+
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
+                                _PAGE_DIRTY | _PAGE_ACCESSED | \
+                                _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)
+
 #define PAGE_KERNEL_PCC(slot, type) \
-                       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type))
+                       __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
+                                _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
+                                (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
+                                (type))
 #else /* no mmu */
 #define PAGE_NONE              __pgprot(0)
 #define PAGE_SHARED            __pgprot(0)
 #define PAGE_COPY              __pgprot(0)
+#define PAGE_EXECREAD          __pgprot(0)
+#define PAGE_RWX               __pgprot(0)
 #define PAGE_READONLY          __pgprot(0)
+#define PAGE_WRITEONLY         __pgprot(0)
 #define PAGE_KERNEL            __pgprot(0)
 #define PAGE_KERNEL_NOCACHE    __pgprot(0)
 #define PAGE_KERNEL_RO         __pgprot(0)
@@ -154,27 +323,32 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #endif /* __ASSEMBLY__ */
 
 /*
- * As i386 and MIPS, SuperH can't do page protection for execute, and
- * considers that the same as a read.  Also, write permissions imply
- * read permissions. This is the closest we can get..
+ * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
+ * protection for execute, and considers it the same as a read. Also, write
+ * permission implies read permission. This is the closest we can get..
+ *
+ * SH-X2 (SH7785) and later parts take this to the opposite extreme,
+ * not only supporting separate execute, read, and write bits, but having
+ * completely separate permission bits for user and kernel space.
  */
+        /*xwr*/
 #define __P000 PAGE_NONE
 #define __P001 PAGE_READONLY
 #define __P010 PAGE_COPY
 #define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
+#define __P100 PAGE_EXECREAD
+#define __P101 PAGE_EXECREAD
 #define __P110 PAGE_COPY
 #define __P111 PAGE_COPY
 
 #define __S000 PAGE_NONE
 #define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
+#define __S010 PAGE_WRITEONLY
 #define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+#define __S100 PAGE_EXECREAD
+#define __S101 PAGE_EXECREAD
+#define __S110 PAGE_RWX
+#define __S111 PAGE_RWX
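Reading the tables above: the index is the mapping's shared/exec/write/read request, so an execute-only shared mapping (__S100) resolves to PAGE_EXECREAD, which on SH-X2 grants only _PAGE_EXT_USER_EXEC | _PAGE_EXT_USER_READ, while on legacy parts it simply aliases PAGE_READONLY. A hypothetical helper showing how the generic protection_map[] array, which these __P/__S entries populate, is indexed:

/* Illustrative only: vma protection bits -> one of the __P/__S entries above. */
static pgprot_t example_vm_prot(unsigned long vm_flags)
{
	/* VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8 index a 16-entry table */
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}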
 
 #ifndef __ASSEMBLY__
 
@@ -183,7 +357,17 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  * within a page table are directly modified.  Thus, the following
  * hook is made available.
  */
+#ifdef CONFIG_X2TLB
+static inline void set_pte(pte_t *ptep, pte_t pte)
+{
+       ptep->pte_high = pte.pte_high;
+       smp_wmb();
+       ptep->pte_low = pte.pte_low;
+}
+#else
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+#endif
+
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
@@ -192,18 +376,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  */
 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
 
-#define pte_pfn(x)             ((unsigned long)(((x).pte >> PAGE_SHIFT)))
+#define pte_pfn(x)             ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
 #define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define pte_none(x)    (!pte_val(x))
 #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)  do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pmd_none(x)    (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_present(x) (pmd_val(x))
 #define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
-#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define        pmd_bad(x)      (pmd_val(x) & ~PAGE_MASK)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)    phys_to_page(pte_val(x)&PTE_PHYS_MASK)
@@ -212,28 +396,52 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_not_present(pte_t pte){ return !(pte_val(pte) & _PAGE_PRESENT); }
-
-static inline pte_t pte_rdprotect(pte_t pte)   { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-static inline pte_t pte_exprotect(pte_t pte)   { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-static inline pte_t pte_mkclean(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte)       { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)   { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkread(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-#ifdef CONFIG_HUGETLB_PAGE
-static inline pte_t pte_mkhuge(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+#define pte_not_present(pte)   (!(pte_val(pte) & _PAGE_PRESENT))
+#define pte_dirty(pte)         (pte_val(pte) & _PAGE_DIRTY)
+#define pte_young(pte)         (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_file(pte)          (pte_val(pte) & _PAGE_FILE)
+
+#ifdef CONFIG_X2TLB
+#define pte_read(pte)          ((pte).pte_high & _PAGE_EXT_USER_READ)
+#define pte_exec(pte)          ((pte).pte_high & _PAGE_EXT_USER_EXEC)
+#define pte_write(pte)         ((pte).pte_high & _PAGE_EXT_USER_WRITE)
+#else
+#define pte_read(pte)          (pte_val(pte) & _PAGE_USER)
+#define pte_exec(pte)          (pte_val(pte) & _PAGE_USER)
+#define pte_write(pte)         (pte_val(pte) & _PAGE_RW)
 #endif
 
+#define PTE_BIT_FUNC(h,fn,op) \
+static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
+
+#ifdef CONFIG_X2TLB
+/*
+ * We cheat a bit in the SH-X2 TLB case. As the permission bits are
+ * individually toggled (and user permissions are entirely decoupled from
+ * kernel permissions), we attempt to couple them a bit more sanely here.
+ */
+PTE_BIT_FUNC(high, rdprotect, &= ~_PAGE_EXT_USER_READ);
+PTE_BIT_FUNC(high, mkread, |= _PAGE_EXT_USER_READ | _PAGE_EXT_KERN_READ);
+PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
+PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+PTE_BIT_FUNC(high, exprotect, &= ~_PAGE_EXT_USER_EXEC);
+PTE_BIT_FUNC(high, mkexec, |= _PAGE_EXT_USER_EXEC | _PAGE_EXT_KERN_EXEC);
+PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
+#else
+PTE_BIT_FUNC(low, rdprotect, &= ~_PAGE_USER);
+PTE_BIT_FUNC(low, mkread, |= _PAGE_USER);
+PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
+PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
+PTE_BIT_FUNC(low, exprotect, &= ~_PAGE_USER);
+PTE_BIT_FUNC(low, mkexec, |= _PAGE_USER);
+PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
+#endif
+
+PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
+PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
+PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
+PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
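For reference, each PTE_BIT_FUNC() line above stamps out one small helper; PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY) expands to the function below, and the X2TLB variants differ only in operating on pte_high:

/* Expansion of PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY), shown for clarity. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_DIRTY;
	return pte;
}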
+
 /*
  * Macro and implementation to make a page protection as uncachable.
  */
@@ -258,13 +466,14 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
 #define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
-
-#define pmd_page_vaddr(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+{
+       set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+                           pgprot_val(newprot)));
+       return pte;
+}
 
-#define pmd_page(pmd) \
-       (phys_to_page(pmd_val(pmd)))
+#define pmd_page_vaddr(pmd)    pmd_val(pmd)
+#define pmd_page(pmd)          (virt_to_page(pmd_val(pmd)))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
@@ -283,8 +492,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte)         do { } while (0)
 #define pte_unmap_nested(pte)  do { } while (0)
 
+#ifdef CONFIG_X2TLB
+#define pte_ERROR(e) \
+       printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
+              &(e), (e).pte_high, (e).pte_low)
+#else
 #define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#endif
+
 #define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
@@ -337,6 +553,9 @@ extern unsigned int kobjsize(const void *objp);
 extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 #endif
 
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init(void);
+
 #include <asm-generic/pgtable.h>
 
 #endif /* !__ASSEMBLY__ */
index 45bb74e35d325acbbccc4607528ea2c4e07d0a59..6f1dd7ca1b1d68515bab5f3ebaa08efd1275fbd5 100644 (file)
  */
 enum cpu_type {
        /* SH-2 types */
-       CPU_SH7604,
+       CPU_SH7604, CPU_SH7619,
+
+       /* SH-2A types */
+       CPU_SH7206,
 
        /* SH-3 types */
        CPU_SH7705, CPU_SH7706, CPU_SH7707,
@@ -47,7 +50,10 @@ enum cpu_type {
        /* SH-4 types */
        CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R,
        CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501,
+
+       /* SH-4A types */
        CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781,
+       CPU_SH7785,
 
        /* Unknown subtype */
        CPU_SH_NONE
@@ -130,12 +136,11 @@ union sh_fpu_union {
 };
 
 struct thread_struct {
+       /* Saved registers when thread is descheduled */
        unsigned long sp;
        unsigned long pc;
 
-       unsigned long trap_no, error_code;
-       unsigned long address;
-       /* Hardware debugging registers may come here */
+       /* Hardware debugging registers */
        unsigned long ubc_pc;
 
        /* floating point info */
@@ -150,12 +155,7 @@ typedef struct {
 extern int ubc_usercnt;
 
 #define INIT_THREAD  {                                         \
-       sizeof(init_stack) + (long) &init_stack, /* sp */       \
-       0,                                       /* pc */       \
-       0, 0,                                                   \
-       0,                                                      \
-       0,                                                      \
-       {{{0,}},}                               /* fpu state */ \
+       .sp = sizeof(init_stack) + (long) &init_stack,          \
 }
 
 /*
@@ -259,8 +259,8 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
                struct pt_regs *regs);
 extern unsigned long get_wchan(struct task_struct *p);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread.pc)
-#define KSTK_ESP(tsk)  ((tsk)->thread.sp)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
 
 #define cpu_sleep()    __asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()    barrier()
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h
new file mode 100644 (file)
index 0000000..dfc6bad
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef __ASM_SH_PUSH_SWITCH_H
+#define __ASM_SH_PUSH_SWITCH_H
+
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+struct push_switch {
+       /* switch state */
+       unsigned int            state:1;
+       /* debounce timer */
+       struct timer_list       debounce;
+       /* workqueue */
+       struct work_struct      work;
+};
+
+struct push_switch_platform_info {
+       /* IRQ handler */
+       irqreturn_t             (*irq_handler)(int irq, void *data);
+       /* Special IRQ flags */
+       unsigned int            irq_flags;
+       /* Bit location of switch */
+       unsigned int            bit;
+       /* Symbolic switch name */
+       const char              *name;
+};
+
+#endif /* __ASM_SH_PUSH_SWITCH_H */
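A board that carries one of these switches is expected to describe it with push_switch_platform_info and hand it to the driver as platform data; a hypothetical board-side sketch (the "push-switch" device name, IRQ flag and bit number are assumptions, not taken from this header):

/* Illustrative only: hypothetical platform description of one push switch. */
static struct push_switch_platform_info example_switch_info = {
	.irq_flags	= IRQF_SHARED,		/* assumed flag */
	.bit		= 4,			/* assumed bit position */
	.name		= "example-sw0",
};

static struct platform_device example_switch_device = {
	.name	= "push-switch",		/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data	= &example_switch_info,
	},
};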
index 9d2aea5e848854c170c21f901f999ea13a0e9492..4931ba817d734aa8192374ef9e563d69b48f4c94 100644 (file)
@@ -25,11 +25,21 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS                (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map      dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-         LIST_HEAD_INIT((name).wait_list) }
+         LIST_HEAD_INIT((name).wait_list) \
+         __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name)            \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -39,6 +49,16 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+                        struct lock_class_key *key);
+
+#define init_rwsem(sem)                                \
+do {                                           \
+       static struct lock_class_key __key;     \
+                                               \
+       __init_rwsem((sem), #sem, &__key);      \
+} while (0)
+
 static inline void init_rwsem(struct rw_semaphore *sem)
 {
        sem->count = RWSEM_UNLOCKED_VALUE;
@@ -141,6 +161,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+       __down_write(sem);
+}
+
 /*
  * implement exchange and add functionality
  */
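With CONFIG_DEBUG_LOCK_ALLOC enabled, the init_rwsem() macro added above gives every initialisation site its own static lock_class_key, letting lockdep distinguish semaphores that are set up from different places; a hedged usage sketch:

/* Illustrative only: runtime initialisation now routed through __init_rwsem(). */
static struct rw_semaphore example_sem;

static int __init example_init(void)
{
	init_rwsem(&example_sem);	/* declares a static lock_class_key for this call site */
	return 0;
}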
diff --git a/include/asm-sh/se7206.h b/include/asm-sh/se7206.h
new file mode 100644 (file)
index 0000000..698eb80
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __ASM_SH_SE7206_H
+#define __ASM_SH_SE7206_H
+
+#define PA_SMSC                0x30000000
+#define PA_MRSHPC      0x34000000
+#define PA_LED         0x31400000
+
+void init_se7206_IRQ(void);
+
+#define __IO_PREFIX    se7206
+#include <asm/io_generic.h>
+
+#endif /* __ASM_SH_SE7206_H */
index 34ca8a7f06ba86bec5bfc45b2f778c71b3164536..1583c6b7bdaa6a4d411895a5af7e4311185b2a0a 100644 (file)
@@ -1,10 +1,12 @@
-#ifdef __KERNEL__
 #ifndef _SH_SETUP_H
 #define _SH_SETUP_H
 
 #define COMMAND_LINE_SIZE 256
 
+#ifdef __KERNEL__
+
 int setup_early_printk(char *);
 
-#endif /* _SH_SETUP_H */
 #endif /* __KERNEL__ */
+
+#endif /* _SH_SETUP_H */
index 3340126f4e0fc7785cef0ed7e4c191583fdc6e35..b1e42e7f998b5639a41f57281a35d1d89c7c1ab6 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2002 Paul Mundt
  */
 
+#include <linux/irqflags.h>
 #include <asm/types.h>
 
 /*
@@ -131,103 +132,6 @@ static inline unsigned long tas(volatile int *m)
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 
-/* Interrupt Control */
-#ifdef CONFIG_CPU_HAS_SR_RB
-static inline void local_irq_enable(void)
-{
-       unsigned long __dummy0, __dummy1;
-
-       __asm__ __volatile__("stc       sr, %0\n\t"
-                            "and       %1, %0\n\t"
-                            "stc       r6_bank, %1\n\t"
-                            "or        %1, %0\n\t"
-                            "ldc       %0, sr"
-                            : "=&r" (__dummy0), "=r" (__dummy1)
-                            : "1" (~0x000000f0)
-                            : "memory");
-}
-#else
-static inline void local_irq_enable(void)
-{
-       unsigned long __dummy0, __dummy1;
-
-       __asm__ __volatile__ (
-               "stc    sr, %0\n\t"
-               "and    %1, %0\n\t"
-               "ldc    %0, sr\n\t"
-               : "=&r" (__dummy0), "=r" (__dummy1)
-               : "1" (~0x000000f0)
-               : "memory");
-}
-#endif
-
-static inline void local_irq_disable(void)
-{
-       unsigned long __dummy;
-       __asm__ __volatile__("stc       sr, %0\n\t"
-                            "or        #0xf0, %0\n\t"
-                            "ldc       %0, sr"
-                            : "=&z" (__dummy)
-                            : /* no inputs */
-                            : "memory");
-}
-
-static inline void set_bl_bit(void)
-{
-       unsigned long __dummy0, __dummy1;
-
-       __asm__ __volatile__ ("stc      sr, %0\n\t"
-                            "or        %2, %0\n\t"
-                            "and       %3, %0\n\t"
-                            "ldc       %0, sr"
-                            : "=&r" (__dummy0), "=r" (__dummy1)
-                            : "r" (0x10000000), "r" (0xffffff0f)
-                            : "memory");
-}
-
-static inline void clear_bl_bit(void)
-{
-       unsigned long __dummy0, __dummy1;
-
-       __asm__ __volatile__ ("stc      sr, %0\n\t"
-                            "and       %2, %0\n\t"
-                            "ldc       %0, sr"
-                            : "=&r" (__dummy0), "=r" (__dummy1)
-                            : "1" (~0x10000000)
-                            : "memory");
-}
-
-#define local_save_flags(x) \
-       __asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" )
-
-#define irqs_disabled()                        \
-({                                     \
-       unsigned long flags;            \
-       local_save_flags(flags);        \
-       (flags != 0);                   \
-})
-
-static inline unsigned long local_irq_save(void)
-{
-       unsigned long flags, __dummy;
-
-       __asm__ __volatile__("stc       sr, %1\n\t"
-                            "mov       %1, %0\n\t"
-                            "or        #0xf0, %0\n\t"
-                            "ldc       %0, sr\n\t"
-                            "mov       %1, %0\n\t"
-                            "and       #0xf0, %0"
-                            : "=&z" (flags), "=&r" (__dummy)
-                            :/**/
-                            : "memory" );
-       return flags;
-}
-
-#define local_irq_restore(x) do {                      \
-       if ((x & 0x000000f0) != 0x000000f0)             \
-               local_irq_enable();                     \
-} while (0)
-
 /*
  * Jump to P2 area.
  * When handling TLB or caches, we need to do it from P2 area.
@@ -264,9 +168,6 @@ do {                                                        \
                : "=&r" (__dummy));                     \
 } while (0)
 
-/* For spinlocks etc */
-#define local_irq_save(x)      x = local_irq_save()
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
        unsigned long flags, retval;
index 3ebc3f9039ebe3f6cebdaa43c913c5c93dbd0547..0c01dc550819e865f52b8c135797220abecce60c 100644 (file)
@@ -90,13 +90,7 @@ static inline struct thread_info *current_thread_info(void)
 #endif
 #define free_thread_info(ti)   kfree(ti)
 
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg) \
-       stc     r7_bank, reg
-
-#endif
+#endif /* __ASSEMBLY__ */
 
 /*
  * thread information flags
index 5df842bcf7b63d554ef6486b11e07a663514c373..17b5e76a4c3131471599b52bb3aea12ce78aad43 100644 (file)
@@ -18,11 +18,32 @@ struct sys_timer {
 
        struct sys_device       dev;
        struct sys_timer_ops    *ops;
+
+#ifdef CONFIG_NO_IDLE_HZ
+       struct dyn_tick_timer   *dyn_tick;
+#endif
 };
 
+#ifdef CONFIG_NO_IDLE_HZ
+#define DYN_TICK_ENABLED       (1 << 1)
+
+struct dyn_tick_timer {
+       spinlock_t      lock;
+       unsigned int    state;                  /* Current state */
+       int             (*enable)(void);        /* Enables dynamic tick */
+       int             (*disable)(void);       /* Disables dynamic tick */
+       void            (*reprogram)(unsigned long); /* Reprograms the timer */
+       int             (*handler)(int, void *);
+};
+
+void timer_dyn_reprogram(void);
+#else
+#define timer_dyn_reprogram()  do { } while (0)
+#endif
+
 #define TICK_SIZE (tick_nsec / 1000)
 
-extern struct sys_timer tmu_timer;
+extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer;
 extern struct sys_timer *sys_timer;
 
 #ifndef CONFIG_GENERIC_TIME
index 270a4f4bc8a9c3689e67fca7de04ae29c070dda1..03f3583c891882803cf600e33e59aa4231a932ed 100644 (file)
@@ -1,9 +1,8 @@
 /*
  * Platform definitions for Titan
  */
-
-#ifndef _ASM_SH_TITAN_TITAN_H
-#define _ASM_SH_TITAN_TITAN_H
+#ifndef _ASM_SH_TITAN_H
+#define _ASM_SH_TITAN_H
 
 #define __IO_PREFIX titan
 #include <asm/io_generic.h>
 #define TITAN_IRQ_MPCIB                11      /* mPCI B */
 #define TITAN_IRQ_USB          11      /* USB */
 
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt.  In fact the priority
- * is the interrupt :-)
- */
-#define IRL0_IRQ       0
-#define IRL0_IPR_ADDR  INTC_IPRD
-#define IRL0_IPR_POS   3
-#define IRL0_PRIORITY  8
-
-#define IRL1_IRQ       1
-#define IRL1_IPR_ADDR  INTC_IPRD
-#define IRL1_IPR_POS   2
-#define IRL1_PRIORITY  8
-
-#define IRL2_IRQ       2
-#define IRL2_IPR_ADDR  INTC_IPRD
-#define IRL2_IPR_POS   1
-#define IRL2_PRIORITY  8
-
-#define IRL3_IRQ       3
-#define IRL3_IPR_ADDR  INTC_IPRD
-#define IRL3_IPR_POS   0
-#define IRL3_PRIORITY  8
-
-#endif
+#endif /* __ASM_SH_TITAN_H */
index 3c09dd4ca31cabb7d59e3d56415a2638839eeb1a..fd00dbb82f84ff8122784d663578dd5ca935d8fa 100644 (file)
@@ -52,16 +52,6 @@ typedef unsigned long long u64;
 
 typedef u32 dma_addr_t;
 
-#ifdef CONFIG_LBD
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-#endif
-
-#ifdef CONFIG_LSF
-typedef u64 blkcnt_t;
-#define HAVE_BLKCNT_T
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index 1c2abde122cd962ccf5713af59793eec463d4d07..f982073dc6c68250b580b11c814a5d77afed72dc 100644 (file)
 
 #ifdef __KERNEL__
 
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-       /* Avoid using "res" which is declared to be in register r0; \
-          errno might expand to a function call and clobber it.  */ \
-               int __err = -(res); \
-               errno = __err; \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-__asm__ __volatile__ ("trapa   #0x10" \
-       : "=z" (__sc0) \
-       : "0" (__sc0) \
-       : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-__asm__ __volatile__ ("trapa   #0x11" \
-       : "=z" (__sc0) \
-       : "0" (__sc0), "r" (__sc4) \
-       : "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-__asm__ __volatile__ ("trapa   #0x12" \
-       : "=z" (__sc0) \
-       : "0" (__sc0), "r" (__sc4), "r" (__sc5) \
-       : "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-__asm__ __volatile__ ("trapa   #0x13" \
-       : "=z" (__sc0) \
-       : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
-       : "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-__asm__ __volatile__ ("trapa   #0x14" \
-       : "=z" (__sc0) \
-       : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6),  \
-         "r" (__sc7) \
-       : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-__asm__ __volatile__ ("trapa   #0x15" \
-       : "=z" (__sc0) \
-       : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7),  \
-         "r" (__sc3) \
-       : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-register long __sc1 __asm__ ("r1") = (long) arg6; \
-__asm__ __volatile__ ("trapa   #0x16" \
-       : "=z" (__sc0) \
-       : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7),  \
-         "r" (__sc3), "r" (__sc1) \
-       : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
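The _syscallN wrappers removed above also documented the SH trap ABI: the syscall number goes in r3, arguments in r4-r7 (then r0 and r1 for the fifth and sixth), the trap is "trapa #0x10 + number of arguments", and the result returns in r0. A minimal sketch of the same one-argument sequence under those assumptions (user code normally reaches this through libc):

/* Illustrative only: raw one-argument system call, mirroring the removed _syscall1. */
static long example_raw_syscall1(long nr, long arg1)
{
	register long r3 __asm__("r3") = nr;	/* syscall number */
	register long r4 __asm__("r4") = arg1;	/* first argument */
	long res;

	__asm__ __volatile__("trapa	#0x11"	/* 0x10 + 1 argument */
			     : "=z" (res)	/* "z" == r0 holds the return value */
			     : "r" (r3), "r" (r4)
			     : "memory");

	return res;	/* values in -1..-MAX_ERRNO indicate an error */
}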
index 68e27a8fca31f20d90257d454251a31b58e180b5..5efe906c59f7afe5888dbf71293bc1dc7545f10a 100644 (file)
@@ -35,7 +35,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
        consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
 {
        dma_cache_wback_inv((unsigned long)vaddr, size);
index ebd42eb1b709b914f04d9bbd6339703a7777d941..5b07b14c2927c24212692f9b1334fc81f58ce492 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef __ASM_SH64_SETUP_H
 #define __ASM_SH64_SETUP_H
 
+#define COMMAND_LINE_SIZE 256
+
+#ifdef __KERNEL__
+
 #define PARAM ((unsigned char *)empty_zero_page)
 #define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
 #define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
@@ -12,5 +16,7 @@
 #define COMMAND_LINE ((char *) (PARAM+256))
 #define COMMAND_LINE_SIZE 256
 
+#endif  /*  __KERNEL__  */
+
 #endif /* __ASM_SH64_SETUP_H */
 
index ee7828b27ad19334f694a440330cfb03ee56298a..1f38a7aacaafc8a2343821452c9ac5c092c03fab 100644 (file)
 #ifdef __KERNEL__ 
 
 #define NR_syscalls 321
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh64/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
-       /* Note: when returning from kernel the return value is in r9       \
-       **       This prevents conflicts between return value and arg1      \
-       **       when dispatching signal handler, in other words makes      \
-       **       life easier in the system call epilogue (see entry.S)      \
-       */                                                                  \
-        register unsigned long __sr2 __asm__ ("r2") = res;                 \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) {          \
-               errno = -(res);                                             \
-               __sr2 = -1;                                                 \
-       } \
-       return (type) (__sr2);                                              \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "()"                      \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0) );                                                    \
-__syscall_return(type,__sc0);                                              \
-}
-
-       /*
-        * The apparent spurious "dummy" assembler comment is *needed*,
-        * as without it, the compiler treats the arg<n> variables
-        * as no longer live just before the asm. The compiler can
-        * then optimize the storage into any registers it wishes.
-        * The additional dummy statement forces the compiler to put
-        * the arguments into the correct registers before the TRAPA.
-        */
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;        \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "(%2)"                    \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0), "r" (__sc2));                                        \
-__asm__ __volatile__ ("!dummy  %0 %1"                                      \
-       :                                                                   \
-       : "r" (__sc0), "r" (__sc2));                                        \
-__syscall_return(type,__sc0);                                              \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;        \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;        \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "(%2,%3)"                 \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3) );                          \
-__asm__ __volatile__ ("!dummy  %0 %1 %2"                                   \
-       :                                                                   \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3) );                          \
-__syscall_return(type,__sc0);                                              \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;        \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;        \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;        \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "(%2,%3,%4)"              \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );             \
-__asm__ __volatile__ ("!dummy  %0 %1 %2 %3"                                \
-       :                                                                   \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );             \
-__syscall_return(type,__sc0);                                              \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;        \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;        \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;        \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;        \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "(%2,%3,%4,%5)"           \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__asm__ __volatile__ ("!dummy  %0 %1 %2 %3 %4"                             \
-       :                                                                   \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__syscall_return(type,__sc0);                                              \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;        \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;        \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;        \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;        \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;        \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "(%2,%3,%4,%5,%6)"        \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-         "r" (__sc6));                                                     \
-__asm__ __volatile__ ("!dummy  %0 %1 %2 %3 %4 %5"                          \
-       :                                                                   \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-         "r" (__sc6));                                                     \
-__syscall_return(type,__sc0);                                              \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1;        \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2;        \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3;        \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4;        \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5;        \
-register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6;        \
-__asm__ __volatile__ ("trapa   %1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)"     \
-       : "=r" (__sc0)                                                      \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-         "r" (__sc6), "r" (__sc7));                                        \
-__asm__ __volatile__ ("!dummy  %0 %1 %2 %3 %4 %5 %6"                       \
-       :                                                                   \
-       : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5),  \
-         "r" (__sc6), "r" (__sc7));                                        \
-__syscall_return(type,__sc0);                                              \
-}
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index f7827fa4cd5e79e5404f63568a3d60ed68d72381..d5b2f8053b3b8af9f921caf414b6b5430d19c048 100644 (file)
  *          find a free slot in the 0-302 range.
  */
 
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x10\n\t" \
-                     "bcc 1f\n\t" \
-                     "mov %%o0, %0\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "1:\n\t" \
-                     : "=r" (__res)\
-                     : "r" (__g1) \
-                     : "o0", "cc"); \
-if (__res < -255 || __res >= 0) \
-    return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-                     "bcc 1f\n\t" \
-                     "mov %%o0, %0\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "1:\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__g1) \
-                     : "cc"); \
-if (__res < -255 || __res >= 0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-                     "bcc 1f\n\t" \
-                     "mov %%o0, %0\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "1:\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__g1) \
-                     : "cc"); \
-if (__res < -255 || __res >= 0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-                     "bcc 1f\n\t" \
-                     "mov %%o0, %0\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "1:\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
-                     : "cc"); \
-if (__res < -255 || __res>=0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-                     "bcc 1f\n\t" \
-                     "mov %%o0, %0\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "1:\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
-                     : "cc"); \
-if (__res < -255 || __res>=0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x10\n\t" \
-                     "bcc 1f\n\t" \
-                     "mov %%o0, %0\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "1:\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
-                     : "cc"); \
-if (__res < -255 || __res>=0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
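
The removed sparc stubs spell out the old user-side error convention: the kernel leaves a small negated error code in the result register, and the stub turns it into errno plus a -1 return. With the macros gone, the libc wrappers (and syscall(2)) perform the same conversion. A quick runnable check from the caller's point of view:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            char c;
            /* read(2) on fd -1 fails: the kernel returns -EBADF internally,
             * which the wrapper converts to ret == -1 and errno == EBADF */
            long ret = syscall(SYS_read, -1, &c, 1);
            printf("ret=%ld errno=%d (%s)\n", ret, errno, strerror(errno));
            return 0;
    }
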
index 27c46fbeebd6cd5c8ec59491d14114ac686a23b3..2f858a2df94a8e2f3d4b33eba0b4731c31a27216 100644 (file)
@@ -181,7 +181,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -210,7 +210,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        /* could define this in terms of the dma_cache ... operations,
index 7392fc4a954e2d603805c319071bc1cbb89e2d19..876312fe82ccf9dbf894a100a26978a5f8267f10 100644 (file)
@@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index 010f9cd0a672137d89ed4569549e78dd6868197e..5891ff7ba7609db6a57cf6f11ae65417e887b94f 100644 (file)
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-extern kmem_cache_t *pgtable_cache;
+extern struct kmem_cache *pgtable_cache;
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
index 63669dad0d72b5d4feae55137d11703791432020..47047536f261f6b5696e2d47c1e17609081aa0c9 100644 (file)
  *          find a free slot in the 0-302 range.
  */
 
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "movcc %%xcc, %%o0, %0\n\t" \
-                     : "=r" (__res)\
-                     : "r" (__g1) \
-                     : "o0", "cc"); \
-if (__res >= 0) \
-    return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "movcc %%xcc, %%o0, %0\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__g1) \
-                     : "cc"); \
-if (__res >= 0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "movcc %%xcc, %%o0, %0\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__g1) \
-                     : "cc"); \
-if (__res >= 0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "movcc %%xcc, %%o0, %0\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
-                     : "cc"); \
-if (__res>=0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "movcc %%xcc, %%o0, %0\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
-                     : "cc"); \
-if (__res>=0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
-                     "sub %%g0, %%o0, %0\n\t" \
-                     "movcc %%xcc, %%o0, %0\n\t" \
-                     : "=r" (__res), "=&r" (__o0) \
-                     : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
-                     : "cc"); \
-if (__res>=0) \
-       return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
 /* sysconf options, for SunOS compatibility */
 #define   _SC_ARG_MAX             1
 #define   _SC_CHILD_MAX           2
index babd2989511465887973098e1619e6d20a067669..f0ee4fb55911e7441c73710eda5ef0a0c11f621d 100644 (file)
@@ -94,7 +94,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
 
 static inline int
 dma_get_cache_alignment(void)
@@ -112,7 +112,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        BUG();
index 1bf096db8f4c258f2d71abd83414bb4611ee79bf..88687c181f01234e8463ef0581f93e983c0542fe 100644 (file)
@@ -46,8 +46,6 @@ extern void
 init_irq_handlers (int base_irq, int num, int interval,
                   struct hw_interrupt_type *irq_type);
 
-typedef void (*irq_handler_t)(int irq, void *data, struct pt_regs *regs);
-
 /* Handle interrupt IRQ.  REGS are the registers at the time of ther
    interrupt.  */
 extern unsigned int handle_irq (int irq, struct pt_regs *regs);
index 737401e7d3ad33a5c398577bfb465354d6e6b7dc..2241ed45ecfeb9a315253636cc12b52af8723f48 100644 (file)
 #define __NR_gettid            201
 #define __NR_tkill             202
 
-
-/* Syscall protocol:
-   Syscall number in r12, args in r6-r9, r13-r14
-   Return value in r10
-   Trap 0 for `short' syscalls, where all the args can fit in function
-   call argument registers, and trap 1 when there are additional args in
-   r13-r14.  */
-
-#define SYSCALL_NUM    "r12"
-#define SYSCALL_ARG0   "r6"
-#define SYSCALL_ARG1   "r7"
-#define SYSCALL_ARG2   "r8"
-#define SYSCALL_ARG3   "r9"
-#define SYSCALL_ARG4   "r13"
-#define SYSCALL_ARG5   "r14"
-#define SYSCALL_RET    "r10"
-
-#define SYSCALL_SHORT_TRAP     "0"
-#define SYSCALL_LONG_TRAP      "1"
-
-/* Registers clobbered by any syscall.  This _doesn't_ include the syscall
-   number (r12) or the `extended arg' registers (r13, r14), even though
-   they are actually clobbered too (this is because gcc's `asm' statement
-   doesn't allow a clobber to be used as an input or output).  */
-#define SYSCALL_CLOBBERS       "r1", "r5", "r11", "r15", "r16", \
-                               "r17", "r18", "r19"
-
-/* Registers clobbered by a `short' syscall.  This includes all clobbers
-   except the syscall number (r12).  */
-#define SYSCALL_SHORT_CLOBBERS SYSCALL_CLOBBERS, "r13", "r14"
-
 #ifdef __KERNEL__
 
-#include <asm/clinkage.h>
-#include <linux/err.h>
-
-#define __syscall_return(type, res)                                          \
-  do {                                                                       \
-         /* user-visible error numbers are in the range -1 - -MAX_ERRNO:      \
-            see <asm-v850/errno.h> */                                        \
-         if (__builtin_expect ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO), 0)) { \
-                 errno = -(res);                                             \
-                 res = -1;                                                   \
-         }                                                                   \
-         return (type) (res);                                                \
-  } while (0)
-
-
-#define _syscall0(type, name)                                                \
-type name (void)                                                             \
-{                                                                            \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP                           \
-                       : "=r" (__ret), "=r" (__syscall)                      \
-                       : "1" (__syscall)                                     \
-                       : SYSCALL_SHORT_CLOBBERS);                            \
-  __syscall_return (type, __ret);                                            \
-}
-
-#define _syscall1(type, name, atype, a)                                              \
-type name (atype a)                                                          \
-{                                                                            \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;                             \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP                           \
-                       : "=r" (__ret), "=r" (__syscall)                      \
-                       : "1" (__syscall), "r" (__a)                          \
-                       : SYSCALL_SHORT_CLOBBERS);                            \
-  __syscall_return (type, __ret);                                            \
-}
-
-#define _syscall2(type, name, atype, a, btype, b)                            \
-type name (atype a, btype b)                                                 \
-{                                                                            \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;                             \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;                             \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP                           \
-                       : "=r" (__ret), "=r" (__syscall)                      \
-                       : "1" (__syscall), "r" (__a), "r" (__b)               \
-                       : SYSCALL_SHORT_CLOBBERS);                            \
-  __syscall_return (type, __ret);                                            \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c)                  \
-type name (atype a, btype b, ctype c)                                        \
-{                                                                            \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;                             \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;                             \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;                             \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP                           \
-                       : "=r" (__ret), "=r" (__syscall)                      \
-                       : "1" (__syscall), "r" (__a), "r" (__b), "r" (__c)    \
-                       : SYSCALL_SHORT_CLOBBERS);                            \
-  __syscall_return (type, __ret);                                            \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d)        \
-type name (atype a, btype b, ctype c, dtype d)                               \
-{                                                                            \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;                             \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;                             \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;                             \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;                             \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP                           \
-                       : "=r" (__ret), "=r" (__syscall)                      \
-                       : "1" (__syscall),                                    \
-                       "r" (__a), "r" (__b), "r" (__c), "r" (__d)            \
-                       : SYSCALL_SHORT_CLOBBERS);                            \
-  __syscall_return (type, __ret);                                            \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype,e)\
-type name (atype a, btype b, ctype c, dtype d, etype e)                              \
-{                                                                            \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;                             \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;                             \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;                             \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;                             \
-  register etype __e __asm__ (SYSCALL_ARG4) = e;                             \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP                            \
-                       : "=r" (__ret), "=r" (__syscall), "=r" (__e)          \
-                       : "1" (__syscall),                                    \
-                       "r" (__a), "r" (__b), "r" (__c), "r" (__d), "2" (__e) \
-                       : SYSCALL_CLOBBERS);                                  \
-  __syscall_return (type, __ret);                                            \
-}
-
-#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f)                              \
-  __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP                            \
-                       : "=r" (ret), "=r" (syscall),                         \
-                       "=r" (e), "=r" (f)                                    \
-                       : "1" (syscall),                                      \
-                       "r" (a), "r" (b), "r" (c), "r" (d),                   \
-                       "2" (e), "3" (f)                                      \
-                       : SYSCALL_CLOBBERS);
-
-#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \
-type name (atype a, btype b, ctype c, dtype d, etype e, ftype f)             \
-{                                                                            \
-  register atype __a __asm__ (SYSCALL_ARG0) = a;                             \
-  register btype __b __asm__ (SYSCALL_ARG1) = b;                             \
-  register ctype __c __asm__ (SYSCALL_ARG2) = c;                             \
-  register dtype __d __asm__ (SYSCALL_ARG3) = d;                             \
-  register etype __e __asm__ (SYSCALL_ARG4) = e;                             \
-  register etype __f __asm__ (SYSCALL_ARG5) = f;                             \
-  register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name;              \
-  register unsigned long __ret __asm__ (SYSCALL_RET);                        \
-  __SYSCALL6_TRAP(__syscall, __ret, __a, __b, __c, __d, __e, __f);           \
-  __syscall_return (type, __ret);                                            \
-}
-               
-
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
index 1ee9b07f3fe6adcc2d936566b237147821289e81..ebd7117782a67f1dfe4995a3165a947ce80bc7b7 100644 (file)
@@ -6,13 +6,11 @@ ALTARCHDEF := defined __i386__
 
 header-y += boot.h
 header-y += bootsetup.h
-header-y += cpufeature.h
 header-y += debugreg.h
 header-y += ldt.h
 header-y += msr.h
 header-y += prctl.h
 header-y += ptrace-abi.h
-header-y += setup.h
 header-y += sigcontext32.h
 header-y += ucontext.h
 header-y += vsyscall32.h
index a584826cc570f6aa6415b62d255a789feb8b22a6..a6657b4f3e0eccc7660983382c378b67813d16d3 100644 (file)
@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
+#include <linux/stddef.h>
 #include <asm/cpufeature.h>
 
 struct alt_instr {
@@ -133,4 +134,15 @@ static inline void alternatives_smp_switch(int smp) {}
 #define LOCK_PREFIX ""
 #endif
 
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
 #endif /* _X86_64_ALTERNATIVE_H */
index 007e88d6d43f9ee32e73121c8ff17d3d47047bd1..706ca4b60000ec2091efa2a45e096a4f5fef0c5e 100644 (file)
@@ -21,7 +21,7 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { int counter; } atomic_t;
 
 #define ATOMIC_INIT(i) { (i) }
 
@@ -189,9 +189,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
 {
        int __i = i;
        __asm__ __volatile__(
-               LOCK_PREFIX "xaddl %0, %1;"
-               :"=r"(i)
-               :"m"(v->counter), "0"(i));
+               LOCK_PREFIX "xaddl %0, %1"
+               :"+r" (i), "+m" (v->counter)
+               : : "memory");
        return i + __i;
 }
 
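
The atomic.h hunk rewrites atomic_add_return() with "+r"/"+m" read-write constraints and a "memory" clobber instead of listing v->counter as a plain input. The instruction itself is unchanged: lock xaddl leaves the old counter value in the source register, so adding the saved increment back yields the new value. A userspace re-creation of the same idiom (x86 only, plain int instead of atomic_t):

    #include <stdio.h>

    /* fetch-and-add via LOCK XADD, returning the new value */
    static inline int add_return(int i, int *counter)
    {
            int inc = i;
            __asm__ __volatile__("lock; xaddl %0, %1"
                                 : "+r" (i), "+m" (*counter)
                                 : : "memory");
            return i + inc;         /* i now holds the old counter value */
    }

    int main(void)
    {
            int counter = 40;
            int ret = add_return(2, &counter);
            printf("add_return(2, &counter) = %d, counter = %d\n", ret, counter); /* 42, 42 */
            return 0;
    }
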
index 6b93f5a3a5c8ee81eff80cf96ed40653338f58ed..7ee90064571955ca938b43b6b1c77364037c0978 100644 (file)
@@ -51,6 +51,8 @@ struct iommu_table {
 #define TCE_TABLE_SIZE_4M              6
 #define TCE_TABLE_SIZE_8M              7
 
+extern int use_calgary;
+
 #ifdef CONFIG_CALGARY_IOMMU
 extern int calgary_iommu_init(void);
 extern void detect_calgary(void);
index ee792faaca013fa17cf0887f2008a278eac5fe17..0b3c686139f1505340db96be8dc52585f584a9fc 100644 (file)
@@ -29,7 +29,7 @@
 #define X86_FEATURE_PSE36      (0*32+17) /* 36-bit PSEs */
 #define X86_FEATURE_PN         (0*32+18) /* Processor serial number */
 #define X86_FEATURE_CLFLSH     (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES       (0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS         (0*32+21) /* Debug Store */
 #define X86_FEATURE_ACPI       (0*32+22) /* ACPI via MSR */
 #define X86_FEATURE_MMX                (0*32+23) /* Multimedia Extensions */
 #define X86_FEATURE_FXSR       (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
@@ -68,6 +68,8 @@
 #define X86_FEATURE_FXSAVE_LEAK (3*32+7)  /* FIP/FOP/FDP leaks through FXSAVE */
 #define X86_FEATURE_UP         (3*32+8) /* SMP kernel running on UP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS       (3*32+10) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                (3*32+11) /* Branch Trace Store */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3       (4*32+ 0) /* Streaming SIMD Extensions-3 */
 #define cpu_has_cyrix_arr      0
 #define cpu_has_centaur_mcr    0
 #define cpu_has_clflush               boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_ds            boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs          boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_bts           boot_cpu_has(X86_FEATURE_BTS)
 
 #endif /* __ASM_X8664_CPUFEATURE_H */
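
These cpufeature.h hunks rename bit 21 of CPUID leaf 1's EDX from DTES to DS (Debug Store) and add Linux-defined PEBS and BTS bits along with cpu_has_ds/pebs/bts helpers. Word 0 of the kernel's feature array corresponds to that EDX register, so the hardware bit can also be inspected from userspace; a small sketch using GCC's cpuid.h (bit position 21 is taken from the hunk above):

    #include <stdio.h>
    #include <cpuid.h>      /* GCC wrapper around the CPUID instruction */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 1 not supported");
                    return 1;
            }
            /* X86_FEATURE_DS is (0*32 + 21): word 0 = CPUID.1:EDX, bit 21 */
            printf("Debug Store (DS): %s\n", (edx >> 21) & 1 ? "yes" : "no");
            return 0;
    }
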
index 65f64acc53192946592d189463508d7f1e9d4c09..c2669f1f5529065e83e6eaf58a17943aff5e81c0 100644 (file)
@@ -7,18 +7,21 @@
  * Delay routines calling functions in arch/x86_64/lib/delay.c
  */
  
+/* Undefined functions to get compile-time errors */
 extern void __bad_udelay(void);
 extern void __bad_ndelay(void);
 
 extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
 extern void __const_udelay(unsigned long usecs);
 extern void __delay(unsigned long loops);
 
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
 #define udelay(n) (__builtin_constant_p(n) ? \
-       ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
+       ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
        __udelay(n))
 
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
 #define ndelay(n) (__builtin_constant_p(n) ? \
        ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
        __ndelay(n))
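
The delay.h hunk corrects and documents the scaling constants: the udelay() multiplier changes from the truncated 0x10c6 to the rounded-up 0x10c7 (2^32/10^6), matching the rounded-up 5 already used for ndelay() (2^32/10^9), so constant delays cannot come out slightly short. The arithmetic can be checked directly:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t two32 = 1ULL << 32;    /* 4294967296 */

            printf("floor(2^32/1e6) = %#llx\n",
                   (unsigned long long)(two32 / 1000000));                  /* 0x10c6 */
            printf("ceil (2^32/1e6) = %#llx\n",
                   (unsigned long long)((two32 + 999999) / 1000000));       /* 0x10c7 */
            printf("ceil (2^32/1e9) = %llu\n",
                   (unsigned long long)((two32 + 999999999) / 1000000000)); /* 5 */
            return 0;
    }
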
index eb7723a467908b5f86cb0e2fc2dd24168c8e523d..913d6ac0003381a5ea01d3dcc35b20a0fca0bcac 100644 (file)
@@ -9,64 +9,13 @@
 
 #include <linux/string.h>
 #include <linux/smp.h>
+#include <asm/desc_defs.h>
 
 #include <asm/segment.h>
 #include <asm/mmu.h>
 
-// 8 byte segment descriptor
-struct desc_struct { 
-       u16 limit0;
-       u16 base0;
-       unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
-       unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-} __attribute__((packed)); 
-
-struct n_desc_struct { 
-       unsigned int a,b;
-};     
-
 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
 
-enum { 
-       GATE_INTERRUPT = 0xE, 
-       GATE_TRAP = 0xF,        
-       GATE_CALL = 0xC,
-};     
-
-// 16byte gate
-struct gate_struct {          
-       u16 offset_low;
-       u16 segment; 
-       unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
-       u16 offset_middle;
-       u32 offset_high;
-       u32 zero1; 
-} __attribute__((packed));
-
-#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) 
-#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-
-enum { 
-       DESC_TSS = 0x9,
-       DESC_LDT = 0x2,
-}; 
-
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct ldttss_desc { 
-       u16 limit0;
-       u16 base0;
-       unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-       unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
-       u32 base3;
-       u32 zero1; 
-} __attribute__((packed)); 
-
-struct desc_ptr {
-       unsigned short size;
-       unsigned long address;
-} __attribute__((packed)) ;
-
 #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
 #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
 #define clear_LDT()  asm volatile("lldt %w0"::"r" (0))
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h
new file mode 100644 (file)
index 0000000..0890040
--- /dev/null
@@ -0,0 +1,69 @@
+/* Written 2000 by Andi Kleen */
+#ifndef __ARCH_DESC_DEFS_H
+#define __ARCH_DESC_DEFS_H
+
+/*
+ * Segment descriptor structure definitions, usable from both x86_64 and i386
+ * archs.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+// 8 byte segment descriptor
+struct desc_struct {
+       u16 limit0;
+       u16 base0;
+       unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+       unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+} __attribute__((packed));
+
+struct n_desc_struct {
+       unsigned int a,b;
+};
+
+enum {
+       GATE_INTERRUPT = 0xE,
+       GATE_TRAP = 0xF,
+       GATE_CALL = 0xC,
+};
+
+// 16byte gate
+struct gate_struct {
+       u16 offset_low;
+       u16 segment;
+       unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+       u16 offset_middle;
+       u32 offset_high;
+       u32 zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
+
+enum {
+       DESC_TSS = 0x9,
+       DESC_LDT = 0x2,
+};
+
+// LDT or TSS descriptor in the GDT. 16 bytes.
+struct ldttss_desc {
+       u16 limit0;
+       u16 base0;
+       unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+       unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+       u32 base3;
+       u32 zero1;
+} __attribute__((packed));
+
+struct desc_ptr {
+       unsigned short size;
+       unsigned long address;
+} __attribute__((packed)) ;
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
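
The new desc_defs.h only relocates these descriptor structures and helpers out of desc.h so they can be shared with i386 code. The PTR_LOW/PTR_MIDDLE/PTR_HIGH macros split a 64-bit handler address into the offset_low/offset_middle/offset_high fields of the 16-byte gate descriptor above; a quick round-trip check (the address is an arbitrary made-up example, and a 64-bit unsigned long is assumed):

    #include <stdio.h>

    #define PTR_LOW(x)    ((unsigned long)(x) & 0xFFFF)
    #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
    #define PTR_HIGH(x)   ((unsigned long)(x) >> 32)

    int main(void)
    {
            unsigned long addr = 0xffffffff8105c0deUL;      /* made-up example */
            unsigned long rebuilt = PTR_LOW(addr)
                                  | (PTR_MIDDLE(addr) << 16)
                                  | (PTR_HIGH(addr) << 32);

            printf("%#lx -> low %04lx, middle %04lx, high %08lx, rebuilt %#lx\n",
                   addr, PTR_LOW(addr), PTR_MIDDLE(addr), PTR_HIGH(addr), rebuilt);
            return 0;
    }
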
index 10174b110a5cf9bb332b531c24f2cedef3b35c0a..be9ec68907237f4690101216d0b5f93c5616ed95 100644 (file)
@@ -180,12 +180,13 @@ static inline int dma_get_cache_alignment(void)
        return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(h) 1
+#define dma_is_consistent(d, h) 1
 
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+       enum dma_data_direction dir)
 {
        flush_write_buffers();
 }
index 9804bf07b092f4bb154f19284047a9a9711bc989..5cdfb08013c38889dc8a42a2dff4c4a3c1833ff8 100644 (file)
@@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       inc_preempt_count();
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                ret = -ENOSYS;
        }
 
-       dec_preempt_count();
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index a0e9a4b934844a96c4dced8b8cdd293e2b5decfb..b80f4bb5f2733554f096788cb067b06519f1053c 100644 (file)
@@ -30,6 +30,6 @@ struct genapic {
 };
 
 
-extern struct genapic *genapic;
+extern struct genapic *genapic, *genapic_force, apic_flat;
 
 #endif
index 37e194169fac2f2793894331eb9ec34d81401dfa..952783d35c7b4ceea3a3b898af80be5e142736f8 100644 (file)
@@ -169,8 +169,8 @@ static inline unsigned int cpuid_edx(unsigned int op)
 #define MSR_LSTAR 0xc0000082           /* long mode SYSCALL target */
 #define MSR_CSTAR 0xc0000083           /* compatibility mode SYSCALL target */
 #define MSR_SYSCALL_MASK 0xc0000084    /* EFLAGS mask for syscall */
-#define MSR_FS_BASE 0xc0000100         /* 64bit GS base */
-#define MSR_GS_BASE 0xc0000101         /* 64bit FS base */
+#define MSR_FS_BASE 0xc0000100         /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101         /* 64bit GS base */
 #define MSR_KERNEL_GS_BASE  0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ 
 /* EFER bits: */ 
 #define _EFER_SCE 0  /* SYSCALL/SYSRET */
@@ -210,6 +210,10 @@ static inline unsigned int cpuid_edx(unsigned int op)
 #define MSR_IA32_LASTINTFROMIP     0x1dd
 #define MSR_IA32_LASTINTTOIP       0x1de
 
+#define MSR_IA32_PEBS_ENABLE           0x3f1
+#define MSR_IA32_DS_AREA               0x600
+#define MSR_IA32_PERF_CAPABILITIES     0x345
+
 #define MSR_MTRRfix64K_00000   0x250
 #define MSR_MTRRfix16K_80000   0x258
 #define MSR_MTRRfix16K_A0000   0x259
@@ -407,4 +411,13 @@ static inline unsigned int cpuid_edx(unsigned int op)
 #define MSR_P4_U2L_ESCR0               0x3b0
 #define MSR_P4_U2L_ESCR1               0x3b1
 
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0       0x309
+#define MSR_CORE_PERF_FIXED_CTR1       0x30a
+#define MSR_CORE_PERF_FIXED_CTR2       0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL   0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS    0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL      0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL  0x390
+
 #endif
index f367d4014b423b5298df04dfc556ff4d5aeaf3fb..72375e7d32a895846200ff921ebfd5e8776296bf 100644 (file)
@@ -77,4 +77,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
 
 extern int unknown_nmi_panic;
 
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
 #endif /* ASM_NMI_H */
index eba9cb471df38b1bee0b4893482fccb576411795..6823fa4f1afa3c8a43a17697cba5ce4ecf63cb36 100644 (file)
@@ -10,6 +10,7 @@ extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
 extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
 extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
 extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
+extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
 
 extern int early_pci_allowed(void);
 
index 0555c1c4d8fa9fcda13c280997c076decc85d708..59901c690a0d4882744c770c4aabfa39ad605bb7 100644 (file)
@@ -221,20 +221,19 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC
 
-static inline unsigned long pgd_bad(pgd_t pgd) 
-{ 
-       unsigned long val = pgd_val(pgd);
-       val &= ~PTE_MASK; 
-       val &= ~(_PAGE_USER | _PAGE_DIRTY); 
-       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);      
-} 
+static inline unsigned long pgd_bad(pgd_t pgd)
+{
+       return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
 
 static inline unsigned long pud_bad(pud_t pud)
 {
-       unsigned long val = pud_val(pud);
-       val &= ~PTE_MASK;
-       val &= ~(_PAGE_USER | _PAGE_DIRTY);
-       return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+       return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
+
+static inline unsigned long pmd_bad(pmd_t pmd)
+{
+       return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
 }
 
 #define pte_none(x)    (!pte_val(x))
@@ -347,7 +346,6 @@ static inline int pmd_large(pmd_t pte) {
 #define pmd_none(x)    (!pmd_val(x))
 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
 #define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
-#define        pmd_bad(x)      ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
 #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
index cef17e0f828cc5d0080cf967d11e80a228f4d18b..76552d72804c6368ed210fd02811c0f9608308e7 100644 (file)
@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
                : :"a" (eax), "c" (ecx));
 }
 
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+       /* "mwait %eax,%ecx;" */
+       asm volatile(
+               "sti; .byte 0x0f,0x01,0xc9;"
+               : :"a" (eax), "c" (ecx));
+}
+
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 #define stack_current() \
index e72cfcdf53448d262ce0b549de267ae470f8ebe1..6d324b83897207f56f9e348bc6ef6feae8c553ef 100644 (file)
@@ -61,7 +61,6 @@ extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
 extern unsigned long numa_free_all_bootmem(void);
 
 extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
-extern void free_bootmem_generic(unsigned long phys, unsigned len);
 
 extern void load_gs_index(unsigned gs);
 
@@ -88,6 +87,7 @@ extern void syscall32_cpu_init(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
 
 extern void early_quirks(void);
+extern void quirk_intel_irqbalance(void);
 extern void check_efer(void);
 
 extern int unhandled_signal(struct task_struct *tsk, int sig);
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h
new file mode 100644 (file)
index 0000000..c7350f6
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Derived from include/asm-i386/mach-summit/mach_mpparse.h
+ *          and include/asm-i386/mach-default/bios_ebda.h
+ *
+ * Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ */
+
+#ifndef __ASM_RIO_H
+#define __ASM_RIO_H
+
+#define RIO_TABLE_VERSION      3
+
+struct rio_table_hdr {
+       u8 version;      /* Version number of this data structure  */
+       u8 num_scal_dev; /* # of Scalability devices               */
+       u8 num_rio_dev;  /* # of RIO I/O devices                   */
+} __attribute__((packed));
+
+struct scal_detail {
+       u8 node_id;      /* Scalability Node ID                    */
+       u32 CBAR;        /* Address of 1MB register space          */
+       u8 port0node;    /* Node ID port connected to: 0xFF=None   */
+       u8 port0port;    /* Port num port connected to: 0,1,2, or  */
+                        /* 0xFF=None                              */
+       u8 port1node;    /* Node ID port connected to: 0xFF = None */
+       u8 port1port;    /* Port num port connected to: 0,1,2, or  */
+                        /* 0xFF=None                              */
+       u8 port2node;    /* Node ID port connected to: 0xFF = None */
+       u8 port2port;    /* Port num port connected to: 0,1,2, or  */
+                        /* 0xFF=None                              */
+       u8 chassis_num;  /* 1 based Chassis number (1 = boot node) */
+} __attribute__((packed));
+
+struct rio_detail {
+       u8 node_id;      /* RIO Node ID                            */
+       u32 BBAR;        /* Address of 1MB register space          */
+       u8 type;         /* Type of device                         */
+       u8 owner_id;     /* Node ID of Hurricane that owns this    */
+                        /* node                                   */
+       u8 port0node;    /* Node ID port connected to: 0xFF=None   */
+       u8 port0port;    /* Port num port connected to: 0,1,2, or  */
+                        /* 0xFF=None                              */
+       u8 port1node;    /* Node ID port connected to: 0xFF=None   */
+       u8 port1port;    /* Port num port connected to: 0,1,2, or  */
+                        /* 0xFF=None                              */
+       u8 first_slot;   /* Lowest slot number below this Calgary  */
+       u8 status;       /* Bit 0 = 1 : the XAPIC is used          */
+                        /*       = 0 : the XAPIC is not used, ie: */
+                        /*            ints fwded to another XAPIC */
+                        /*           Bits1:7 Reserved             */
+       u8 WP_index;     /* instance index - lower ones have       */
+                        /*     lower slot numbers/PCI bus numbers */
+       u8 chassis_num;  /* 1 based Chassis number                 */
+} __attribute__((packed));
+
+enum {
+       HURR_SCALABILTY = 0,  /* Hurricane Scalability info */
+       HURR_RIOIB      = 2,  /* Hurricane RIOIB info       */
+       COMPAT_CALGARY  = 4,  /* Compatibility Calgary      */
+       ALT_CALGARY     = 5,  /* Second Planar Calgary      */
+};
+
+/*
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E.
+ */
+static inline unsigned long get_bios_ebda(void)
+{
+       unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
+       address <<= 4;
+       return address;
+}
+
+#endif /* __ASM_RIO_H */
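
get_bios_ebda() in the new header reads the real-mode segment value stored at 0x40E and shifts it left by four, the standard segment-to-linear conversion, to locate the Extended BIOS Data Area. The conversion on its own, with a made-up sample segment (the real value comes from physical memory, which this sketch does not touch):

    #include <stdio.h>

    /* real-mode segment to linear address: linear = segment * 16 */
    static unsigned long segment_to_linear(unsigned short segment)
    {
            return (unsigned long)segment << 4;
    }

    int main(void)
    {
            unsigned short ebda_segment = 0x9fc0;   /* sample value, not read from 0x40E */
            printf("EBDA segment %#x -> linear %#lx\n",
                   ebda_segment, segment_to_linear(ebda_segment));      /* 0x9fc00 */
            return 0;
    }
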
index d6b7c057edbaf281ddb97963e027e17931a01211..e17b9ec42e98d105d61d373ce47a828f8e6eaabc 100644 (file)
@@ -82,11 +82,6 @@ extern u8 x86_cpu_to_apicid[NR_CPUS];        /* physical ID */
 extern u8 x86_cpu_to_log_apicid[NR_CPUS];
 extern u8 bios_cpu_apicid[];
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
-       return cpus_addr(cpumask)[0];
-}
-
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
        if (mps_cpu < NR_CPUS)
@@ -118,13 +113,6 @@ static __inline int logical_smp_processor_id(void)
 #define cpu_physical_id(cpu)           x86_cpu_to_apicid[cpu]
 #else
 #define cpu_physical_id(cpu)           boot_cpu_id
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
-                               void *info, int retry, int wait)
-{
-       /* Disable interrupts here? */
-       func(info);
-       return 0;
-}
 #endif /* !CONFIG_SMP */
 #endif
 
index 05ef097ba55b26dccd45d997417228ad79a110ba..88bf981e73cf45c084c66d6d60c25e066fa53340 100644 (file)
@@ -36,7 +36,34 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
                "2:\t" : "=m" (lock->slock) : : "memory");
 }
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+/*
+ * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ */
+#ifndef CONFIG_PROVE_LOCKING
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+       asm volatile(
+               "\n1:\t"
+               LOCK_PREFIX " ; decl %0\n\t"
+               "jns 5f\n"
+               "testl $0x200, %1\n\t"  /* interrupts were disabled? */
+               "jz 4f\n\t"
+               "sti\n"
+               "3:\t"
+               "rep;nop\n\t"
+               "cmpl $0, %0\n\t"
+               "jle 3b\n\t"
+               "cli\n\t"
+               "jmp 1b\n"
+               "4:\t"
+               "rep;nop\n\t"
+               "cmpl $0, %0\n\t"
+               "jg 1b\n\t"
+               "jmp 4b\n"
+               "5:\n\t"
+               : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
+}
+#endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
index 59efe849f351f0bc556b73e1f54dcfec1d09463a..4da9345c15001803a3e7786811f685a0ec5c4616 100644 (file)
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-       volatile unsigned int slock;
+       unsigned int slock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED       { 1 }
 
 typedef struct {
-       volatile unsigned int lock;
+       unsigned int lock;
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED         { RW_LOCK_BIAS }
index 5eb9799bef76fcc77053c2529d5177b08ca6a541..6f0b5459430739c0d60b936a28233e6841b23aa5 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_STACKTRACE_H
 #define _ASM_STACKTRACE_H 1
 
+extern int kstack_depth_to_print;
+
 /* Generic stack tracer with callbacks */
 
 struct stacktrace_ops {
index c86c2e6793e2a87b55224a7d17f9dfa077ae75e3..2d4491aae281c11a704de23d7ee243e26c1cec15 100644 (file)
@@ -48,9 +48,6 @@ typedef unsigned long long u64;
 typedef u64 dma64_addr_t;
 typedef u64 dma_addr_t;
 
-typedef u64 sector_t;
-#define HAVE_SECTOR_T
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
index 777288eb7e75ecc5272599a3995874162ab4c1b2..c5f596e71faa71f9cf03d127119f5599653fe11e 100644 (file)
@@ -622,25 +622,7 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
 
 #define __NR_syscall_max __NR_move_pages
 
-#ifdef __KERNEL__
-#include <linux/err.h>
-#endif
-
 #ifndef __NO_STUBS
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO */
-
-#define __syscall_clobber "r11","rcx","memory" 
-
-#define __syscall_return(type, res) \
-do { \
-       if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
-               errno = -(res); \
-               res = -1; \
-       } \
-       return (type) (res); \
-} while (0)
-
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
 #define __ARCH_WANT_SYS_ALARM
@@ -664,87 +646,6 @@ do { \
 #define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_TIME
 
-#define __syscall "syscall"
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-                 "d" ((long)(arg3)) : __syscall_clobber); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ;" __syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-         "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \
-__syscall_return(type,__res); \
-} 
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-         "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \
-       __syscall_clobber,"r8","r10" ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
-         type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
-       : "=a" (__res) \
-       : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
-         "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
-         "g" ((long)(arg6)) : \
-       __syscall_clobber,"r8","r10","r9" ); \
-__syscall_return(type,__res); \
-}
-
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
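This hunk drops the user-exported _syscallN() macros and the __syscall_return() errno-decoding helper from the x86-64 unistd.h; callers are expected to go through the C library instead. A minimal userspace sketch of the replacement pattern, assuming glibc's generic syscall(2) wrapper (SYS_gettid is only an illustrative choice of call):

/* hypothetical replacement for a removed _syscall0() user */
#include <unistd.h>
#include <sys/syscall.h>
#include <stdio.h>

int main(void)
{
        long tid = syscall(SYS_gettid); /* glibc decodes errno for us */
        printf("tid=%ld\n", tid);
        return 0;
}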
index 2e7ff10fd7751db2e967101c4b7749ad32b7f3a8..2f6349e4871765aaee69e23275c0755e4a9d98da 100644 (file)
@@ -87,14 +87,10 @@ extern int arch_unwind_init_running(struct unwind_frame_info *,
 
 static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
 {
-#if 0 /* This can only work when selector register saves/restores
-         are properly annotated (and tracked in UNW_REGISTER_INFO). */
-       return user_mode(&info->regs);
-#else
-       return (long)info->regs.rip >= 0
+       return user_mode(&info->regs)
+              || (long)info->regs.rip >= 0
               || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
               || (long)info->regs.rsp >= 0;
-#endif
 }
 
 #else
index 01d1c17e2849abb50a2bf4bffb4185a726d886f5..05cb8dd200de7742456de435aa31c586b5df2776 100644 (file)
@@ -10,6 +10,7 @@ enum vsyscall_num {
 #define VSYSCALL_START (-10UL << 20)
 #define VSYSCALL_SIZE 1024
 #define VSYSCALL_END (-2UL << 20)
+#define VSYSCALL_MAPPED_PAGES 1
 #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
 
 #ifdef __KERNEL__
index c39c91dfcc695a55e16933d60974310460987151..82b03b3a2ee6a4103726e69006319ba84158fb1e 100644 (file)
@@ -170,10 +170,10 @@ dma_get_cache_alignment(void)
        return L1_CACHE_BYTES;
 }
 
-#define dma_is_consistent(d)   (1)
+#define dma_is_consistent(d, h)        (1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
 {
        consistent_sync(vaddr, size, direction);
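In this series dma_is_consistent() and dma_cache_sync() gain the struct device as a leading argument. A hedged before/after sketch for a driver call site (pdev, buf and len are illustrative names, not taken from this diff):

/* old:  dma_cache_sync(buf, len, DMA_TO_DEVICE);
 * new:  pass the device so the arch code can apply per-device policy */
dma_cache_sync(&pdev->dev, buf, len, DMA_TO_DEVICE);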
index 411f810a55c604e0e5e5e77eede58f69dab8ddd8..2e1a1b997e7d7f89ede319c9029ec8ef90437458 100644 (file)
 
 #define SYSXTENSA_COUNT                   5    /* count of syscall0 functions*/
 
-#ifdef __KERNEL__
-#include <linux/linkage.h>
-
-#define __syscall_return(type, res) return ((type)(res))
-
-/* Tensilica's xt-xcc compiler is much more agressive at code
- * optimization than gcc.  Multiple __asm__ statements are
- * insufficient for xt-xcc because subsequent optimization passes
- * (beyond the front-end that knows of __asm__ statements and other
- * such GNU Extensions to C) can modify the register selection for
- * containment of C variables.
- *
- * xt-xcc cannot modify the contents of a single __asm__ statement, so
- * we create single-asm versions of the syscall macros that are
- * suitable and optimal for both xt-xcc and gcc.
- *
- * Linux takes system-call arguments in registers.  The following
- * design is optimized for user-land apps (e.g., glibc) which
- * typically have a function wrapper around the "syscall" assembly
- * instruction.  It satisfies the Xtensa ABI while minizing argument
- * shifting.
- *
- * The Xtensa ABI and software conventions require the system-call
- * number in a2.  If an argument exists in a2, we move it to the next
- * available register.  Note that for improved efficiency, we do NOT
- * shift all parameters down one register to maintain the original
- * order.
- *
- * At best case (zero arguments), we just write the syscall number to
- * a2.  At worst case (1 to 6 arguments), we move the argument in a2
- * to the next available register, then write the syscall number to
- * a2.
- *
- * For clarity, the following truth table enumerates all possibilities.
- *
- * arguments   syscall number  arg0, arg1, arg2, arg3, arg4, arg5
- * ---------   --------------  ----------------------------------
- *     0             a2
- *     1             a2        a3
- *     2             a2        a4,   a3
- *     3             a2        a5,   a3,   a4
- *     4             a2        a6,   a3,   a4,   a5
- *     5             a2        a7,   a3,   a4,   a5,   a6
- *     6             a2        a8,   a3,   a4,   a5,   a6,   a7
- */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name) \
-       : "a2" \
-       ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type0,arg0) \
-type name(type0 arg0) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  mov   a3, %2 \n" \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name), "a" (arg0) \
-       : "a2", "a3" \
-       ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type0,arg0,type1,arg1) \
-type name(type0 arg0,type1 arg1) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  mov   a4, %2 \n" \
-       "  mov   a3, %3 \n" \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name), "a" (arg0), "a" (arg1) \
-       : "a2", "a3", "a4" \
-       ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \
-type name(type0 arg0,type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  mov   a5, %2 \n" \
-       "  mov   a4, %4 \n" \
-       "  mov   a3, %3 \n" \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \
-       : "a2", "a3", "a4", "a5" \
-       ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  mov   a6, %2 \n" \
-       "  mov   a5, %5 \n" \
-       "  mov   a4, %4 \n" \
-       "  mov   a3, %3 \n" \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \
-       : "a2", "a3", "a4", "a5", "a6" \
-       ); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  mov   a9, a7 \n" \
-       "  mov   a7, %2 \n" \
-       "  mov   a6, %6 \n" \
-       "  mov   a5, %5 \n" \
-       "  mov   a4, %4 \n" \
-       "  mov   a3, %3 \n" \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   a7, a9 \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
-                             "a" (arg3), "a" (arg4) \
-       : "a2", "a3", "a4", "a5", "a6", "a9" \
-       ); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
-       "  mov   a9, a7 \n" \
-       "  mov   a8, %2 \n" \
-       "  mov   a7, %7 \n" \
-       "  mov   a6, %6 \n" \
-       "  mov   a5, %5 \n" \
-       "  mov   a4, %4 \n" \
-       "  mov   a3, %3 \n" \
-       "  movi  a2, %1 \n" \
-       "  syscall      \n" \
-       "  mov   a7, a9 \n" \
-       "  mov   %0, a2 \n" \
-       : "=a" (__res) \
-       : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
-                             "a" (arg3), "a" (arg4), "a" (arg5)  \
-       : "a2", "a3", "a4", "a5", "a6", "a8", "a9" \
-       ); \
-__syscall_return(type,__res); \
-}
-
 /*
  * "Conditional" syscalls
  *
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
new file mode 100644 (file)
index 0000000..0b8e6bc
--- /dev/null
@@ -0,0 +1,80 @@
+/* b128ops.h - common 128-bit block operations
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 13/06/2006
+*/
+
+#ifndef _CRYPTO_B128OPS_H
+#define _CRYPTO_B128OPS_H
+
+#include <linux/types.h>
+
+typedef struct {
+       u64 a, b;
+} u128;
+
+typedef struct {
+       __be64 a, b;
+} be128;
+
+typedef struct {
+       __le64 b, a;
+} le128;
+
+static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+{
+       r->a = p->a ^ q->a;
+       r->b = p->b ^ q->b;
+}
+
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
+{
+       u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
+{
+       u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+#endif /* _CRYPTO_B128OPS_H */
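b128ops.h gives block-cipher mode code a 128-bit block type and an in-place XOR. A minimal usage sketch, assuming two naturally aligned 16-byte buffers (the function and variable names are illustrative):

#include <crypto/b128ops.h>

static void xor_example(be128 *dst, const be128 *src, const be128 *mask)
{
        /* dst = src XOR mask, treating each buffer as one 128-bit block */
        be128_xor(dst, src, mask);
}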
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
new file mode 100644 (file)
index 0000000..4fd3152
--- /dev/null
@@ -0,0 +1,198 @@
+/* gf128mul.h - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+   1. distributions of this source code include the above copyright
+      notice, this list of conditions and the following disclaimer;
+
+   2. distributions in binary form include the above copyright
+      notice, this list of conditions and the following disclaimer
+      in the documentation and/or other associated materials;
+
+   3. the copyright holder's name is not used to endorse products
+      built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 31/01/2006
+
+ An implementation of field multiplication in Galois Field GF(128)
+*/
+
+#ifndef _CRYPTO_GF128MUL_H
+#define _CRYPTO_GF128MUL_H
+
+#include <crypto/b128ops.h>
+#include <linux/slab.h>
+
+/* Comment by Rik:
+ *
+ * For some background on GF(2^128) see for example: http://-
+ * csrc.nist.gov/CryptoToolkit/modes/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
+ * be mapped to computer memory in a variety of ways. Let's examine
+ * three common cases.
+ *
+ * Take a look at the 16 binary octets below in memory order. The msb's
+ * are left and the lsb's are right. char b[16] is an array and b[0] is
+ * the first octet.
+ *
+ * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ *   b[0]     b[1]     b[2]     b[3]          b[13]    b[14]    b[15]
+ *
+ * Every bit is a coefficient of some power of X. We can store the bits
+ * in every byte in little-endian order and the bytes themselves also in
+ * little endian order. I will call this lle (little-little-endian).
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
+ * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
+ * This format was originally implemented in gf128mul and is used
+ * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
+ *
+ * Another convention says: store the bits in bigendian order and the
+ * bytes also. This is bbe (big-big-endian). Now the buffer above
+ * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
+ * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
+ * is partly implemented.
+ *
+ * Both of the above formats are easy to implement on big-endian
+ * machines.
+ *
+ * EME (which is patent encumbered) uses the ble format (bits are stored
+ * in big endian order and the bytes in little endian). The above buffer
+ * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
+ *
+ * The common machine word-size is smaller than 128 bits, so to make
+ * an efficient implementation we must split into machine word sizes.
+ * This file uses one 32bit for the moment. Machine endianness comes into
+ * play. The lle format in relation to machine endianness is discussed
+ * below by the original author of gf128mul Dr Brian Gladman.
+ *
+ * Let's look at the bbe and ble format on a little endian machine.
+ *
+ * bbe on a little endian machine u32 x[4]:
+ *
+ *  MS            x[0]           LS  MS            x[1]                  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  103..96 111.104 119.112 127.120  71...64 79...72 87...80 95...88
+ *
+ *  MS            x[2]           LS  MS            x[3]                  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  39...32 47...40 55...48 63...56  07...00 15...08 23...16 31...24
+ *
+ * ble on a little endian machine
+ *
+ *  MS            x[0]           LS  MS            x[1]                  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  31...24 23...16 15...08 07...00  63...56 55...48 47...40 39...32
+ *
+ *  MS            x[2]           LS  MS            x[3]                  LS
+ *  ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+ *  95...88 87...80 79...72 71...64  127.120 119.112 111.104 103..96
+ *
+ * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
+ * ble (and lbe also) are easier to implement on a little-endian
+ * machine than on a big-endian machine. The converse holds for bbe
+ * and lle.
+ *
+ * Note: to have good alignment, it seems to me that it is sufficient
+ * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
+ * machines this will automatically be aligned to wordsize and on a 64-bit
+ * machine also.
+ */
+/*     Multiply a GF128 field element by x. Field elements are held in arrays
+    of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
+    indexed bits placed in the more numerically significant bit positions
+    within bytes.
+
+    On little endian machines the bit indexes translate into the bit
+    positions within four 32-bit words in the following way
+
+    MS            x[0]           LS  MS            x[1]                  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    24...31 16...23 08...15 00...07  56...63 48...55 40...47 32...39
+
+    MS            x[2]           LS  MS            x[3]                  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    88...95 80...87 72...79 64...71  120.127 112.119 104.111 96..103
+
+    On big endian machines the bit indexes translate into the bit
+    positions within four 32-bit words in the following way
+
+    MS            x[0]           LS  MS            x[1]                  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    00...07 08...15 16...23 24...31  32...39 40...47 48...55 56...63
+
+    MS            x[2]           LS  MS            x[3]                  LS
+    ms   ls ms   ls ms   ls ms   ls  ms   ls ms   ls ms   ls ms   ls
+    64...71 72...79 80...87 88...95  96..103 104.111 112.119 120.127
+*/
+
+/*     A slow generic version of gf_mul, implemented for lle and bbe
+ *     It multiplies a and b and puts the result in a */
+void gf128mul_lle(be128 *a, const be128 *b);
+
+void gf128mul_bbe(be128 *a, const be128 *b);
+
+
+/* 4k table optimization */
+
+struct gf128mul_4k {
+       be128 t[256];
+};
+
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
+void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+
+static inline void gf128mul_free_4k(struct gf128mul_4k *t)
+{
+       kfree(t);
+}
+
+
+/* 64k table optimization, implemented for lle and bbe */
+
+struct gf128mul_64k {
+       struct gf128mul_4k *t[16];
+};
+
+/* First initialise a table from the constant factor you want to
+ * multiply by, then call gf128mul_64k_lle (or _bbe) with the other
+ * factor in the first argument and the table in the second; the
+ * product is left in *a. */
+struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
+void gf128mul_free_64k(struct gf128mul_64k *t);
+void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
+
+#endif /* _CRYPTO_GF128MUL_H */
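The 4k-table interface is the cheapest of the table-driven multipliers declared above: build a table for the fixed factor, multiply in place, free the table. A hedged sketch of that call sequence (names and error handling are illustrative):

#include <crypto/gf128mul.h>
#include <linux/errno.h>

static int gf128mul_example(be128 *x, const be128 *h)
{
        struct gf128mul_4k *t = gf128mul_init_4k_bbe(h); /* precompute for h */

        if (!t)
                return -ENOMEM;
        gf128mul_4k_bbe(x, t);          /* x = x * h in GF(2^128), bbe format */
        gf128mul_free_4k(t);
        return 0;
}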
index ff433126361fc51cd12e2063814dba2a96365262..e618b25b5addc94d65e8926aa9161e6ed24e6a05 100644 (file)
@@ -221,6 +221,7 @@ unifdef-y += if_bridge.h
 unifdef-y += if_ec.h
 unifdef-y += if_eql.h
 unifdef-y += if_ether.h
+unifdef-y += if_fddi.h
 unifdef-y += if_frad.h
 unifdef-y += if_ltalk.h
 unifdef-y += if_pppox.h
@@ -282,6 +283,7 @@ unifdef-y += nvram.h
 unifdef-y += parport.h
 unifdef-y += patchkey.h
 unifdef-y += pci.h
+unifdef-y += personality.h
 unifdef-y += pktcdvd.h
 unifdef-y += pmu.h
 unifdef-y += poll.h
@@ -337,6 +339,7 @@ unifdef-y += videodev.h
 unifdef-y += wait.h
 unifdef-y += wanrouter.h
 unifdef-y += watchdog.h
+unifdef-y += wireless.h
 unifdef-y += xfrm.h
 
 objhdr-y += version.h
index 0d71c0041f137e09eb48c06017af91fafb84c22d..3372ec6bf53a25d5d65d540008f75bde975e1c13 100644 (file)
@@ -111,7 +111,6 @@ struct kiocb {
        size_t                  ki_nbytes;      /* copy of iocb->aio_nbytes */
        char                    __user *ki_buf; /* remaining iocb->aio_buf */
        size_t                  ki_left;        /* remaining bytes */
-       long                    ki_retried;     /* just for testing */
        struct iovec            ki_inline_vec;  /* inline vector */
        struct iovec            *ki_iovec;
        unsigned long           ki_nr_segs;
@@ -194,7 +193,7 @@ struct kioctx {
 
        struct aio_ring_info    ring_info;
 
-       struct work_struct      wq;
+       struct delayed_work     wq;
 };
 
 /* prototypes */
@@ -238,7 +237,6 @@ do {                                                                        \
 } while (0)
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
 
 #include <linux/aio_abi.h>
 
index b2ca666d9997ce35025fd85f57c655639bdab0d2..0e07db6cc0d0eeefc1c58e2f4980016afe713d05 100644 (file)
 #define AUDIT_MAC_CIPSOV4_DEL  1408    /* NetLabel: del CIPSOv4 DOI entry */
 #define AUDIT_MAC_MAP_ADD      1409    /* NetLabel: add LSM domain mapping */
 #define AUDIT_MAC_MAP_DEL      1410    /* NetLabel: del LSM domain mapping */
+#define AUDIT_MAC_IPSEC_ADDSA  1411    /* Add a XFRM state */
+#define AUDIT_MAC_IPSEC_DELSA  1412    /* Delete a XFRM state */
+#define AUDIT_MAC_IPSEC_ADDSPD 1413    /* Add a XFRM policy */
+#define AUDIT_MAC_IPSEC_DELSPD 1414    /* Delete a XFRM policy */
 
 #define AUDIT_FIRST_KERN_ANOM_MSG   1700
 #define AUDIT_LAST_KERN_ANOM_MSG    1799
@@ -377,6 +381,7 @@ extern void auditsc_get_stamp(struct audit_context *ctx,
                              struct timespec *t, unsigned int *serial);
 extern int  audit_set_loginuid(struct task_struct *task, uid_t loginuid);
 extern uid_t audit_get_loginuid(struct audit_context *ctx);
+extern void audit_log_task_context(struct audit_buffer *ab);
 extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
 extern int audit_bprm(struct linux_binprm *bprm);
@@ -449,6 +454,7 @@ extern int audit_n_rules;
 #define audit_inode_update(i) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
 #define audit_get_loginuid(c) ({ -1; })
+#define audit_log_task_context(b) do { ; } while (0)
 #define audit_ipc_obj(i) ({ 0; })
 #define audit_ipc_set_perm(q,u,g,m) ({ 0; })
 #define audit_bprm(p) ({ 0; })
index 31e9abb6d9775246c2533e4b89a39d41c896a907..2275f2748708045eddb6a41a21de4b2a60f0677b 100644 (file)
@@ -119,8 +119,7 @@ extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned int *_hash_mask,
                                     unsigned long limit);
 
-#define HASH_HIGHMEM   0x00000001      /* Consider highmem? */
-#define HASH_EARLY     0x00000002      /* Allocating during early boot? */
+#define HASH_EARLY     0x00000001      /* Allocating during early boot? */
 
 /* Only NUMA needs hash distribution.
  * IA64 is known to have sufficient vmalloc space.
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
new file mode 100644 (file)
index 0000000..777dbf6
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BH_H
+#define _LINUX_BH_H
+
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+
+#endif /* _LINUX_BH_H */
diff --git a/include/linux/carta_random32.h b/include/linux/carta_random32.h
deleted file mode 100644 (file)
index f6f3bd9..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Fast, simple, yet decent quality random number generator based on
- * a paper by David G. Carta ("Two Fast Implementations of the
- * `Minimal Standard' Random Number Generator," Communications of the
- * ACM, January, 1990).
- *
- * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P.
- *     Contributed by Stephane Eranian <eranian@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
- */
-#ifndef _LINUX_CARTA_RANDOM32_H_
-#define _LINUX_CARTA_RANDOM32_H_
-
-u64 carta_random32(u64 seed);
-
-#endif /* _LINUX_CARTA_RANDOM32_H_ */
index 6e27f42e3a57d12059800a088112b979aa7d5516..cb57c30081a8b0ee751d8c132b5af6bab3ac9da7 100644 (file)
@@ -80,7 +80,7 @@ typedef __u32 DriverVer_type;
 #define HWORD __u16
 #define DWORD __u32
 
-#define CISS_MAX_LUN   16      
+#define CISS_MAX_LUN   1024
 
 #define LEVEL2LUN   1   // index into Target(x) structure, due to byte swapping
 #define LEVEL3LUN   0
index ee5f53f2ca15a171c74cf3ec392b2b407dc22b8c..f309b00e986e2f6132cab0ec2d4f9ea5e3d9e7c3 100644 (file)
@@ -2,6 +2,10 @@
 #define _LINUX_CDEV_H
 #ifdef __KERNEL__
 
+#include <linux/kobject.h>
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+
 struct cdev {
        struct kobject kobj;
        struct module *owner;
index 4c02119c6ab9d6ef16158ad6f9a35f54f4493e4e..3ea1cd58de97fd7ccbba6f762f32dedca10014db 100644 (file)
@@ -133,7 +133,7 @@ struct cn_callback_data {
 struct cn_callback_entry {
        struct list_head callback_entry;
        struct cn_callback *cb;
-       struct work_struct work;
+       struct delayed_work work;
        struct cn_queue_dev *pdev;
 
        struct cn_callback_id id;
@@ -170,7 +170,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev);
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
 
-void cn_queue_wrapper(void *data);
+void cn_queue_wrapper(struct work_struct *work);
 
 extern int cn_already_initialized;
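The connector change follows the tree-wide workqueue conversion in this merge: deferred work becomes a struct delayed_work and the callback now receives the work item itself rather than a void *data cookie, recovering its context with container_of(). A hedged sketch of the post-conversion callback shape (not the actual drivers/connector implementation):

#include <linux/connector.h>
#include <linux/workqueue.h>

static void example_wrapper(struct work_struct *work)
{
        struct cn_callback_entry *entry =
                container_of(work, struct cn_callback_entry, work.work);

        /* ... use entry->cb, entry->id, entry->pdev ... */
}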
 
index 3fef7d67aedcdb301fcd00309824605315f37d85..bfb520212d7109eafb9811d9df5f827dc0a9b5f6 100644 (file)
 #include <linux/compiler.h>
 #include <linux/cpumask.h>
 #include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 struct cpu {
        int node_id;            /* The node which contains the CPU */
-       int no_control;         /* Should the sysfs control file be created? */
+       int hotpluggable;       /* creates sysfs control file if hotpluggable */
        struct sys_device sysdev;
 };
 
 extern int register_cpu(struct cpu *cpu, int num);
 extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+
+extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
+extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
+
+extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
 #endif
@@ -66,6 +75,17 @@ extern struct sysdev_class cpu_sysdev_class;
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{
+       mutex_lock(cpu_hp_mutex);
+}
+
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{
+       mutex_unlock(cpu_hp_mutex);
+}
+
 extern void lock_cpu_hotplug(void);
 extern void unlock_cpu_hotplug(void);
 #define hotcpu_notifier(fn, pri) {                             \
@@ -77,17 +97,24 @@ extern void unlock_cpu_hotplug(void);
 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
 #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-#else
+
+#else          /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{ }
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{ }
+
 #define lock_cpu_hotplug()     do { } while (0)
 #define unlock_cpu_hotplug()   do { } while (0)
 #define lock_cpu_hotplug_interruptible() 0
-#define hotcpu_notifier(fn, pri)       do { } while (0)
-#define register_hotcpu_notifier(nb)   do { } while (0)
-#define unregister_hotcpu_notifier(nb) do { } while (0)
+#define hotcpu_notifier(fn, pri)       do { (void)(fn); } while (0)
+#define register_hotcpu_notifier(nb)   do { (void)(nb); } while (0)
+#define unregister_hotcpu_notifier(nb) do { (void)(nb); } while (0)
 
 /* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
 static inline int cpu_is_offline(int cpu) { return 0; }
-#endif
+#endif         /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_SUSPEND_SMP
 extern int disable_nonboot_cpus(void);
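The new cpuhotplug_mutex_lock()/cpuhotplug_mutex_unlock() helpers let a subsystem take its own mutex only in configurations where CPUs can actually come and go; without CONFIG_HOTPLUG_CPU they compile away. A hedged sketch of the intended pattern (the mutex and the loop body are illustrative):

#include <linux/cpu.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_hp_mutex);

static void example_walk_cpus(void)
{
        int cpu;

        cpuhotplug_mutex_lock(&example_hp_mutex);
        for_each_online_cpu(cpu) {
                /* per-cpu work that must not race with hot-unplug */
        }
        cpuhotplug_mutex_unlock(&example_hp_mutex);
}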
index 4d8adf6636810875b00f070dd548193e620b7015..8821e1f75b447856a041435d966eb12fb5c94941 100644 (file)
@@ -23,6 +23,7 @@ extern void cpuset_fork(struct task_struct *p);
 extern void cpuset_exit(struct task_struct *p);
 extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+#define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
 #define cpuset_nodes_subset_current_mems_allowed(nodes) \
@@ -45,7 +46,7 @@ extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
 extern int cpuset_memory_pressure_enabled;
 extern void __cpuset_memory_pressure_bump(void);
 
-extern struct file_operations proc_cpuset_operations;
+extern const struct file_operations proc_cpuset_operations;
 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
 
 extern void cpuset_lock(void);
@@ -83,6 +84,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
        return node_possible_map;
 }
 
+#define cpuset_current_mems_allowed (node_online_map)
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 #define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
index 6485e9716b36abbc9e0998a95eb35d3b313d41ae..4aa9046601da18e8c7d18a26b661197887bd6aec 100644 (file)
@@ -241,12 +241,8 @@ int crypto_unregister_alg(struct crypto_alg *alg);
  * Algorithm query interface.
  */
 #ifdef CONFIG_CRYPTO
-int crypto_alg_available(const char *name, u32 flags)
-       __deprecated_for_modules;
 int crypto_has_alg(const char *name, u32 type, u32 mask);
 #else
-static int crypto_alg_available(const char *name, u32 flags)
-       __deprecated_for_modules;
 static inline int crypto_alg_available(const char *name, u32 flags)
 {
        return 0;
@@ -707,16 +703,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
                                                dst, src);
 }
 
-void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules;
-void crypto_digest_update(struct crypto_tfm *tfm,
-                         struct scatterlist *sg, unsigned int nsg)
-       __deprecated_for_modules;
-void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
-       __deprecated_for_modules;
-void crypto_digest_digest(struct crypto_tfm *tfm,
-                         struct scatterlist *sg, unsigned int nsg, u8 *out)
-       __deprecated_for_modules;
-
 static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
 {
        return (struct crypto_hash *)tfm;
@@ -729,14 +715,6 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
        return __crypto_hash_cast(tfm);
 }
 
-static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key,
-                               unsigned int keylen) __deprecated;
-static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
-                                       const u8 *key, unsigned int keylen)
-{
-       return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen);
-}
-
 static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
                                                    u32 type, u32 mask)
 {
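With the deprecated crypto_digest_*() entry points removed, in-kernel users go through the crypto_hash interface kept above. A hedged sketch of a one-shot digest over a linear buffer ("sha1" and the buffer handling are illustrative):

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int hash_example(u8 *data, unsigned int len, u8 *out)
{
        struct hash_desc desc;
        struct scatterlist sg;
        int err;

        desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(desc.tfm))
                return PTR_ERR(desc.tfm);
        desc.flags = 0;

        sg_init_one(&sg, data, len);
        err = crypto_hash_digest(&desc, &sg, len, out);
        crypto_free_hash(desc.tfm);
        return err;
}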
index 952bee79a8f30902328277a6d16920a53e4da12f..a1c10b0c4cf02ae072eea67d312dbd21c236557f 100644 (file)
@@ -24,7 +24,7 @@ extern int debug_locks_off(void);
        int __ret = 0;                                                  \
                                                                        \
        if (unlikely(c)) {                                              \
-               if (debug_locks_off())                                  \
+               if (debug_locks_silent || debug_locks_off())            \
                        WARN_ON(1);                                     \
                __ret = 1;                                              \
        }                                                               \
index 561e2a77805c582fdebcd3d588a222d8adec9a6c..55d1ca5e60f54f11efcfe7078a5e02cf3bb450fd 100644 (file)
@@ -30,7 +30,7 @@
 #ifdef CONFIG_TASK_DELAY_ACCT
 
 extern int delayacct_on;       /* Delay accounting turned on/off */
-extern kmem_cache_t *delayacct_cache;
+extern struct kmem_cache *delayacct_cache;
 extern void delayacct_init(void);
 extern void __delayacct_tsk_init(struct task_struct *);
 extern void __delayacct_tsk_exit(struct task_struct *);
index 583a341e016cea229ad8b04d4213d59974e3cbd3..49ab53ce92dc5d79a55218192de5a9b60d8934a0 100644 (file)
@@ -371,6 +371,9 @@ struct device {
                                           core doesn't touch it */
        struct dev_pm_info      power;
 
+#ifdef CONFIG_NUMA
+       int             numa_node;      /* NUMA node this device is close to */
+#endif
        u64             *dma_mask;      /* dma mask (if dma'able device) */
        u64             coherent_dma_mask;/* Like dma_mask, but for
                                             alloc_coherent mappings as
@@ -394,6 +397,25 @@ struct device {
        void    (*release)(struct device * dev);
 };
 
+#ifdef CONFIG_NUMA
+static inline int dev_to_node(struct device *dev)
+{
+       return dev->numa_node;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+       dev->numa_node = node;
+}
+#else
+static inline int dev_to_node(struct device *dev)
+{
+       return -1;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+}
+#endif
+
 static inline void *
 dev_get_drvdata (struct device *dev)
 {
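The new dev_to_node()/set_dev_node() accessors expose a device's NUMA locality; with CONFIG_NUMA off they degrade to -1 / no-op, which allocators treat as "any node". A short hedged sketch of a locality-aware allocation (dev and size are assumed to come from the caller):

/* allocate DMA-adjacent bookkeeping memory near the device's node */
void *buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));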
index 66d621dbcb6c850c9a2042338ba0732f79c5f637..df1c91855f0efa1336181a4552e9b6fa7cfb725e 100644 (file)
@@ -300,8 +300,9 @@ extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
 extern int __init efi_uart_console_only (void);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
                                        struct resource *data_resource);
-extern unsigned long __init efi_get_time(void);
+extern unsigned long efi_get_time(void);
 extern int __init efi_set_rtc_mmss(unsigned long nowtime);
+extern int is_available_memory(efi_memory_desc_t * md);
 extern struct efi_memory_map memmap;
 
 /**
index b70d1d2c8d2859cf82c8b39435e40a126cf3b8a9..60713e6ea2974f926b7cc52b9e2df143805bea32 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/elf-em.h>
 #include <asm/elf.h>
 
+struct file;
+
 #ifndef elf_read_implies_exec
   /* Executables for which elf_read_implies_exec() returns TRUE will
      have the READ_IMPLIES_EXEC personality flag set automatically.
@@ -358,6 +360,7 @@ extern Elf32_Dyn _DYNAMIC [];
 #define elfhdr         elf32_hdr
 #define elf_phdr       elf32_phdr
 #define elf_note       elf32_note
+#define elf_addr_t     Elf32_Off
 
 #else
 
@@ -365,8 +368,16 @@ extern Elf64_Dyn _DYNAMIC [];
 #define elfhdr         elf64_hdr
 #define elf_phdr       elf64_phdr
 #define elf_note       elf64_note
+#define elf_addr_t     Elf64_Off
 
 #endif
 
+#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+static inline int arch_notes_size(void) { return 0; }
+static inline void arch_write_notes(struct file *file) { }
+
+#define ELF_CORE_EXTRA_NOTES_SIZE arch_notes_size()
+#define ELF_CORE_WRITE_EXTRA_NOTES arch_write_notes(file)
+#endif /* ARCH_HAVE_EXTRA_ELF_NOTES */
 
 #endif /* _LINUX_ELF_H */
index ce0e6109aff0cdb83e05cd350909da82b5a50d99..8c43b13a02fe1df033f161ab91d69cdd7a389858 100644 (file)
@@ -109,74 +109,32 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
  * been done yet.
  */
 
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-               struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext3_journal_get_undo_access(const char *where, handle_t *handle,
-                               struct buffer_head *bh)
+static inline void ext3_journal_release_buffer(handle_t *handle,
+                                               struct buffer_head *bh)
 {
-       int err = journal_get_undo_access(handle, bh);
-       if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
+       journal_release_buffer(handle, bh);
 }
 
-static inline int
-__ext3_journal_get_write_access(const char *where, handle_t *handle,
-                               struct buffer_head *bh)
-{
-       int err = journal_get_write_access(handle, bh);
-       if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+               struct buffer_head *bh, handle_t *handle, int err);
 
-static inline void
-ext3_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
-       journal_release_buffer(handle, bh);
-}
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
 
-static inline int
-__ext3_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
-       int err = journal_forget(handle, bh);
-       if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
 
-static inline int
-__ext3_journal_revoke(const char *where, handle_t *handle,
-                     unsigned long blocknr, struct buffer_head *bh)
-{
-       int err = journal_revoke(handle, blocknr, bh);
-       if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext3_journal_forget(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
 
-static inline int
-__ext3_journal_get_create_access(const char *where,
-                                handle_t *handle, struct buffer_head *bh)
-{
-       int err = journal_get_create_access(handle, bh);
-       if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+                               unsigned long blocknr, struct buffer_head *bh);
 
-static inline int
-__ext3_journal_dirty_metadata(const char *where,
-                             handle_t *handle, struct buffer_head *bh)
-{
-       int err = journal_dirty_metadata(handle, bh);
-       if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext3_journal_get_create_access(const char *where,
+                               handle_t *handle, struct buffer_head *bh);
 
+int __ext3_journal_dirty_metadata(const char *where,
+                               handle_t *handle, struct buffer_head *bh);
 
 #define ext3_journal_get_undo_access(handle, bh) \
        __ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
index 72dd631912e4aaee12ed65e6fb343bb7dfb5f4e0..d716e6392cf6e7e5d6215da354b20a1c96e1d495 100644 (file)
@@ -114,74 +114,32 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
  * been done yet.
  */
 
-void ext4_journal_abort_handle(const char *caller, const char *err_fn,
-               struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext4_journal_get_undo_access(const char *where, handle_t *handle,
-                               struct buffer_head *bh)
+static inline void ext4_journal_release_buffer(handle_t *handle,
+                                               struct buffer_head *bh)
 {
-       int err = jbd2_journal_get_undo_access(handle, bh);
-       if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
+       jbd2_journal_release_buffer(handle, bh);
 }
 
-static inline int
-__ext4_journal_get_write_access(const char *where, handle_t *handle,
-                               struct buffer_head *bh)
-{
-       int err = jbd2_journal_get_write_access(handle, bh);
-       if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+               struct buffer_head *bh, handle_t *handle, int err);
 
-static inline void
-ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
-       jbd2_journal_release_buffer(handle, bh);
-}
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
 
-static inline int
-__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
-       int err = jbd2_journal_forget(handle, bh);
-       if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
 
-static inline int
-__ext4_journal_revoke(const char *where, handle_t *handle,
-                     ext4_fsblk_t blocknr, struct buffer_head *bh)
-{
-       int err = jbd2_journal_revoke(handle, blocknr, bh);
-       if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext4_journal_forget(const char *where, handle_t *handle,
+                               struct buffer_head *bh);
 
-static inline int
-__ext4_journal_get_create_access(const char *where,
-                                handle_t *handle, struct buffer_head *bh)
-{
-       int err = jbd2_journal_get_create_access(handle, bh);
-       if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+                               ext4_fsblk_t blocknr, struct buffer_head *bh);
 
-static inline int
-__ext4_journal_dirty_metadata(const char *where,
-                             handle_t *handle, struct buffer_head *bh)
-{
-       int err = jbd2_journal_dirty_metadata(handle, bh);
-       if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
-       return err;
-}
+int __ext4_journal_get_create_access(const char *where,
+                               handle_t *handle, struct buffer_head *bh);
 
+int __ext4_journal_dirty_metadata(const char *where,
+                               handle_t *handle, struct buffer_head *bh);
 
 #define ext4_journal_get_undo_access(handle, bh) \
        __ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh))
index 74183e6f7f458052c72d0bf3e8297c83a32ff18c..6e77b9177f9e86ffd552fd9dcae491bc6715a6b1 100644 (file)
@@ -64,6 +64,8 @@ struct files_struct {
 
 #define files_fdtable(files) (rcu_dereference((files)->fdt))
 
+extern struct kmem_cache *filp_cachep;
+
 extern void FASTCALL(__fput(struct file *));
 extern void FASTCALL(fput(struct file *));
 
@@ -114,4 +116,6 @@ struct files_struct *get_files_struct(struct task_struct *);
 void FASTCALL(put_files_struct(struct files_struct *fs));
 void reset_files_struct(struct task_struct *, struct files_struct *);
 
+extern struct kmem_cache *files_cachep;
+
 #endif /* __LINUX_FILE_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
new file mode 100644 (file)
index 0000000..6e05e3e
--- /dev/null
@@ -0,0 +1,87 @@
+/* Freezer declarations */
+
+#ifdef CONFIG_PM
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+       return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+       return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+       p->flags |= PF_FREEZE;
+}
+
+/*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+       p->flags &= ~PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+       if (frozen(p)) {
+               p->flags &= ~PF_FROZEN;
+               wake_up_process(p);
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+       p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
+extern int freeze_processes(void);
+extern void thaw_processes(void);
+
+static inline int try_to_freeze(void)
+{
+       if (freezing(current)) {
+               refrigerator();
+               return 1;
+       } else
+               return 0;
+}
+
+extern void thaw_some_processes(int all);
+
+#else
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
+static inline int freeze_processes(void) { BUG(); return 0; }
+static inline void thaw_processes(void) {}
+
+static inline int try_to_freeze(void) { return 0; }
+
+
+#endif
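The new freezer.h collects the helpers kernel threads poll so the suspend path can park them. A hedged sketch of the usual loop in a kthread (the work and the sleep interval are illustrative):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
        while (!kthread_should_stop()) {
                try_to_freeze();        /* blocks in refrigerator() during suspend */
                /* ... one unit of work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}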
index cac7b1ef95435d5d1d2f67ae1bb0e1c8d2c51a7c..70b99fbb560bc4aba52e0013900d5534b77a6038 100644 (file)
@@ -543,19 +543,22 @@ struct inode {
        struct list_head        i_dentry;
        unsigned long           i_ino;
        atomic_t                i_count;
-       umode_t                 i_mode;
        unsigned int            i_nlink;
        uid_t                   i_uid;
        gid_t                   i_gid;
        dev_t                   i_rdev;
+       unsigned long           i_version;
        loff_t                  i_size;
+#ifdef __NEED_I_SIZE_ORDERED
+       seqcount_t              i_size_seqcount;
+#endif
        struct timespec         i_atime;
        struct timespec         i_mtime;
        struct timespec         i_ctime;
        unsigned int            i_blkbits;
-       unsigned long           i_version;
        blkcnt_t                i_blocks;
        unsigned short          i_bytes;
+       umode_t                 i_mode;
        spinlock_t              i_lock; /* i_blocks, i_bytes, maybe i_size */
        struct mutex            i_mutex;
        struct rw_semaphore     i_alloc_sem;
@@ -598,9 +601,6 @@ struct inode {
        void                    *i_security;
 #endif
        void                    *i_private; /* fs or device private pointer */
-#ifdef __NEED_I_SIZE_ORDERED
-       seqcount_t              i_size_seqcount;
-#endif
 };
 
 /*
@@ -636,7 +636,7 @@ extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
  * cmpxchg8b without the need of the lock prefix). For SMP compiles
  * and 64bit archs it makes no difference if preempt is enabled or not.
  */
-static inline loff_t i_size_read(struct inode *inode)
+static inline loff_t i_size_read(const struct inode *inode)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        loff_t i_size;
@@ -679,12 +679,12 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
 #endif
 }
 
-static inline unsigned iminor(struct inode *inode)
+static inline unsigned iminor(const struct inode *inode)
 {
        return MINOR(inode->i_rdev);
 }
 
-static inline unsigned imajor(struct inode *inode)
+static inline unsigned imajor(const struct inode *inode)
 {
        return MAJOR(inode->i_rdev);
 }
@@ -1481,7 +1481,9 @@ extern char * getname(const char __user *);
 extern void __init vfs_caches_init_early(void);
 extern void __init vfs_caches_init(unsigned long);
 
-#define __getname()    kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+extern struct kmem_cache *names_cachep;
+
+#define __getname()    kmem_cache_alloc(names_cachep, GFP_KERNEL)
 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
 #ifndef CONFIG_AUDITSYSCALL
 #define putname(name)   __putname(name)
index c623d12a486e6e1deb90a6ab65d4b71e6b81aab3..11a36ceddf73a1e8670c1efb81a10cd8c0bf3e67 100644 (file)
@@ -18,6 +18,8 @@ struct fs_struct {
        .umask          = 0022, \
 }
 
+extern struct kmem_cache *fs_cachep;
+
 extern void exit_fs(struct task_struct *);
 extern void set_fs_altroot(void);
 extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *);
index 9fc48a674b82dc31c78aeb438b88862a9d28fbab..534744efe30d764abaf8c8aa10baa03821153c24 100644 (file)
@@ -15,7 +15,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 7
+#define FUSE_KERNEL_MINOR_VERSION 8
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -92,6 +92,11 @@ struct fuse_file_lock {
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
 
+/**
+ * Release flags
+ */
+#define FUSE_RELEASE_FLUSH     (1 << 0)
+
 enum fuse_opcode {
        FUSE_LOOKUP        = 1,
        FUSE_FORGET        = 2,  /* no reply */
@@ -127,6 +132,8 @@ enum fuse_opcode {
        FUSE_ACCESS        = 34,
        FUSE_CREATE        = 35,
        FUSE_INTERRUPT     = 36,
+       FUSE_BMAP          = 37,
+       FUSE_DESTROY       = 38,
 };
 
 /* The read buffer is required to be at least 8k, but may be much larger */
@@ -205,12 +212,13 @@ struct fuse_open_out {
 struct fuse_release_in {
        __u64   fh;
        __u32   flags;
-       __u32   padding;
+       __u32   release_flags;
+       __u64   lock_owner;
 };
 
 struct fuse_flush_in {
        __u64   fh;
-       __u32   flush_flags;
+       __u32   unused;
        __u32   padding;
        __u64   lock_owner;
 };
@@ -296,6 +304,16 @@ struct fuse_interrupt_in {
        __u64   unique;
 };
 
+struct fuse_bmap_in {
+       __u64   block;
+       __u32   blocksize;
+       __u32   padding;
+};
+
+struct fuse_bmap_out {
+       __u64   block;
+};
+
 struct fuse_in_header {
        __u32   len;
        __u32   opcode;
index 9049dc65ae51b6b34751c4011e43e36646d56bcb..f7a93770e1be864cd6f1ddb152784597fdac0101 100644 (file)
@@ -17,6 +17,9 @@ struct genlmsghdr {
 #define GENL_HDRLEN    NLMSG_ALIGN(sizeof(struct genlmsghdr))
 
 #define GENL_ADMIN_PERM                0x01
+#define GENL_CMD_CAP_DO                0x02
+#define GENL_CMD_CAP_DUMP      0x04
+#define GENL_CMD_CAP_HASPOL    0x08
 
 /*
  * List of reserved static generic netlink identifiers:
@@ -58,9 +61,6 @@ enum {
        CTRL_ATTR_OP_UNSPEC,
        CTRL_ATTR_OP_ID,
        CTRL_ATTR_OP_FLAGS,
-       CTRL_ATTR_OP_POLICY,
-       CTRL_ATTR_OP_DOIT,
-       CTRL_ATTR_OP_DUMPIT,
        __CTRL_ATTR_OP_MAX,
 };
 
index bf2b6bc3f6fd4fc35dec4dc8072b26c160458dfe..00c314aedab7d8f9487112e9a4d16182185b6332 100644 (file)
@@ -116,6 +116,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
 #endif
+#ifndef HAVE_ARCH_ALLOC_PAGE
+static inline void arch_alloc_page(struct page *page, int order) { }
+#endif
 
 extern struct page *
 FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
index a7ae7c177cacbb3044ed372976f09b7cfe48338f..8b7e4c1e32ae2c8f80d2f8182e2452add7c18451 100644 (file)
@@ -54,8 +54,13 @@ struct gfs2_inum {
        __be64 no_addr;
 };
 
-static inline int gfs2_inum_equal(const struct gfs2_inum *ino1,
-                                 const struct gfs2_inum *ino2)
+struct gfs2_inum_host {
+       __u64 no_formal_ino;
+       __u64 no_addr;
+};
+
+static inline int gfs2_inum_equal(const struct gfs2_inum_host *ino1,
+                                 const struct gfs2_inum_host *ino2)
 {
        return ino1->no_formal_ino == ino2->no_formal_ino &&
               ino1->no_addr == ino2->no_addr;
@@ -89,6 +94,12 @@ struct gfs2_meta_header {
        __be32 __pad1;          /* Was incarnation number in gfs1 */
 };
 
+struct gfs2_meta_header_host {
+       __u32 mh_magic;
+       __u32 mh_type;
+       __u32 mh_format;
+};
+
 /*
  * super-block structure
  *
@@ -128,6 +139,23 @@ struct gfs2_sb {
        /* In gfs1, quota and license dinodes followed */
 };
 
+struct gfs2_sb_host {
+       struct gfs2_meta_header_host sb_header;
+
+       __u32 sb_fs_format;
+       __u32 sb_multihost_format;
+
+       __u32 sb_bsize;
+       __u32 sb_bsize_shift;
+
+       struct gfs2_inum_host sb_master_dir; /* Was jindex dinode in gfs1 */
+       struct gfs2_inum_host sb_root_dir;
+
+       char sb_lockproto[GFS2_LOCKNAME_LEN];
+       char sb_locktable[GFS2_LOCKNAME_LEN];
+       /* In gfs1, quota and license dinodes followed */
+};
+
 /*
  * resource index structure
  */
@@ -145,6 +173,14 @@ struct gfs2_rindex {
        __u8 ri_reserved[64];
 };
 
+struct gfs2_rindex_host {
+       __u64 ri_addr;  /* grp block disk address */
+       __u64 ri_data0; /* first data location */
+       __u32 ri_length;        /* length of rgrp header in fs blocks */
+       __u32 ri_data;  /* num of data blocks in rgrp */
+       __u32 ri_bitbytes;      /* number of bytes in data bitmaps */
+};
+
 /*
  * resource group header structure
  */
@@ -176,6 +212,13 @@ struct gfs2_rgrp {
        __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
 };
 
+struct gfs2_rgrp_host {
+       __u32 rg_flags;
+       __u32 rg_free;
+       __u32 rg_dinodes;
+       __u64 rg_igeneration;
+};
+
 /*
  * quota structure
  */
@@ -187,6 +230,12 @@ struct gfs2_quota {
        __u8 qu_reserved[64];
 };
 
+struct gfs2_quota_host {
+       __u64 qu_limit;
+       __u64 qu_warn;
+       __u64 qu_value;
+};
+
 /*
  * dinode structure
  */
@@ -270,6 +319,27 @@ struct gfs2_dinode {
        __u8 di_reserved[56];
 };
 
+struct gfs2_dinode_host {
+       __u64 di_size;  /* number of bytes in file */
+       __u64 di_blocks;        /* number of blocks in file */
+
+       /* This section varies from gfs1. Padding added to align with
+         * remainder of dinode
+        */
+       __u64 di_goal_meta;     /* rgrp to alloc from next */
+       __u64 di_goal_data;     /* data block goal */
+       __u64 di_generation;    /* generation number for NFS */
+
+       __u32 di_flags; /* GFS2_DIF_... */
+       __u16 di_height;        /* height of metadata */
+
+       /* These only apply to directories  */
+       __u16 di_depth; /* Number of bits in the table */
+       __u32 di_entries;       /* The number of entries in the directory */
+
+       __u64 di_eattr; /* extended attribute block number */
+};
+
 /*
  * directory structure - many of these per directory file
  */
@@ -344,6 +414,16 @@ struct gfs2_log_header {
        __be32 lh_hash;
 };
 
+struct gfs2_log_header_host {
+       struct gfs2_meta_header_host lh_header;
+
+       __u64 lh_sequence;      /* Sequence number of this transaction */
+       __u32 lh_flags; /* GFS2_LOG_HEAD_... */
+       __u32 lh_tail;          /* Block number of log tail */
+       __u32 lh_blkno;
+       __u32 lh_hash;
+};
+
 /*
  * Log type descriptor
  */
@@ -384,6 +464,11 @@ struct gfs2_inum_range {
        __be64 ir_length;
 };
 
+struct gfs2_inum_range_host {
+       __u64 ir_start;
+       __u64 ir_length;
+};
+
 /*
  * Statfs change
  * Describes an change to the pool of free and allocated
@@ -396,6 +481,12 @@ struct gfs2_statfs_change {
        __be64 sc_dinodes;
 };
 
+struct gfs2_statfs_change_host {
+       __u64 sc_total;
+       __u64 sc_free;
+       __u64 sc_dinodes;
+};
+
 /*
  * Quota change
  * Describes an allocation change for a particular
@@ -410,33 +501,38 @@ struct gfs2_quota_change {
        __be32 qc_id;
 };
 
+struct gfs2_quota_change_host {
+       __u64 qc_change;
+       __u32 qc_flags; /* GFS2_QCF_... */
+       __u32 qc_id;
+};
+
 #ifdef __KERNEL__
 /* Translation functions */
 
-extern void gfs2_inum_in(struct gfs2_inum *no, const void *buf);
-extern void gfs2_inum_out(const struct gfs2_inum *no, void *buf);
-extern void gfs2_sb_in(struct gfs2_sb *sb, const void *buf);
-extern void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf);
-extern void gfs2_rindex_out(const struct gfs2_rindex *ri, void *buf);
-extern void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf);
-extern void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf);
-extern void gfs2_quota_in(struct gfs2_quota *qu, const void *buf);
-extern void gfs2_quota_out(const struct gfs2_quota *qu, void *buf);
-extern void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf);
-extern void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf);
+extern void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf);
+extern void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf);
+extern void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf);
+extern void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf);
+extern void gfs2_rindex_out(const struct gfs2_rindex_host *ri, void *buf);
+extern void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf);
+extern void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf);
+extern void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf);
+struct gfs2_inode;
+extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
 extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
 extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
-extern void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf);
-extern void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf);
-extern void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf);
-extern void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf);
+extern void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf);
+extern void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf);
+extern void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf);
+extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf);
+extern void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf);
 
 /* Printing functions */
 
-extern void gfs2_rindex_print(const struct gfs2_rindex *ri);
-extern void gfs2_dinode_print(const struct gfs2_dinode *di);
+extern void gfs2_rindex_print(const struct gfs2_rindex_host *ri);
+extern void gfs2_dinode_print(const struct gfs2_inode *ip);
 
 #endif /* __KERNEL__ */
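
The new *_host structures above are the CPU-endian, in-core counterparts of the big-endian on-disk structures, and the *_in()/*_out() helpers declared under __KERNEL__ convert between the two forms. Purely as an illustration (this sketch is not part of the patch, and it assumes the on-disk struct gfs2_rindex carries the same field names in __be32/__be64 form, as the rest of this header does), an _in helper would look roughly like:

	static void example_rindex_in(struct gfs2_rindex_host *ri, const void *buf)
	{
		const struct gfs2_rindex *str = buf;	/* on-disk, big-endian layout */

		ri->ri_addr     = be64_to_cpu(str->ri_addr);
		ri->ri_length   = be32_to_cpu(str->ri_length);
		ri->ri_data0    = be64_to_cpu(str->ri_data0);
		ri->ri_data     = be32_to_cpu(str->ri_data);
		ri->ri_bitbytes = be32_to_cpu(str->ri_bitbytes);
	}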
 
index fd7d12daa94ff3e0c066ccc789b6878739c9a022..3d8768b619e9ef71f71e60ae90205971298ffef8 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 
@@ -41,9 +42,10 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page) do { (void) (page); } while (0)
 
-#define kmap_atomic(page, idx)         page_address(page)
-#define kunmap_atomic(addr, idx)       do { } while (0)
-#define kmap_atomic_pfn(pfn, idx)      page_address(pfn_to_page(pfn))
+#define kmap_atomic(page, idx) \
+       ({ pagefault_disable(); page_address(page); })
+#define kunmap_atomic(addr, idx)       do { pagefault_enable(); } while (0)
+#define kmap_atomic_pfn(pfn, idx)      kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr)       virt_to_page(ptr)
 #endif
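
With the change above, the non-highmem kmap_atomic() now disables pagefaults just like the highmem version, so callers must not sleep between kmap_atomic() and kunmap_atomic(). A minimal usage sketch (illustrative only; the function name is hypothetical):

	static void example_copy_to_page(struct page *page, const void *src, size_t len)
	{
		void *dst = kmap_atomic(page, KM_USER0);	/* pagefaults disabled here */

		memcpy(dst, src, len);				/* no sleeping allowed */
		kunmap_atomic(dst, KM_USER0);			/* pagefaults enabled again */
	}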
 
index ace64e57e17f4291a813c7a9f61d1779fb560d43..a60995afe3348a7ce3ebd0808252679a17164c98 100644 (file)
@@ -35,6 +35,7 @@ extern int sysctl_hugetlb_shm_group;
 
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
index c115e9e840b4e051b7b573a37c27bfc4564ad504..52f53e2e70c3c2cf6e89e8069f93dddbec24c806 100644 (file)
@@ -461,7 +461,7 @@ struct i2o_driver {
        int (*reply) (struct i2o_controller *, u32, struct i2o_message *);
 
        /* Event handler */
-       void (*event) (struct i2o_event *);
+       work_func_t event;
 
        struct workqueue_struct *event_queue;   /* Event queue */
 
@@ -490,7 +490,7 @@ struct i2o_dma {
  */
 struct i2o_pool {
        char *name;
-       kmem_cache_t *slab;
+       struct kmem_cache *slab;
        mempool_t *mempool;
 };
 
@@ -986,7 +986,8 @@ extern void i2o_driver_unregister(struct i2o_driver *);
 
 /**
  *     i2o_driver_notify_controller_add - Send notification of added controller
- *                                        to a single I2O driver
+ *     @drv: I2O driver
+ *     @c: I2O controller
  *
  *     Send notification of added controller to a single registered driver.
  */
@@ -998,8 +999,9 @@ static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
 };
 
 /**
- *     i2o_driver_notify_controller_remove - Send notification of removed
- *                                           controller to a single I2O driver
+ *     i2o_driver_notify_controller_remove - Send notification of removed controller
+ *     @drv: I2O driver
+ *     @c: I2O controller
  *
  *     Send notification of removed controller to a single registered driver.
  */
@@ -1011,8 +1013,9 @@ static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
 };
 
 /**
- *     i2o_driver_notify_device_add - Send notification of added device to a
- *                                    single I2O driver
+ *     i2o_driver_notify_device_add - Send notification of added device
+ *     @drv: I2O driver
+ *     @i2o_dev: the added i2o_device
  *
  *     Send notification of added device to a single registered driver.
  */
@@ -1025,7 +1028,8 @@ static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
 
 /**
  *     i2o_driver_notify_device_remove - Send notification of removed device
- *                                       to a single I2O driver
+ *     @drv: I2O driver
+ *     @i2o_dev: the added i2o_device
  *
  *     Send notification of removed device to a single registered driver.
  */
@@ -1148,7 +1152,7 @@ static inline void i2o_msg_post(struct i2o_controller *c,
 /**
  *     i2o_msg_post_wait - Post and wait a message and wait until return
  *     @c: controller
- *     @m: message to post
+ *     @msg: message to post
  *     @timeout: time in seconds to wait
  *
  *     This API allows an OSM to post a message and then be told whether or
index 33c5daacc74338dd0d479f976892659ab308eab8..733790d4f7db881a88eddcb3f8a4d31204236a24 100644 (file)
@@ -73,7 +73,7 @@
 extern struct nsproxy init_nsproxy;
 #define INIT_NSPROXY(nsproxy) {                                                \
        .count          = ATOMIC_INIT(1),                               \
-       .nslock         = SPIN_LOCK_UNLOCKED,                           \
+       .nslock         = __SPIN_LOCK_UNLOCKED(nsproxy.nslock),         \
        .uts_ns         = &init_uts_ns,                                 \
        .namespace      = NULL,                                         \
        INIT_IPC_NS(ipc_ns)                                             \
index 5b83e7b59621e22e365da7cd923921846e1d987f..de7593f4e895b4bfaa77c7e22051e372a5b5a48c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/bottom_half.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -217,12 +218,6 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)        save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
-extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
-
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
    tasklets are more than enough. F.e. all serial device BHs et
index 796ca009fd468a8716cd8d6b31570bcc83ca4faf..7a9db390c56a491e3221ed8154ac76ed89295c8f 100644 (file)
@@ -208,6 +208,15 @@ struct kernel_ipmi_msg
    code as the first byte of the incoming data, unlike a response. */
 
 
+/*
+ * Modes for ipmi_set_maintenance_mode() and the userland IOCTL.  The
+ * AUTO setting is the default; it means maintenance mode is entered
+ * automatically for certain commands.  Setting it hard on or off
+ * overrides the automatic operation.
+ */
+#define IPMI_MAINTENANCE_MODE_AUTO     0
+#define IPMI_MAINTENANCE_MODE_OFF      1
+#define IPMI_MAINTENANCE_MODE_ON       2
 
 #ifdef __KERNEL__
 
@@ -373,6 +382,35 @@ int ipmi_unregister_for_cmd(ipmi_user_t   user,
                            unsigned char cmd,
                            unsigned int  chans);
 
+/*
+ * Go into a mode where the driver will not autonomously attempt to do
+ * things with the interface.  It will still respond to attentions and
+ * interrupts, and it will expect that commands will complete.  It
+ * will not automatically check for flags, events, or things of that
+ * nature.
+ *
+ * This is primarily used for firmware upgrades.  The idea is that
+ * when you go into firmware upgrade mode, you do this operation
+ * and the driver will not attempt to do anything but what you tell
+ * it or what the BMC asks for.
+ *
+ * Note that if you send a command that resets the BMC, the driver
+ * will still expect a response from that command.  So the BMC should
+ * reset itself *after* the response is sent.  Resetting before the
+ * response is just silly.
+ *
+ * If in auto maintenance mode, the driver will automatically go into
+ * maintenance mode for 30 seconds if it sees a cold reset, a warm
+ * reset, or a firmware NetFN.  This means that code that uses only
+ * firmware NetFN commands to do upgrades will work automatically
+ * without change, assuming it sends a message every 30 seconds or
+ * less.
+ *
+ * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
+ */
+int ipmi_get_maintenance_mode(ipmi_user_t user);
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+
 /*
  * Allow run-to-completion mode to be set for the interface of
  * a specific user.
@@ -656,4 +694,11 @@ struct ipmi_timing_parms
 #define IPMICTL_GET_TIMING_PARMS_CMD   _IOR(IPMI_IOC_MAGIC, 23, \
                                             struct ipmi_timing_parms)
 
+/*
+ * Get or set the maintenance mode.  See ipmi_set_maintenance_mode() above
+ * for a description of what this does.
+ */
+#define IPMICTL_GET_MAINTENANCE_MODE_CMD       _IOR(IPMI_IOC_MAGIC, 30, int)
+#define IPMICTL_SET_MAINTENANCE_MODE_CMD       _IOW(IPMI_IOC_MAGIC, 31, int)
+
 #endif /* __LINUX_IPMI_H */
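
For illustration only (not part of the patch): userland code could drive the new maintenance-mode ioctls roughly as below, given a file descriptor for an open IPMI device; the helper name is hypothetical.

	#include <sys/ioctl.h>
	#include <linux/ipmi.h>

	static int example_force_maintenance(int fd, int on)
	{
		int mode = on ? IPMI_MAINTENANCE_MODE_ON : IPMI_MAINTENANCE_MODE_AUTO;

		/* _IOW(..., int): the ioctl takes a pointer to the requested mode */
		return ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode);
	}
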
index 4d04d8b58a0a55a13a59ef9055cdd5eac81e4d08..b56a158d587a50e0c9ab0c8712f308bf01ecda74 100644 (file)
@@ -46,6 +46,8 @@
 #define IPMI_NETFN_APP_REQUEST                 0x06
 #define IPMI_NETFN_APP_RESPONSE                        0x07
 #define IPMI_GET_DEVICE_ID_CMD         0x01
+#define IPMI_COLD_RESET_CMD            0x02
+#define IPMI_WARM_RESET_CMD            0x03
 #define IPMI_CLEAR_MSG_FLAGS_CMD       0x30
 #define IPMI_GET_DEVICE_GUID_CMD       0x08
 #define IPMI_GET_MSG_FLAGS_CMD         0x31
 #define IPMI_NETFN_STORAGE_RESPONSE            0x0b
 #define IPMI_ADD_SEL_ENTRY_CMD         0x44
 
+#define IPMI_NETFN_FIRMWARE_REQUEST            0x08
+#define IPMI_NETFN_FIRMWARE_RESPONSE           0x09
+
 /* The default slave address */
 #define IPMI_BMC_SLAVE_ADDR    0x20
 
 /* The BT interface on high-end HP systems supports up to 255 bytes in
  * one transfer.  Its "virtual" BMC supports some commands that are longer
  * than 128 bytes.  Use the full 256, plus NetFn/LUN, Cmd, cCode, plus
- * some overhead.  It would be nice to base this on the "BT Capabilities"
- * but that's too hard to propagate to the rest of the driver. */
+ * some overhead; it's not worth the effort to dynamically size this based
+ * on the results of the "Get BT Capabilities" command. */
 #define IPMI_MAX_MSG_LENGTH    272     /* multiple of 16 */
 
 #define IPMI_CC_NO_ERROR               0x00
 #define IPMI_NODE_BUSY_ERR             0xc0
 #define IPMI_INVALID_COMMAND_ERR       0xc1
+#define IPMI_TIMEOUT_ERR               0xc3
 #define IPMI_ERR_MSG_TRUNCATED         0xc6
+#define IPMI_REQ_LEN_INVALID_ERR       0xc7
+#define IPMI_REQ_LEN_EXCEEDED_ERR      0xc8
+#define IPMI_NOT_IN_MY_STATE_ERR       0xd5    /* IPMI 2.0 */
 #define IPMI_LOST_ARBITRATION_ERR      0x81
 #define IPMI_BUS_ERR                   0x82
 #define IPMI_NAK_ON_WRITE_ERR          0x83
index 6d9c7e4da4720a538fcc7f97ca30240d70cbabcf..c0633108d05dc3c4f5addcee52b5d44a66d86af4 100644 (file)
@@ -115,6 +115,13 @@ struct ipmi_smi_handlers
           poll for operations during things like crash dumps. */
        void (*poll)(void *send_info);
 
+       /* Enable/disable firmware maintenance mode.  Note that this
+          is *not* one of the modes defined above; it is simply an on/off
+          setting.  The message handler does the mode handling.  Note
+          that this is called from interrupt context, so it cannot
+          block. */
+       void (*set_maintenance_mode)(void *send_info, int enable);
+
        /* Tell the handler that we are using it/not using it.  The
           message handler get the modules that this handler belongs
           to; this function lets the SMI claim any modules that it
@@ -173,6 +180,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
                      void                     *send_info,
                      struct ipmi_device_id    *device_id,
                      struct device            *dev,
+                     const char               *sysfs_name,
                      unsigned char            slave_addr);
 
 /*
index fe89444b1c6f32145f4a0a02d1e8f13258e3f299..452737551260ef1663caa93d6ca5d7526dacd275 100644 (file)
@@ -839,7 +839,6 @@ struct journal_s
  */
 
 /* Filing buffers */
-extern void __journal_temp_unlink_buffer(struct journal_head *jh);
 extern void journal_unfile_buffer(journal_t *, struct journal_head *);
 extern void __journal_unfile_buffer(struct journal_head *);
 extern void __journal_refile_buffer(struct journal_head *);
@@ -949,7 +948,7 @@ void journal_put_journal_head(struct journal_head *jh);
 /*
  * handle management
  */
-extern kmem_cache_t *jbd_handle_cache;
+extern struct kmem_cache *jbd_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
index ddb1287957817ced495fe662a235f93b704bcd6c..0e0fedd2039a3866a51fe093fb6792ef527c1804 100644 (file)
@@ -848,7 +848,6 @@ struct journal_s
  */
 
 /* Filing buffers */
-extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
 extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
 extern void __jbd2_journal_unfile_buffer(struct journal_head *);
 extern void __jbd2_journal_refile_buffer(struct journal_head *);
@@ -958,7 +957,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
 /*
  * handle management
  */
-extern kmem_cache_t *jbd2_handle_cache;
+extern struct kmem_cache *jbd2_handle_cache;
 
 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
index efe0ee4cc80baee508567f08c72b3f7d03277e43..06c58c423fe17668987839f71695c8bf64983a49 100644 (file)
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t)
        if (t->buf.tail != NULL)
                t->buf.tail->commit = t->buf.tail->used;
        spin_unlock_irqrestore(&t->buf.lock, flags);
-       schedule_work(&t->buf.work);
+       schedule_delayed_work(&t->buf.work, 0);
 }
 
 #endif
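
The schedule_delayed_work() call above, and the many work_struct to delayed_work and work_func_t conversions elsewhere in this patch (ncpfs, netpoll, relay, libata, mmc, nfs), follow the reworked workqueue API in which the handler receives the work item itself and recovers its context with container_of(). A rough sketch, with a hypothetical structure, of the pattern these headers now assume:

	struct example_dev {
		struct delayed_work	retry;
		int			pending;
	};

	static void example_retry_fn(struct work_struct *work)
	{
		struct example_dev *dev =
			container_of(work, struct example_dev, retry.work);

		dev->pending = 0;	/* the handler derives its own context */
	}

	static void example_init(struct example_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->retry, example_retry_fn);
		schedule_delayed_work(&dev->retry, HZ);	/* run in about one second */
	}
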
index a4ede62b339d360b7845933b913e11ac6084a157..e3abcec6c51c924f32d3181ac8038767034e407e 100644 (file)
@@ -105,6 +105,7 @@ extern struct page *kimage_alloc_control_pages(struct kimage *image,
                                                unsigned int order);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
 
index ac4c0559f7510e16c8b2c6113964284f762fcc9d..769be39b96810414ebdec4fec05d94e997eba5eb 100644 (file)
@@ -165,7 +165,7 @@ extern void arch_disarm_kprobe(struct kprobe *p);
 extern int arch_init_kprobes(void);
 extern void show_registers(struct pt_regs *regs);
 extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot);
+extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
index 84eeecd60a02644a6cc64563747a594b8bafbb54..611f17f79eefe32f78605589f175db8a77616474 100644 (file)
@@ -248,9 +248,9 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
  *
  * Returns the scalar nanoseconds representation of kt
  */
-static inline u64 ktime_to_ns(const ktime_t kt)
+static inline s64 ktime_to_ns(const ktime_t kt)
 {
-       return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
+       return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
 }
 
 #endif
index 202283b5df96b9ebaaf2a035261a391d39509840..ab2754830322f96a5e1a80538104fa143a6028b1 100644 (file)
@@ -575,8 +575,9 @@ struct ata_port {
        struct ata_host         *host;
        struct device           *dev;
 
-       struct work_struct      port_task;
-       struct work_struct      hotplug_task;
+       void                    *port_task_data;
+       struct delayed_work     port_task;
+       struct delayed_work     hotplug_task;
        struct work_struct      scsi_rescan_task;
 
        unsigned int            hsm_task_state;
@@ -755,7 +756,7 @@ extern void ata_host_resume(struct ata_host *host);
 extern int ata_ratelimit(void);
 extern int ata_busy_sleep(struct ata_port *ap,
                          unsigned long timeout_pat, unsigned long timeout);
-extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
+extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
                                void *data, unsigned long delay);
 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
                             unsigned long interval_msec,
index 862d9730a60dd4e4b4de43d60363c4434b11f94a..8c39654549d8cc00aee0d74af2f3bf17efb9c944 100644 (file)
@@ -164,14 +164,12 @@ void                nlmclnt_next_cookie(struct nlm_cookie *);
  */
 struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int);
 struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int);
-struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int);
 struct rpc_clnt * nlm_bind_host(struct nlm_host *);
 void             nlm_rebind_host(struct nlm_host *);
 struct nlm_host * nlm_get_host(struct nlm_host *);
 void             nlm_release_host(struct nlm_host *);
 void             nlm_shutdown_hosts(void);
 extern void      nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32);
-struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int);
 void             nsm_release(struct nsm_handle *);
 
 
index 819f08f1310db878f753e7656da2ce32590a5a69..498bfbd3b4e1acfdd0ba200e21e6a91c3eab2e3f 100644 (file)
@@ -193,7 +193,6 @@ extern void lockdep_free_key_range(void *start, unsigned long size);
 
 extern void lockdep_off(void);
 extern void lockdep_on(void);
-extern int lockdep_internal(void);
 
 /*
  * These methods are used by specific locking variants (spinlocks,
@@ -243,6 +242,8 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 
 # define INIT_LOCKDEP                          .lockdep_recursion = 0,
 
+#define lockdep_depth(tsk)     ((tsk)->lockdep_depth)
+
 #else /* !LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -253,11 +254,6 @@ static inline void lockdep_on(void)
 {
 }
 
-static inline int lockdep_internal(void)
-{
-       return 0;
-}
-
 # define lock_acquire(l, s, t, r, c, i)                do { } while (0)
 # define lock_release(l, n, i)                 do { } while (0)
 # define lockdep_init()                                do { } while (0)
@@ -277,6 +273,9 @@ static inline int lockdep_internal(void)
  * The class key takes no space if lockdep is disabled:
  */
 struct lock_class_key { };
+
+#define lockdep_depth(tsk)     (0)
+
 #endif /* !LOCKDEP */
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
index d538de9019652c851fc6afc281cc1eaa0969268e..a17b147c61e74853e51268c3eaf144aee8c37006 100644 (file)
@@ -114,6 +114,8 @@ struct vm_area_struct {
 #endif
 };
 
+extern struct kmem_cache *vm_area_cachep;
+
 /*
  * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
  * disabled, then there's a single shared list of VMAs maintained by the
@@ -293,6 +295,24 @@ void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
 
+/*
+ * Compound pages have a destructor function.  Provide a
+ * prototype for that function and accessor functions.
+ * These are _only_ valid on the head of a PG_compound page.
+ */
+typedef void compound_page_dtor(struct page *);
+
+static inline void set_compound_page_dtor(struct page *page,
+                                               compound_page_dtor *dtor)
+{
+       page[1].lru.next = (void *)dtor;
+}
+
+static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+       return (compound_page_dtor *)page[1].lru.next;
+}
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -396,7 +416,9 @@ void split_page(struct page *page, unsigned int order);
  * We are going to use the flags for the page to node mapping if its in
  * there.  This includes the case where there is no node, so it is implicit.
  */
-#define FLAGS_HAS_NODE         (NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
 
 #ifndef PFN_SECTION_SHIFT
 #define PFN_SECTION_SHIFT 0
@@ -411,13 +433,18 @@ void split_page(struct page *page, unsigned int order);
 #define NODES_PGSHIFT          (NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT          (ZONES_PGOFF * (ZONES_WIDTH != 0))
 
-/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
-#if FLAGS_HAS_NODE
-#define ZONETABLE_SHIFT                (NODES_SHIFT + ZONES_SHIFT)
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT           (SECTIONS_SHIFT + ZONES_SHIFT)
+#else
+#define ZONEID_SHIFT           (NODES_SHIFT + ZONES_SHIFT)
+#endif
+
+#if ZONES_WIDTH > 0
+#define ZONEID_PGSHIFT         ZONES_PGSHIFT
 #else
-#define ZONETABLE_SHIFT                (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGSHIFT         NODES_PGOFF
 #endif
-#define ZONETABLE_PGSHIFT      ZONES_PGSHIFT
 
 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
 #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
@@ -426,26 +453,28 @@ void split_page(struct page *page, unsigned int order);
 #define ZONES_MASK             ((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK             ((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK          ((1UL << SECTIONS_WIDTH) - 1)
-#define ZONETABLE_MASK         ((1UL << ZONETABLE_SHIFT) - 1)
+#define ZONEID_MASK            ((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(struct page *page)
 {
        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
-struct zone;
-extern struct zone *zone_table[];
-
+/*
+ * The identification function is only used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really
+ * identifying a zone since we could be using the section number
+ * id if no node id is available in the page flags.
+ * We guarantee only that it will return the same value for two
+ * combinable pages in a zone.
+ */
 static inline int page_zone_id(struct page *page)
 {
-       return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
-}
-static inline struct zone *page_zone(struct page *page)
-{
-       return zone_table[page_zone_id(page)];
+       BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK);
+       return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 }
 
-static inline unsigned long zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(struct zone *zone)
 {
 #ifdef CONFIG_NUMA
        return zone->node;
@@ -454,13 +483,20 @@ static inline unsigned long zone_to_nid(struct zone *zone)
 #endif
 }
 
-static inline unsigned long page_to_nid(struct page *page)
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+extern int page_to_nid(struct page *page);
+#else
+static inline int page_to_nid(struct page *page)
+{
+       return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+}
+#endif
+
+static inline struct zone *page_zone(struct page *page)
 {
-       if (FLAGS_HAS_NODE)
-               return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
-       else
-               return zone_to_nid(page_zone(page));
+       return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
+
 static inline unsigned long page_to_section(struct page *page)
 {
        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -477,6 +513,7 @@ static inline void set_page_node(struct page *page, unsigned long node)
        page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
        page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 }
+
 static inline void set_page_section(struct page *page, unsigned long section)
 {
        page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -947,8 +984,6 @@ extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
-extern void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-                                       unsigned long pfn, unsigned long size);
 
 #ifdef CONFIG_NUMA
 extern void setup_per_cpu_pageset(void);
index 528e7d3fecb18123fc3f98bd022bcc5df97efc5f..c15ae1986b9833e9727a3833be875e3daac93030 100644 (file)
@@ -110,7 +110,7 @@ struct mmc_host {
        struct mmc_card         *card_busy;     /* the MMC card claiming host */
        struct mmc_card         *card_selected; /* the selected MMC card */
 
-       struct work_struct      detect;
+       struct delayed_work     detect;
 
        unsigned long           private[0] ____cacheline_aligned;
 };
index e06683e2bea39fb61fc02803aff1e5fcee2ce7fd..e339a7345f25ed88cce27e5e50544aa085a4a465 100644 (file)
@@ -278,7 +278,7 @@ struct zone {
        /*
         * rarely used fields:
         */
-       char                    *name;
+       const char              *name;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -288,19 +288,94 @@ struct zone {
  */
 #define DEF_PRIORITY 12
 
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ *    up short of free memory since the last time (last_full_zap)
+ *    we zeroed fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ *    id, so that we can efficiently evaluate whether that node is
+ *    set in the current task's mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zones offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans.  During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set.  During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * reexamining zones for free memory when they came up short of
+ * memory only moments ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist.  However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter).  A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here.  We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct.  This pointer
+ * both enables us to find the zonelist cache, and in the case of
+ * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
+ * to know that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ *  1) The full, fixed length version, shown below, and
+ *  2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it.  This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+
+struct zonelist_cache {
+       unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];          /* zone->nid */
+       DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);      /* zone full? */
+       unsigned long last_full_zap;            /* when last zap'd (jiffies) */
+};
+#else
+struct zonelist_cache;
+#endif
+
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
  * allocation, the other zones are fallback zones, in decreasing
  * priority.
  *
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
  */
+
 struct zonelist {
-       struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+       struct zonelist_cache *zlcache_ptr;                  // NULL or &zlcache
+       struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
+#ifdef CONFIG_NUMA
+       struct zonelist_cache zlcache;                       // optional ...
+#endif
 };
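
The long comment above explains the intent; as a purely illustrative sketch (this is not the mm/page_alloc.c implementation, and it assumes CONFIG_NUMA plus cpusets providing current->mems_allowed), a first-pass scan consulting the cache might look like:

	static struct zone *example_first_pass(struct zonelist *zl)
	{
		struct zonelist_cache *zlc = zl->zlcache_ptr;	/* NULL for MPOL_BIND lists */
		struct zone **z;
		int i = 0;

		for (z = zl->zones; *z != NULL; z++, i++) {
			if (zlc && test_bit(i, zlc->fullzones))
				continue;	/* seen full recently, skip cheaply */
			if (zlc && !node_isset(zlc->z_to_n[i], current->mems_allowed))
				continue;	/* node not allowed by the cpuset */
			return *z;	/* a real scan would now try to allocate here */
		}
		return NULL;
	}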
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
index 7c0c2c198f1f64400cb269d2e9608031e690ea30..4a189dadb1607a0984cb41cac9f389d2a86afc65 100644 (file)
@@ -63,6 +63,9 @@ struct kparam_array
    not there, read bits mean it's readable, write bits mean it's
    writable. */
 #define __module_param_call(prefix, name, set, get, arg, perm)         \
+       /* Default value instead of permissions? */                     \
+       static int __param_perm_check_##name __attribute__((unused)) =  \
+       BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
        static char __param_str_##name[] = prefix #name;                \
        static struct kernel_param const __param_##name                 \
        __attribute_used__                                              \
index acc7c174ff0091aeb8f46bed30e378633c48ef21..f1b60740d641c5a61daaed95f68b12e465b331ed 100644 (file)
@@ -92,6 +92,12 @@ struct msg_queue {
        struct list_head q_senders;
 };
 
+/* Helper routines for sys_msgsnd and sys_msgrcv */
+extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
+                       size_t msgsz, int msgflg);
+extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+                       size_t msgsz, long msgtyp, int msgflg);
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_MSG_H */
index 27c48daa3183297dd84e0d6345093c8a12c14145..b2b91c47756340ab9dd1a407490c962cafb4d03c 100644 (file)
@@ -94,7 +94,7 @@ do {                                                  \
 
 #define __MUTEX_INITIALIZER(lockname) \
                { .count = ATOMIC_INIT(1) \
-               , .wait_lock = SPIN_LOCK_UNLOCKED \
+               , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
                , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
                __DEBUG_MUTEX_INITIALIZER(lockname) \
                __DEP_MAP_MUTEX_INITIALIZER(lockname) }
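
The __SPIN_LOCK_UNLOCKED(name) conversions in this hunk and in the nsproxy, rtmutex and rwsem headers elsewhere in the patch pass the lock's name to the initializer so that lockdep can give every statically initialized lock its own class. Illustrative only (the symbol names are hypothetical), the resulting usage patterns are:

	static DEFINE_MUTEX(example_mutex);	/* expands via __MUTEX_INITIALIZER(example_mutex) */

	static struct example_state {
		spinlock_t lock;
	} example_state = {
		.lock = __SPIN_LOCK_UNLOCKED(example_state.lock),
	};
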
index d6b6dc09ad972d9f844da2e64d765d254ee8f87f..0f3e69302540d71222d61fa22e2fbdbd2706aae9 100644 (file)
@@ -64,6 +64,7 @@ struct nbd_device {
        struct gendisk *disk;
        int blksize;
        u64 bytesize;
+       pid_t pid; /* pid of nbd-client, if attached */
 };
 
 #endif
index b089d95062835dd769c5c19bfb88361dec94edbe..a503052138bdd0fa7600cfb1742f003154bf81f6 100644 (file)
@@ -127,10 +127,10 @@ struct ncp_server {
        } unexpected_packet;
 };
 
-extern void ncp_tcp_rcv_proc(void *server);
-extern void ncp_tcp_tx_proc(void *server);
-extern void ncpdgram_rcv_proc(void *server);
-extern void ncpdgram_timeout_proc(void *server);
+extern void ncp_tcp_rcv_proc(struct work_struct *work);
+extern void ncp_tcp_tx_proc(struct work_struct *work);
+extern void ncpdgram_rcv_proc(struct work_struct *work);
+extern void ncpdgram_timeout_proc(struct work_struct *work);
 extern void ncpdgram_timeout_call(unsigned long server);
 extern void ncp_tcp_data_ready(struct sock* sk, int len);
 extern void ncp_tcp_write_space(struct sock* sk);
index fb049ec11ff26ab6e6230a884fd57e7c4b9afed3..9d8144a488cd5bfa5a29910b8d1684cb0ace2386 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _NF_CONNTRACK_PPTP_H
 #define _NF_CONNTRACK_PPTP_H
 
+#include <linux/netfilter/nf_conntrack_common.h>
+
 /* state of the control session */
 enum pptp_ctrlsess_state {
        PPTP_SESSION_NONE,                      /* no session present */
@@ -295,7 +297,6 @@ union pptp_ctrl_union {
 /* crap needed for nf_conntrack_compat.h */
 struct nf_conn;
 struct nf_conntrack_expect;
-enum ip_conntrack_info;
 
 extern int
 (*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb,
index 2cc9867b16260e58ce9c4adaeb5ba7d7f61e453b..29930b71a9aab0b263a470c74ba49171c466d31c 100644 (file)
@@ -32,7 +32,7 @@ struct netpoll_info {
        struct netpoll *rx_np; /* netpoll that registered an rx_hook */
        struct sk_buff_head arp_tx; /* list of arp requests to reply to */
        struct sk_buff_head txq;
-       struct work_struct tx_work;
+       struct delayed_work tx_work;
 };
 
 void netpoll_poll(struct netpoll *np);
index 625ffea98561e4ca340636c1e5610ed66931d5a1..04963063e6200023dd0524f023b30f4973317df1 100644 (file)
@@ -33,6 +33,7 @@
 #define FLUSH_HIGHPRI          16      /* high priority memory reclaim flush */
 #define FLUSH_NOCOMMIT         32      /* Don't send the NFSv3/v4 COMMIT */
 #define FLUSH_INVALIDATE       64      /* Invalidate the page cache */
+#define FLUSH_NOWRITEPAGE      128     /* Don't call writepage() */
 
 #ifdef __KERNEL__
 
@@ -427,19 +428,21 @@ extern int  nfs_flush_incompatible(struct file *file, struct page *page);
 extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
 extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
 extern void nfs_writedata_release(void *);
-
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-struct nfs_write_data *nfs_commit_alloc(void);
-void nfs_commit_free(struct nfs_write_data *p);
-#endif
+extern int nfs_set_page_dirty(struct page *);
 
 /*
  * Try to write back everything synchronously (but check the
  * return value!)
  */
-extern int  nfs_sync_inode_wait(struct inode *, unsigned long, unsigned int, int);
+extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
+extern int nfs_sync_mapping_range(struct address_space *, loff_t, loff_t, int);
+extern int nfs_wb_all(struct inode *inode);
+extern int nfs_wb_page(struct inode *inode, struct page* page);
+extern int nfs_wb_page_priority(struct inode *inode, struct page* page, int how);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 extern int  nfs_commit_inode(struct inode *, int);
+extern struct nfs_write_data *nfs_commit_alloc(void);
+extern void nfs_commit_free(struct nfs_write_data *wdata);
 extern void nfs_commit_release(void *wdata);
 #else
 static inline int
@@ -455,28 +458,6 @@ nfs_have_writebacks(struct inode *inode)
        return NFS_I(inode)->npages != 0;
 }
 
-static inline int
-nfs_wb_all(struct inode *inode)
-{
-       int error = nfs_sync_inode_wait(inode, 0, 0, 0);
-       return (error < 0) ? error : 0;
-}
-
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-static inline int nfs_wb_page_priority(struct inode *inode, struct page* page, int how)
-{
-       int error = nfs_sync_inode_wait(inode, page->index, 1,
-                       how | FLUSH_STABLE);
-       return (error < 0) ? error : 0;
-}
-
-static inline int nfs_wb_page(struct inode *inode, struct page* page)
-{
-       return nfs_wb_page_priority(inode, page, 0);
-}
-
 /*
  * Allocate nfs_write_data structures
  */
index 7ccfc7ef0a83afd2a58193ddcaa103f442e051cb..95796e6924f1d10be492771595275b7c169dde3a 100644 (file)
@@ -51,7 +51,7 @@ struct nfs_client {
 
        unsigned long           cl_lease_time;
        unsigned long           cl_last_renewal;
-       struct work_struct      cl_renewd;
+       struct delayed_work     cl_renewd;
 
        struct rpc_wait_queue   cl_rpcwaitq;
 
index 1f7bd287c230e5cc78cc2cd079b39d77f3768c99..2e555d49c9b732fa4379b8b1c17a98cb430ce857 100644 (file)
@@ -30,6 +30,8 @@
 #define PG_BUSY                        0
 #define PG_NEED_COMMIT         1
 #define PG_NEED_RESCHED                2
+#define PG_NEED_FLUSH          3
+#define PG_FLUSHING            4
 
 struct nfs_inode;
 struct nfs_page {
@@ -60,8 +62,9 @@ extern        void nfs_clear_request(struct nfs_page *req);
 extern void nfs_release_request(struct nfs_page *req);
 
 
-extern  int nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
-                               unsigned long idx_start, unsigned int npages);
+extern long nfs_scan_dirty(struct address_space *mapping,
+                               struct writeback_control *wbc,
+                               struct list_head *dst);
 extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, struct list_head *dst,
                          unsigned long idx_start, unsigned int npages);
 extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
index 768c1ad5ff6f93e1859ca7771fb20fbf36c4faf2..9ee9da5e1cc9de834a4e81c9155b3e9e6b0f57ca 100644 (file)
@@ -785,8 +785,6 @@ struct nfs_rpc_ops {
        int     (*readlink)(struct inode *, struct page *, unsigned int,
                            unsigned int);
        int     (*read)    (struct nfs_read_data *);
-       int     (*write)   (struct nfs_write_data *);
-       int     (*commit)  (struct nfs_write_data *);
        int     (*create)  (struct inode *, struct dentry *,
                            struct iattr *, int, struct nameidata *);
        int     (*remove)  (struct inode *, struct qstr *);
index e16904e28c3a39b57480d5bc5e64f5923cc5434a..acb4ed1302479ac4f1833466905024e013de41e7 100644 (file)
  * disables interrupts for a long time. This call is stateless.
  */
 #ifdef ARCH_HAS_NMI_WATCHDOG
+#include <asm/nmi.h>
 extern void touch_nmi_watchdog(void);
 #else
 # define touch_nmi_watchdog() touch_softlockup_watchdog()
 #endif
 
+#ifndef trigger_all_cpu_backtrace
+#define trigger_all_cpu_backtrace() do { } while (0)
+#endif
+
 #endif
index c09da1e30c54f8f7d2274899d4e9be179c5efe85..4d972bbef31610e877a4b5739a7299d291009d9a 100644 (file)
 #define PCI_DEVICE_ID_NS_CS5535_IDE    0x002d
 #define PCI_DEVICE_ID_NS_CS5535_AUDIO  0x002e
 #define PCI_DEVICE_ID_NS_CS5535_USB    0x002f
-#define PCI_DEVICE_ID_NS_CS5535_VIDEO  0x0030
+#define PCI_DEVICE_ID_NS_GX_VIDEO      0x0030
 #define PCI_DEVICE_ID_NS_SATURN                0x0035
 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
 #define PCI_DEVICE_ID_NS_SCx200_SMI    0x0501
 #define PCI_DEVICE_ID_NS_SC1100_XBUS   0x0515
 #define PCI_DEVICE_ID_NS_87410         0xd001
 
-#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE  0x0028
-#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE   0x002b
+#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE  0x0028
 
 #define PCI_VENDOR_ID_TSENG            0x100c
 #define PCI_DEVICE_ID_TSENG_W32P_2     0x3202
 #define PCI_DEVICE_ID_OXSEMI_16PCI95N  0x9511
 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP        0x9513
 #define PCI_DEVICE_ID_OXSEMI_16PCI952  0x9521
+#define PCI_DEVICE_ID_OXSEMI_16PCI952PP        0x9523
 
 #define PCI_VENDOR_ID_SAMSUNG          0x144d
 
 #define PCI_DEVICE_ID_TIGON3_5750M     0x167c
 #define PCI_DEVICE_ID_TIGON3_5751M     0x167d
 #define PCI_DEVICE_ID_TIGON3_5751F     0x167e
+#define PCI_DEVICE_ID_TIGON3_5787F     0x167f
 #define PCI_DEVICE_ID_TIGON3_5787M     0x1693
 #define PCI_DEVICE_ID_TIGON3_5782      0x1696
 #define PCI_DEVICE_ID_TIGON3_5786      0x169a
 #define PCI_DEVICE_ID_FARSITE_TE1       0x1610
 #define PCI_DEVICE_ID_FARSITE_TE1C      0x1612
 
+#define PCI_VENDOR_ID_ARIMA            0x161f
+
 #define PCI_VENDOR_ID_SIBYTE           0x166d
 #define PCI_DEVICE_ID_BCM1250_PCI      0x0001
 #define PCI_DEVICE_ID_BCM1250_HT       0x0002
index 0f0b880c428034f66f9a6203fa58d1f5962541fe..265bafab64941225f0cad5900e349e4849bdde42 100644 (file)
@@ -285,6 +285,7 @@ struct sadb_x_sec_ctx {
 #define SADB_X_AALG_SHA2_384HMAC       6
 #define SADB_X_AALG_SHA2_512HMAC       7
 #define SADB_X_AALG_RIPEMD160HMAC      8
+#define SADB_X_AALG_AES_XCBC_MAC       9
 #define SADB_X_AALG_NULL               251     /* kame */
 #define SADB_AALG_MAX                  251
 
index acce53fd38b6484416ddc855996c0974bada2e81..5670b340c4ef4d7b5561f7d5635e4386a135b11b 100644 (file)
@@ -6,10 +6,15 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/cpumask.h>
+#include <linux/cache.h>
+
 #include <asm/errno.h>
 
+extern int prof_on __read_mostly;
+
 #define CPU_PROFILING  1
 #define SCHED_PROFILING        2
+#define SLEEP_PROFILING        3
 
 struct proc_dir_entry;
 struct pt_regs;
@@ -18,7 +23,24 @@ struct notifier_block;
 /* init basic kernel profiler */
 void __init profile_init(void);
 void profile_tick(int);
-void profile_hit(int, void *);
+
+/*
+ * Add multiple profiler hits to a given address:
+ */
+void profile_hits(int, void *ip, unsigned int nr_hits);
+
+/*
+ * Single profiler hit:
+ */
+static inline void profile_hit(int type, void *ip)
+{
+       /*
+        * Speedup for the common (no profiling enabled) case:
+        */
+       if (unlikely(prof_on == type))
+               profile_hits(type, ip, 1);
+}
+
 #ifdef CONFIG_PROC_FS
 void create_prof_cpu_mask(struct proc_dir_entry *);
 #else
index 5110201a415949e5e10bd8381ff4f38e120320cf..90c23f690c0deab4fedeb6f5126dfbb4838cb54d 100644 (file)
@@ -37,6 +37,9 @@ extern int dquot_release(struct dquot *dquot);
 extern int dquot_commit_info(struct super_block *sb, int type);
 extern int dquot_mark_dquot_dirty(struct dquot *dquot);
 
+int remove_inode_dquot_ref(struct inode *inode, int type,
+                          struct list_head *tofree_head);
+
 extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
 extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
                int format_id, int type);
index cbfa1153742120d4a01798e181a06815555281e6..0deb842541acee1aad74d1e742895ba9812d0661 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
 
 #include <linux/preempt.h>
 #include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+
+/*
+ * A direct pointer (root->rnode pointing directly to a data item,
+ * rather than another radix_tree_node) is signalled by the low bit
+ * set in the root->rnode pointer.
+ *
+ * In this case root->height is also zero, but the direct pointer tests are
+ * needed for RCU lookups when root->height is unreliable.
+ */
+#define RADIX_TREE_DIRECT_PTR  1
+
+static inline void *radix_tree_ptr_to_direct(void *ptr)
+{
+       return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR);
+}
+
+static inline void *radix_tree_direct_to_ptr(void *ptr)
+{
+       return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR);
+}
+
+static inline int radix_tree_is_direct_ptr(void *ptr)
+{
+       return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR);
+}
+
+/*** radix-tree API starts here ***/
 
 #define RADIX_TREE_MAX_TAGS 2
 
@@ -47,6 +77,77 @@ do {                                                                 \
        (root)->rnode = NULL;                                           \
 } while (0)
 
+/**
+ * Radix-tree synchronization
+ *
+ * The radix-tree API requires that users provide all synchronisation (with
+ * specific exceptions, noted below).
+ *
+ * Synchronization of access to the data items being stored in the tree, and
+ * management of their lifetimes must be completely managed by API users.
+ *
+ * For API usage, in general,
+ * - any function _modifying_ the tree or tags (inserting or deleting
+ *   items, setting or clearing tags) must exclude other modifications, and
+ *   exclude any functions reading the tree.
+ * - any function _reading_ the tree or tags (looking up items or tags,
+ *   gang lookups) must exclude modifications to the tree, but may occur
+ *   concurrently with other readers.
+ *
+ * The notable exceptions to this rule are the following functions:
+ * radix_tree_lookup
+ * radix_tree_tag_get
+ * radix_tree_gang_lookup
+ * radix_tree_gang_lookup_tag
+ * radix_tree_tagged
+ *
+ * The first 4 functions are able to be called locklessly, using RCU. The
+ * caller must ensure calls to these functions are made within rcu_read_lock()
+ * regions. Other readers (lock-free or otherwise) and modifications may be
+ * running concurrently.
+ *
+ * It is still required that the caller manage the synchronization and lifetimes
+ * of the items. So if RCU lock-free lookups are used, typically this would mean
+ * that the items have their own locks, or are amenable to lock-free access; and
+ * that the items are freed by RCU (or only freed after having been deleted from
+ * the radix tree *and* a synchronize_rcu() grace period).
+ *
+ * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
+ * access to data items when inserting into or looking up from the radix tree)
+ *
+ * radix_tree_tagged is able to be called without locking or RCU.
+ */
+
+/**
+ * radix_tree_deref_slot       - dereference a slot
+ * @pslot:     pointer to slot, returned by radix_tree_lookup_slot
+ * Returns:    item that was stored in that slot with any direct pointer flag
+ *             removed.
+ *
+ * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
+ * locked across slot lookup and dereference.  More likely, will be used with
+ * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ */
+static inline void *radix_tree_deref_slot(void **pslot)
+{
+       return radix_tree_direct_to_ptr(*pslot);
+}
+/**
+ * radix_tree_replace_slot     - replace item in a slot
+ * @pslot:     pointer to slot, returned by radix_tree_lookup_slot
+ * @item:      new item to store in the slot.
+ *
+ * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
+ * across slot lookup and replacement.
+ */
+static inline void radix_tree_replace_slot(void **pslot, void *item)
+{
+       BUG_ON(radix_tree_is_direct_ptr(item));
+       rcu_assign_pointer(*pslot,
+               (void *)((unsigned long)item |
+                       ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
+}
+
 int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
 void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
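
Per the synchronization rules documented above, the lookup-side calls may run locklessly under rcu_read_lock(). A minimal caller sketch (illustrative only; keeping the returned item valid, whether by RCU freeing, refcounting, or the caller's own locking, remains the caller's job):

	static void *example_lockless_lookup(struct radix_tree_root *root,
					     unsigned long index)
	{
		void *item;

		rcu_read_lock();
		item = radix_tree_lookup(root, index);	/* one of the RCU-safe lookups */
		rcu_read_unlock();

		return item;
	}
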
index f13299a15591ed6170bb440495b69edd6262104a..03636d7918fef80b565b0e1141247be35c79bd88 100644 (file)
@@ -235,7 +235,7 @@ struct raid5_private_data {
         */
        int                     active_name;
        char                    cache_name[2][20];
-       kmem_cache_t            *slab_cache; /* for allocating stripes */
+       struct kmem_cache               *slab_cache; /* for allocating stripes */
 
        int                     seq_flush, seq_write;
        int                     quiesce;
index 7bc6bfb86253899c9ad088ffa39e9f4c81c71147..d0e4dce33ad571f49ea9da77d37d4c3b560a48df 100644 (file)
@@ -739,7 +739,7 @@ struct block_head {
 #define PUT_B_FREE_SPACE(p_s_bh,val)  do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
 
 /* Get right delimiting key. -- little endian */
-#define B_PRIGHT_DELIM_KEY(p_s_bh)   (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))
+#define B_PRIGHT_DELIM_KEY(p_s_bh)   (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
 
 /* Does the buffer contain a disk leaf. */
 #define B_IS_ITEMS_LEVEL(p_s_bh)     (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
index 62a7169aed8b12d1a4e231f57b93d68bd4ec040e..3a28742d86f96ce04faaad22f0b296619724da89 100644 (file)
@@ -249,7 +249,8 @@ struct reiserfs_journal {
        int j_errno;
 
        /* when flushing ordered buffers, throttle new ordered writers */
-       struct work_struct j_work;
+       struct delayed_work j_work;
+       struct super_block *j_work_sb;
        atomic_t j_async_throttle;
 };
 
index 24accb483849c86415406a33ff685bc927900441..c6a48bfc8b14700b5dee19e9b6630744c5187129 100644 (file)
@@ -38,7 +38,7 @@ struct rchan_buf
        size_t subbufs_consumed;        /* count of sub-buffers consumed */
        struct rchan *chan;             /* associated channel */
        wait_queue_head_t read_wait;    /* reader wait queue */
-       struct work_struct wake_readers; /* reader wake-up work struct */
+       struct delayed_work wake_readers; /* reader wake-up work struct */
        struct dentry *dentry;          /* channel file dentry */
        struct kref kref;               /* channel buffer refcount */
        struct page **page_array;       /* array of current buffer pages */
@@ -274,7 +274,7 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
 /*
  * exported relay file operations, kernel/relay.c
  */
-extern struct file_operations relay_file_operations;
+extern const struct file_operations relay_file_operations;
 
 #endif /* _LINUX_RELAY_H */
 
index db2c1df4fef96acdc9715b5fb7baaa88f27c8e0f..36f850373d2c85180c0e71f523f6f70eb66463e5 100644 (file)
@@ -30,11 +30,11 @@ struct anon_vma {
 
 #ifdef CONFIG_MMU
 
-extern kmem_cache_t *anon_vma_cachep;
+extern struct kmem_cache *anon_vma_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
-       return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+       return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
index 5d41dee82f80310d333aa2ee5ba67a07bbc54d63..b0090e9f788432f84a1d67b86d2879ad4a74e031 100644 (file)
@@ -63,7 +63,7 @@ struct hrtimer_sleeper;
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
-       { .wait_lock = SPIN_LOCK_UNLOCKED \
+       { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
        , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
        , .owner = NULL \
        __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
index ae1fcadd598e222aa8f572e768aa5a331ca986a7..813cee13da0db4335a38a504916286b1550a7efb 100644 (file)
@@ -44,7 +44,8 @@ struct rw_semaphore {
 #endif
 
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
index eafe4a7b8237d902e31eef13aaacd212ea5b84f4..dede82c6344547edfa18bd60601a17ce2740f114 100644 (file)
@@ -194,7 +194,16 @@ extern void init_idle(struct task_struct *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
-extern void show_state(void);
+/*
+ * Only dump TASK_* tasks. (-1 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+       show_state_filter(-1);
+}
+
 extern void show_regs(struct pt_regs *);
 
 /*
@@ -338,15 +347,23 @@ struct mm_struct {
 
        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
-       unsigned dumpable:2;
        cpumask_t cpu_vm_mask;
 
        /* Architecture-specific MM context */
        mm_context_t context;
 
-       /* Token based thrashing protection. */
-       unsigned long swap_token_time;
-       char recent_pagein;
+       /* Swap token stuff */
+       /*
+        * Last value of global fault stamp as seen by this process.
+        * In other words, this value gives an indication of how long
+        * it has been since this task got the token.
+        * Look at mm/thrash.c
+        */
+       unsigned int faultstamp;
+       unsigned int token_priority;
+       unsigned int last_interval;
+
+       unsigned char dumpable:2;
 
        /* coredumping support */
        int core_waiters;
@@ -556,7 +573,7 @@ struct sched_info {
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
 #ifdef CONFIG_SCHEDSTATS
-extern struct file_operations proc_schedstat_operations;
+extern const struct file_operations proc_schedstat_operations;
 #endif /* CONFIG_SCHEDSTATS */
 
 #ifdef CONFIG_TASK_DELAY_ACCT
@@ -1288,7 +1305,6 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv);
 extern int kill_pid(struct pid *pid, int sig, int priv);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -1610,87 +1626,6 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_PM
-/*
- * Check if a process has been frozen
- */
-static inline int frozen(struct task_struct *p)
-{
-       return p->flags & PF_FROZEN;
-}
-
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
-       return p->flags & PF_FREEZE;
-}
-
-/*
- * Request that a process be frozen
- * FIXME: SMP problem. We may not modify other process' flags!
- */
-static inline void freeze(struct task_struct *p)
-{
-       p->flags |= PF_FREEZE;
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
-{
-       p->flags &= ~PF_FREEZE;
-}
-
-/*
- * Wake up a frozen process
- */
-static inline int thaw_process(struct task_struct *p)
-{
-       if (frozen(p)) {
-               p->flags &= ~PF_FROZEN;
-               wake_up_process(p);
-               return 1;
-       }
-       return 0;
-}
-
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
-{
-       p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
-}
-
-extern void refrigerator(void);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-static inline int try_to_freeze(void)
-{
-       if (freezing(current)) {
-               refrigerator();
-               return 1;
-       } else
-               return 0;
-}
-#else
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
-
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
-
-#endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
 #endif
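The show_state_filter() interface added above lets callers dump only tasks in a given state, while show_state() keeps its old behaviour by passing -1. A minimal sketch of a caller, with an illustrative helper name that is not part of this patch:

/* Illustrative only: dump just the uninterruptible (blocked) tasks,
 * e.g. from a sysrq handler; show_state() is equivalent to passing -1. */
static void dump_blocked_tasks(void)
{
	show_state_filter(TASK_UNINTERRUPTIBLE);
}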
index 2925e66a6732937f1693e2e5fd1868ec6d86c3df..b02308ee7667790c78aed464aaddbea9e65c4fb9 100644 (file)
@@ -42,7 +42,8 @@ struct screen_info {
        u16 pages;              /* 0x32 */
        u16 vesa_attributes;    /* 0x34 */
        u32 capabilities;       /* 0x36 */
-                               /* 0x3a -- 0x3f reserved for future expansion */
+                               /* 0x3a -- 0x3b reserved for future expansion */
+                               /* 0x3c -- 0x3f micro stack for relocatable kernels */
 };
 
 extern struct screen_info screen_info;
index b95f6eb7254cd889bfc5c9db9ab2d6029e0f2994..3e3cccbb1cac355f385f474477f2222015e1e528 100644 (file)
@@ -20,7 +20,7 @@ struct seq_file {
        loff_t index;
        loff_t version;
        struct mutex lock;
-       struct seq_operations *op;
+       const struct seq_operations *op;
        void *private;
 };
 
@@ -31,7 +31,7 @@ struct seq_operations {
        int (*show) (struct seq_file *m, void *v);
 };
 
-int seq_open(struct file *, struct seq_operations *);
+int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
index 8e9681413726bc5db22b6ffb031870f8bf3e91bf..71310d80c09aae4d866f743baeb6e5ae932651e2 100644 (file)
@@ -41,6 +41,7 @@ enum {
        PLAT8250_DEV_FOURPORT,
        PLAT8250_DEV_ACCENT,
        PLAT8250_DEV_BOCA,
+       PLAT8250_DEV_EXAR_ST16C554,
        PLAT8250_DEV_HUB6,
        PLAT8250_DEV_MCA,
        PLAT8250_DEV_AU1X00,
index 463ab953b0925f2607b52e20d70bc06f4fdced38..827672136646a1be0d32e84003c3f5eaf0a32907 100644 (file)
 
 #define PORT_S3C2412   73
 
+/* Xilinx uartlite */
+#define PORT_UARTLITE  74
 
 #ifdef __KERNEL__
 
index 117135e33d67116e4cf303b2f3872d2c6e63a1b8..14749056dd630ac0072439ed53225caac9025c2f 100644 (file)
@@ -241,6 +241,8 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 struct pt_regs;
 extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
 
+extern struct kmem_cache *sighand_cachep;
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNAL_H */
index a05a5f7c0b738ff8527dd97e1572cdcee32b69f7..4ff3940210d8f5518788d965bc1c7a73406bab36 100644 (file)
@@ -332,20 +332,20 @@ struct sk_buff {
 extern void kfree_skb(struct sk_buff *skb);
 extern void           __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-                                  gfp_t priority, int fclone);
+                                  gfp_t priority, int fclone, int node);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
-       return __alloc_skb(size, priority, 0);
+       return __alloc_skb(size, priority, 0, -1);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                               gfp_t priority)
 {
-       return __alloc_skb(size, priority, 1);
+       return __alloc_skb(size, priority, 1, -1);
 }
 
-extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
                                            unsigned int size,
                                            gfp_t priority);
 extern void           kfree_skbmem(struct sk_buff *skb);
index c4947b8a2c0333f00a520fb83db5b77a725e09fe..2271886744f87a5fb43d5f307b7c062dc54528c1 100644 (file)
@@ -7,27 +7,17 @@
 #ifndef _LINUX_SLAB_H
 #define        _LINUX_SLAB_H
 
-#if    defined(__KERNEL__)
+#ifdef __KERNEL__
 
-typedef struct kmem_cache kmem_cache_t;
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/page.h>          /* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h>         /* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
 
-#include       <linux/gfp.h>
-#include       <linux/init.h>
-#include       <linux/types.h>
-#include       <asm/page.h>            /* kmalloc_sizes.h needs PAGE_SIZE */
-#include       <asm/cache.h>           /* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define        SLAB_NOFS               GFP_NOFS
-#define        SLAB_NOIO               GFP_NOIO
-#define        SLAB_ATOMIC             GFP_ATOMIC
-#define        SLAB_USER               GFP_USER
-#define        SLAB_KERNEL             GFP_KERNEL
-#define        SLAB_DMA                GFP_DMA
-
-#define SLAB_LEVEL_MASK                GFP_LEVEL_MASK
-
-#define        SLAB_NO_GROW            __GFP_NO_GROW   /* don't grow a cache */
+/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
+typedef struct kmem_cache kmem_cache_t __deprecated;
 
 /* flags to pass to kmem_cache_create().
  * The first 3 are only valid when the allocator has been built
@@ -57,22 +47,23 @@ typedef struct kmem_cache kmem_cache_t;
 /* prototypes */
 extern void __init kmem_cache_init(void);
 
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
-                                      void (*)(void *, kmem_cache_t *, unsigned long),
-                                      void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+                       unsigned long,
+                       void (*)(void *, struct kmem_cache *, unsigned long),
+                       void (*)(void *, struct kmem_cache *, unsigned long));
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern int kmem_cache_shrink(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+extern void kmem_cache_free(struct kmem_cache *, void *);
+extern unsigned int kmem_cache_size(struct kmem_cache *);
+extern const char *kmem_cache_name(struct kmem_cache *);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
-       size_t           cs_size;
-       kmem_cache_t    *cs_cachep;
-       kmem_cache_t    *cs_dmacachep;
+       size_t                  cs_size;
+       struct kmem_cache       *cs_cachep;
+       struct kmem_cache       *cs_dmacachep;
 };
 extern struct cache_sizes malloc_sizes[];
 
@@ -211,7 +202,7 @@ extern unsigned int ksize(const void *);
 extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
@@ -236,8 +227,27 @@ found:
        }
        return __kmalloc_node(size, flags, node);
 }
+
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records the calling function of the routine calling it for slab leak
+ * tracking instead of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_node_track_caller(size, flags, node) \
+       __kmalloc_node(size, flags, node)
 #else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+       __kmalloc_node_track_caller(size, flags, node, \
+                       __builtin_return_address(0))
+#endif
+#else /* CONFIG_NUMA */
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+                                       gfp_t flags, int node)
 {
        return kmem_cache_alloc(cachep, flags);
 }
@@ -245,10 +255,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return kmalloc(size, flags);
 }
+
+#define kmalloc_node_track_caller(size, flags, node) \
+       kmalloc_track_caller(size, flags)
 #endif
 
 extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
 
 #else /* CONFIG_SLOB */
 
@@ -283,16 +296,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 #define kzalloc(s, f) __kzalloc(s, f)
 #define kmalloc_track_caller kmalloc
 
-#endif /* CONFIG_SLOB */
+#define kmalloc_node_track_caller kmalloc_node
 
-/* System wide caches */
-extern kmem_cache_t    *vm_area_cachep;
-extern kmem_cache_t    *names_cachep;
-extern kmem_cache_t    *files_cachep;
-extern kmem_cache_t    *filp_cachep;
-extern kmem_cache_t    *fs_cachep;
-extern kmem_cache_t    *sighand_cachep;
-extern kmem_cache_t    *bio_cachep;
+#endif /* CONFIG_SLOB */
 
 #endif /* __KERNEL__ */
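The kmalloc_node_track_caller() helper introduced above lets allocation wrappers attribute slab-leak reports to their real caller rather than to the wrapper itself. A minimal sketch, assuming <linux/slab.h>; the wrapper name is hypothetical and not part of this patch:

/* Hypothetical wrapper: with CONFIG_DEBUG_SLAB the leak tracker records
 * whoever called my_alloc_on_node(), not the wrapper itself, because the
 * macro passes __builtin_return_address(0) from this stack frame. */
static void *my_alloc_on_node(size_t size, int node)
{
	return kmalloc_node_track_caller(size, GFP_KERNEL, node);
}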
 
index 51649987f691dad9b339d3ac8d8770ac2d5ede08..7ba23ec8211b11f22edd21369f824d81b5a2ee52 100644 (file)
@@ -99,6 +99,13 @@ static inline int up_smp_call_function(void)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()                     1
 #define smp_prepare_boot_cpu()                 do {} while (0)
+static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+                               void *info, int retry, int wait)
+{
+       /* Disable interrupts here? */
+       func(info);
+       return 0;
+}
 
 #endif /* !SMP */
 
index 8451052ca66f4ad14da7bc1bc120578d455fb626..94b767d6427561297772ad556d9466333f424732 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/thread_info.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#include <linux/bottom_half.h>
 
 #include <asm/system.h>
 
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
new file mode 100644 (file)
index 0000000..d3e5f27
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _LINUX_START_KERNEL_H
+#define _LINUX_START_KERNEL_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+/* Define the prototype for start_kernel here, rather than cluttering
+   up something else. */
+
+extern asmlinkage void __init start_kernel(void);
+
+#endif /* _LINUX_START_KERNEL_H */
index 97b62e97dd8d3fcaef89e0332005d97abd397c5c..2db2fbf349472e3760a003f6d046e35687658ed6 100644 (file)
@@ -90,8 +90,6 @@ struct gss_cred {
 #define gc_flags               gc_base.cr_flags
 #define gc_expire              gc_base.cr_expire
 
-void print_hexl(u32 *p, u_int length, u_int offset);
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_AUTH_GSS_H */
 
index f6d1d646ce05c1039437ff7af52155df9afbe353..a1be89deb3af7cb0f5d592b5bbfcdb82e216ccd0 100644 (file)
@@ -53,6 +53,7 @@ struct rpc_clnt {
        struct dentry *         cl_dentry;      /* inode */
        struct rpc_clnt *       cl_parent;      /* Points to parent of clones */
        struct rpc_rtt          cl_rtt_default;
+       struct rpc_program *    cl_program;
        char                    cl_inline_name[32];
 };
 
index e4729aa676547d49926d9fe798204eb7f7910972..60fce3c928570166fdf354eee838adc22dc24d37 100644 (file)
@@ -62,12 +62,6 @@ extern unsigned int          nlm_debug;
 # define RPC_IFDEBUG(x)
 #endif
 
-#ifdef RPC_PROFILE
-# define pprintk(args...)      printk(## args)
-#else
-# define pprintk(args...)      do ; while (0)
-#endif
-
 /*
  * Sysctl interface for RPC debugging
  */
index e30ba201910ae8c24dde51bd01451a6fca6e0614..5a4b1e0206e3a8afff8bf8f7a7fc00051f121ba0 100644 (file)
 
 struct krb5_ctx {
        int                     initiate; /* 1 = initiating, 0 = accepting */
-       int                     seed_init;
-       unsigned char           seed[16];
-       int                     signalg;
-       int                     sealalg;
        struct crypto_blkcipher *enc;
        struct crypto_blkcipher *seq;
        s32                     endtime;
@@ -117,7 +113,7 @@ enum seal_alg {
 #define ENCTYPE_UNKNOWN         0x01ff
 
 s32
-make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
+make_checksum(char *, char *header, int hdrlen, struct xdr_buf *body,
                   int body_offset, struct xdr_netobj *cksum);
 
 u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
index 2cf3fbb40b4fe1e9617153e86be151ac44c62c0b..e3e6a3437f8b20b63c31bfdc792a16a906ebe2f9 100644 (file)
 #include <linux/sunrpc/gss_asn1.h>
 
 struct spkm3_ctx {
-       struct xdr_netobj       ctx_id; /* per message context id */
-       int                     qop;         /* negotiated qop */
+       struct xdr_netobj       ctx_id;  /* per message context id */
+       int                     endtime; /* endtime of the context */
        struct xdr_netobj       mech_used;
        unsigned int            ret_flags ;
-       unsigned int            req_flags ;
-       struct xdr_netobj       share_key;
-       int                     conf_alg;
-       struct crypto_blkcipher *derived_conf_key;
-       int                     intg_alg;
-       struct crypto_blkcipher *derived_integ_key;
-       int                     keyestb_alg;   /* alg used to get share_key */
-       int                     owf_alg;   /* one way function */
+       struct xdr_netobj       conf_alg;
+       struct xdr_netobj       derived_conf_key;
+       struct xdr_netobj       intg_alg;
+       struct xdr_netobj       derived_integ_key;
 };
 
-/* from openssl/objects.h */
-/* XXX need SEAL_ALG_NONE */
-#define NID_md5                4
-#define NID_dhKeyAgreement     28 
-#define NID_des_cbc            31 
-#define NID_sha1               64
-#define NID_cast5_cbc          108
+/* OIDs declarations for K-ALG, I-ALG, C-ALG, and OWF-ALG */
+extern const struct xdr_netobj hmac_md5_oid;
+extern const struct xdr_netobj cast5_cbc_oid;
 
 /* SPKM InnerContext Token types */
 
@@ -46,11 +38,13 @@ u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_ne
 u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype);
 
 #define CKSUMTYPE_RSA_MD5            0x0007
+#define CKSUMTYPE_HMAC_MD5           0x0008
 
-s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
-                   int body_offset, struct xdr_netobj *cksum);
+s32 make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
+               unsigned int hdrlen, struct xdr_buf *body,
+               unsigned int body_offset, struct xdr_netobj *cksum);
 void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits);
-int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, 
+int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen,
                    int explen);
 void spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, 
                    unsigned char *ctxhdr, int elen, int zbit);
index a2eb9b4a9de32ef2f5ef2b7605b88ace1dfc316e..4a68125b6de6308a37b6133973088da421e08fd4 100644 (file)
@@ -30,7 +30,7 @@ struct rpc_inode {
 #define RPC_PIPE_WAIT_FOR_OPEN 1
        int flags;
        struct rpc_pipe_ops *ops;
-       struct work_struct queue_timeout;
+       struct delayed_work queue_timeout;
 };
 
 static inline struct rpc_inode *
index f399c138f79dec7cb39cf95c1d80a107a16937f5..97c761652581a722b7872bab49445e37521a8ea7 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/timer.h>
 #include <linux/sunrpc/types.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
@@ -85,6 +86,7 @@ struct rpc_task {
        union {
                struct work_struct      tk_work;        /* Async task work queue */
                struct rpc_wait         tk_wait;        /* RPC wait */
+               struct rcu_head         tk_rcu;         /* for task deletion */
        } u;
 
        unsigned short          tk_timeouts;    /* maj timeouts */
@@ -178,13 +180,6 @@ struct rpc_call_ops {
        } while (0)
 
 #define RPC_IS_ACTIVATED(t)    (test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
-#define rpc_set_active(t)      (set_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
-#define rpc_clear_active(t)    \
-       do { \
-               smp_mb__before_clear_bit(); \
-               clear_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate); \
-               smp_mb__after_clear_bit(); \
-       } while(0)
 
 /*
  * Task priorities.
@@ -222,7 +217,7 @@ struct rpc_wait_queue {
 
 #ifndef RPC_DEBUG
 # define RPC_WAITQ_INIT(var,qname) { \
-               .lock = SPIN_LOCK_UNLOCKED, \
+               .lock = __SPIN_LOCK_UNLOCKED(var.lock), \
                .tasks = { \
                        [0] = LIST_HEAD_INIT(var.tasks[0]), \
                        [1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -231,7 +226,7 @@ struct rpc_wait_queue {
        }
 #else
 # define RPC_WAITQ_INIT(var,qname) { \
-               .lock = SPIN_LOCK_UNLOCKED, \
+               .lock = __SPIN_LOCK_UNLOCKED(var.lock), \
                .tasks = { \
                        [0] = LIST_HEAD_INIT(var.tasks[0]), \
                        [1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -254,8 +249,10 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
 void           rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
                                int flags, const struct rpc_call_ops *ops,
                                void *data);
+void           rpc_put_task(struct rpc_task *);
 void           rpc_release_task(struct rpc_task *);
 void           rpc_exit_task(struct rpc_task *);
+void           rpc_release_calldata(const struct rpc_call_ops *, void *);
 void           rpc_killall_tasks(struct rpc_clnt *);
 int            rpc_execute(struct rpc_task *);
 void           rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
index 9a527c3643948a266bdcc862c2537e5f277a2f56..9e340fa23c0633f55c6e152d049874c41282f78a 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/uio.h>
 #include <asm/byteorder.h>
+#include <linux/scatterlist.h>
 
 /*
  * Buffer adjustment
@@ -139,29 +140,30 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
  */
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
-extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, int, int);
-extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, int);
-extern int read_bytes_from_xdr_buf(struct xdr_buf *, int, void *, int);
-extern int write_bytes_to_xdr_buf(struct xdr_buf *, int, void *, int);
+extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
+extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
+extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 
 /*
  * Helper structure for copying from an sk_buff.
  */
-typedef struct {
+struct xdr_skb_reader {
        struct sk_buff  *skb;
        unsigned int    offset;
        size_t          count;
        __wsum          csum;
-} skb_reader_t;
+};
 
-typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
+typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
 
+size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
 extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
 extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
-               skb_reader_t *, skb_read_actor_t);
+               struct xdr_skb_reader *, xdr_skb_read_actor);
 
-extern int xdr_encode_word(struct xdr_buf *, int, u32);
-extern int xdr_decode_word(struct xdr_buf *, int, u32 *);
+extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
+extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
 
 struct xdr_array2_desc;
 typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
@@ -196,6 +198,7 @@ extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32
 extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
 extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
+extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
 
 #endif /* __KERNEL__ */
 
index 60394fbc4c704d62d05325c119f66da040416291..f780e72fc417e44b1701edd63d9af1ef271f703e 100644 (file)
@@ -106,7 +106,6 @@ struct rpc_rqst {
 
 struct rpc_xprt_ops {
        void            (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
-       char *          (*print_addr)(struct rpc_xprt *xprt, enum rpc_display_format_t format);
        int             (*reserve_xprt)(struct rpc_task *task);
        void            (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*rpcbind)(struct rpc_task *task);
@@ -126,8 +125,6 @@ struct rpc_xprt_ops {
 struct rpc_xprt {
        struct kref             kref;           /* Reference count */
        struct rpc_xprt_ops *   ops;            /* transport methods */
-       struct socket *         sock;           /* BSD socket layer */
-       struct sock *           inet;           /* INET layer */
 
        struct rpc_timeout      timeout;        /* timeout parms */
        struct sockaddr_storage addr;           /* server address */
@@ -137,9 +134,6 @@ struct rpc_xprt {
        unsigned long           cong;           /* current congestion */
        unsigned long           cwnd;           /* congestion window */
 
-       size_t                  rcvsize,        /* transport rcv buffer size */
-                               sndsize;        /* transport send buffer size */
-
        size_t                  max_payload;    /* largest RPC payload size,
                                                   in bytes */
        unsigned int            tsh_size;       /* size of transport specific
@@ -157,28 +151,12 @@ struct rpc_xprt {
        unsigned char           shutdown   : 1, /* being shut down */
                                resvport   : 1; /* use a reserved port */
 
-       /*
-        * XID
-        */
-       __u32                   xid;            /* Next XID value to use */
-
-       /*
-        * State of TCP reply receive stuff
-        */
-       __be32                  tcp_recm,       /* Fragment header */
-                               tcp_xid;        /* Current XID */
-       u32                     tcp_reclen,     /* fragment length */
-                               tcp_offset;     /* fragment offset */
-       unsigned long           tcp_copied,     /* copied to request */
-                               tcp_flags;
        /*
         * Connection of transports
         */
        unsigned long           connect_timeout,
                                bind_timeout,
                                reestablish_timeout;
-       struct work_struct      connect_worker;
-       unsigned short          port;
 
        /*
         * Disconnection of idle transports
@@ -193,8 +171,8 @@ struct rpc_xprt {
         */
        spinlock_t              transport_lock; /* lock transport info */
        spinlock_t              reserve_lock;   /* lock slot table */
+       u32                     xid;            /* Next XID value to use */
        struct rpc_task *       snd_task;       /* Task blocked in send */
-
        struct list_head        recv;
 
        struct {
@@ -210,18 +188,9 @@ struct rpc_xprt {
                                        bklog_u;        /* backlog queue utilization */
        } stat;
 
-       void                    (*old_data_ready)(struct sock *, int);
-       void                    (*old_state_change)(struct sock *);
-       void                    (*old_write_space)(struct sock *);
-
        char *                  address_strings[RPC_DISPLAY_MAX];
 };
 
-#define XPRT_LAST_FRAG         (1 << 0)
-#define XPRT_COPY_RECM         (1 << 1)
-#define XPRT_COPY_XID          (1 << 2)
-#define XPRT_COPY_DATA         (1 << 3)
-
 #ifdef __KERNEL__
 
 /*
@@ -270,8 +239,8 @@ void                        xprt_disconnect(struct rpc_xprt *xprt);
 /*
  * Socket transport setup operations
  */
-int                    xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to);
-int                    xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
+struct rpc_xprt *      xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
+struct rpc_xprt *      xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to);
 
 /*
  * Reserved bit positions in xprt->state
index b1237f16ecde3008c2f5a5ee592c78a97fefee20..bf99bd49f8efd3af5f13004ca3c84c57bc532e91 100644 (file)
@@ -9,10 +9,13 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 
-/* page backup entry */
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they have
+ * occupied before the suspend are in use.
+ */
 struct pbe {
-       unsigned long address;          /* address of the copy */
-       unsigned long orig_address;     /* original address of page */
+       void *address;          /* address of the copy */
+       void *orig_address;     /* original address of a page */
        struct pbe *next;
 };
 
index e7c36ba2a2dbe2466b5aa8ea4e4b6e406f4ed4e4..add51cebc8d9dad3999f2b9842a55f7fcce24f47 100644 (file)
@@ -218,8 +218,6 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct file *, struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-                               struct bio **bio_chain);
 extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
 
 /* linux/mm/swap_state.c */
@@ -247,9 +245,10 @@ extern int swap_duplicate(swp_entry_t);
 extern int valid_swaphandles(swp_entry_t, unsigned long *);
 extern void swap_free(swp_entry_t);
 extern void free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t);
+extern int swap_type_of(dev_t, sector_t);
 extern unsigned int count_swap_pages(int, int);
 extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t swapdev_block(int, pgoff_t);
 extern struct swap_info_struct *get_swap_info_struct(unsigned);
 extern int can_share_swap_page(struct page *);
 extern int remove_exclusive_swap_page(struct page *);
@@ -259,7 +258,6 @@ extern spinlock_t swap_lock;
 
 /* linux/mm/thrash.c */
 extern struct mm_struct * swap_token_mm;
-extern unsigned long swap_token_default_timeout;
 extern void grab_swap_token(void);
 extern void __put_swap_token(struct mm_struct *);
 
index 6562a2050a25a6c0d5762b338d80ea8e1c81d476..7e9680f4afdd9290415c1a8b4ccf8594bf29bd14 100644 (file)
 #include <net/genetlink.h>
 
 #ifdef CONFIG_TASKSTATS
-extern kmem_cache_t *taskstats_cache;
+extern struct kmem_cache *taskstats_cache;
 extern struct mutex taskstats_exit_mutex;
 
-static inline void taskstats_exit_free(struct taskstats *tidstats)
-{
-       if (tidstats)
-               kmem_cache_free(taskstats_cache, tidstats);
-}
-
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {
        sig->stats = NULL;
 }
 
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{
-       struct signal_struct *sig = tsk->signal;
-       struct taskstats *stats;
-
-       if (sig->stats != NULL)
-               return;
-
-       /* No problem if kmem_cache_zalloc() fails */
-       stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (!sig->stats) {
-               sig->stats = stats;
-               stats = NULL;
-       }
-       spin_unlock_irq(&tsk->sighand->siglock);
-
-       if (stats)
-               kmem_cache_free(taskstats_cache, stats);
-}
-
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {
        if (sig->stats)
                kmem_cache_free(taskstats_cache, sig->stats);
 }
 
-extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
-extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
+extern void taskstats_exit(struct task_struct *, int group_dead);
 extern void taskstats_init_early(void);
 #else
-static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
-{}
-static inline void taskstats_exit_free(struct taskstats *ptidstats)
-{}
-static inline void taskstats_exit_send(struct task_struct *tsk,
-                                      struct taskstats *tidstats,
-                                      int group_dead, unsigned int cpu)
+static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
 {}
 static inline void taskstats_tgid_init(struct signal_struct *sig)
 {}
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{}
 static inline void taskstats_tgid_free(struct signal_struct *sig)
 {}
 static inline void taskstats_init_early(void)
index 65321f911c1e4be2769e9c8417dd919140446e90..f717f0898238c33236415dabbc362d80e4f712bf 100644 (file)
@@ -53,7 +53,7 @@ struct tty_buffer {
 };
 
 struct tty_bufhead {
-       struct work_struct              work;
+       struct delayed_work work;
        struct semaphore pty_sem;
        spinlock_t lock;
        struct tty_buffer *head;        /* Queue head */
index 745c409ebbb534294b250b1735da43e2dbc6f755..0351bf2fac85779839842a60ae8012e3fbaf81c6 100644 (file)
@@ -136,15 +136,19 @@ typedef           __s64           int64_t;
  *
  * Linux always considers sectors to be 512 bytes long independently
  * of the devices real block size.
- *
- * If required, asm/types.h can override it and define
- * HAVE_SECTOR_T
  */
-#ifndef HAVE_SECTOR_T
+#ifdef CONFIG_LBD
+typedef u64 sector_t;
+#else
 typedef unsigned long sector_t;
 #endif
 
-#ifndef HAVE_BLKCNT_T
+/*
+ * The type of the inode's block count.
+ */
+#ifdef CONFIG_LSF
+typedef u64 blkcnt_t;
+#else
 typedef unsigned long blkcnt_t;
 #endif
 
index a48d7f11c7be06772327604dc40c3f91b0b57145..975c963e57898aee95fed95e129bd91a1578a414 100644 (file)
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+       inc_preempt_count();
+       /*
+        * make sure to have issued the store before a pagefault
+        * can hit.
+        */
+       barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+       /*
+        * make sure to issue those last loads/stores before enabling
+        * the pagefault handler again.
+        */
+       barrier();
+       dec_preempt_count();
+       /*
+        * make sure we do..
+        */
+       barrier();
+       preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -30,14 +65,22 @@ static inline unsigned long __copy_from_user_nocache(void *to,
  * do_page_fault() doesn't attempt to take mmap_sem.  This makes
  * probe_kernel_address() suitable for use within regions where the caller
  * already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * We don't include enough header files to be able to do the set_fs().  We
+ * require that the probe_kernel_address() caller will do that.
  */
 #define probe_kernel_address(addr, retval)             \
        ({                                              \
                long ret;                               \
+               mm_segment_t old_fs = get_fs();         \
                                                        \
-               inc_preempt_count();                    \
-               ret = __get_user(retval, addr);         \
-               dec_preempt_count();                    \
+               set_fs(KERNEL_DS);                      \
+               pagefault_disable();                    \
+               ret = __get_user(retval, (__force typeof(retval) __user *)(addr));              \
+               pagefault_enable();                     \
+               set_fs(old_fs);                         \
                ret;                                    \
        })
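The pagefault_disable()/pagefault_enable() pair and the reworked probe_kernel_address() above allow a possibly-invalid kernel address to be read without taking mmap_sem or oopsing. A minimal sketch of a caller, assuming <linux/uaccess.h> and <linux/errno.h>; the helper name is illustrative:

/* Illustrative: peek at a kernel pointer that may be unmapped.
 * probe_kernel_address() returns non-zero instead of faulting. */
static int peek_kernel_word(const unsigned long *addr, unsigned long *val)
{
	unsigned long word;

	if (probe_kernel_address(addr, word))
		return -EFAULT;
	*val = word;
	return 0;
}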
 
index 0cd73edeef139f3d0a4d2bd8d1b43151d8b51eb4..aab5b1b720218b36b8ecb602f1bf44931b8c9c31 100644 (file)
@@ -388,7 +388,7 @@ struct usb_device {
 
        int pm_usage_cnt;               /* usage counter for autosuspend */
 #ifdef CONFIG_PM
-       struct work_struct autosuspend; /* for delayed autosuspends */
+       struct delayed_work autosuspend; /* for delayed autosuspends */
        struct mutex pm_mutex;          /* protects PM operations */
 
        unsigned auto_pm:1;             /* autosuspend/resume in progress */
index 9bca3539a1e5a965023bd20c148320d96f7d1e27..edef8d50b26bd668b415e0758f16961068e28339 100644 (file)
 
 struct workqueue_struct;
 
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+
 struct work_struct {
-       unsigned long pending;
+       /* the first word is the work queue pointer and the flags rolled into
+        * one */
+       unsigned long management;
+#define WORK_STRUCT_PENDING 0          /* T if work item pending execution */
+#define WORK_STRUCT_NOAUTOREL 1                /* F if work item automatically released on exec */
+#define WORK_STRUCT_FLAG_MASK (3UL)
+#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
        struct list_head entry;
-       void (*func)(void *);
-       void *data;
-       void *wq_data;
+       work_func_t func;
+};
+
+struct delayed_work {
+       struct work_struct work;
        struct timer_list timer;
 };
 
@@ -24,77 +35,160 @@ struct execute_work {
        struct work_struct work;
 };
 
-#define __WORK_INITIALIZER(n, f, d) {                          \
+#define __WORK_INITIALIZER(n, f) {                             \
+       .management = 0,                                        \
+        .entry = { &(n).entry, &(n).entry },                   \
+       .func = (f),                                            \
+       }
+
+#define __WORK_INITIALIZER_NAR(n, f) {                         \
+       .management = (1 << WORK_STRUCT_NOAUTOREL),             \
         .entry = { &(n).entry, &(n).entry },                   \
        .func = (f),                                            \
-       .data = (d),                                            \
+       }
+
+#define __DELAYED_WORK_INITIALIZER(n, f) {                     \
+       .work = __WORK_INITIALIZER((n).work, (f)),              \
+       .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
+       }
+
+#define __DELAYED_WORK_INITIALIZER_NAR(n, f) {                 \
+       .work = __WORK_INITIALIZER_NAR((n).work, (f)),          \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
        }
 
-#define DECLARE_WORK(n, f, d)                                  \
-       struct work_struct n = __WORK_INITIALIZER(n, f, d)
+#define DECLARE_WORK(n, f)                                     \
+       struct work_struct n = __WORK_INITIALIZER(n, f)
+
+#define DECLARE_WORK_NAR(n, f)                                 \
+       struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
+
+#define DECLARE_DELAYED_WORK(n, f)                             \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+
+#define DECLARE_DELAYED_WORK_NAR(n, f)                 \
+       struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
 
 /*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function pointer
  */
-#define PREPARE_WORK(_work, _func, _data)                      \
+#define PREPARE_WORK(_work, _func)                             \
        do {                                                    \
-               (_work)->func = _func;                          \
-               (_work)->data = _data;                          \
+               (_work)->func = (_func);                        \
        } while (0)
 
+#define PREPARE_DELAYED_WORK(_work, _func)                     \
+       PREPARE_WORK(&(_work)->work, (_func))
+
 /*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
  */
-#define INIT_WORK(_work, _func, _data)                         \
+#define INIT_WORK(_work, _func)                                        \
        do {                                                    \
+               (_work)->management = 0;                        \
                INIT_LIST_HEAD(&(_work)->entry);                \
-               (_work)->pending = 0;                           \
-               PREPARE_WORK((_work), (_func), (_data));        \
+               PREPARE_WORK((_work), (_func));                 \
+       } while (0)
+
+#define INIT_WORK_NAR(_work, _func)                                    \
+       do {                                                            \
+               (_work)->management = (1 << WORK_STRUCT_NOAUTOREL);     \
+               INIT_LIST_HEAD(&(_work)->entry);                        \
+               PREPARE_WORK((_work), (_func));                         \
+       } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func)                                \
+       do {                                                    \
+               INIT_WORK(&(_work)->work, (_func));             \
+               init_timer(&(_work)->timer);                    \
+       } while (0)
+
+#define INIT_DELAYED_WORK_NAR(_work, _func)                    \
+       do {                                                    \
+               INIT_WORK_NAR(&(_work)->work, (_func));         \
                init_timer(&(_work)->timer);                    \
        } while (0)
 
+/**
+ * work_pending - Find out whether a work item is currently pending
+ * @work: The work item in question
+ */
+#define work_pending(work) \
+       test_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+/**
+ * delayed_work_pending - Find out whether a delayable work item is currently
+ * pending
+ * @work: The work item in question
+ */
+#define delayed_work_pending(work) \
+       test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
+
+/**
+ * work_release - Release a work item under execution
+ * @work: The work item to release
+ *
+ * This is used to release a work item that has been initialised with automatic
+ * release mode disabled (WORK_STRUCT_NOAUTOREL is set).  This gives the work
+ * function the opportunity to grab auxiliary data from the container of the
+ * work_struct before clearing the pending bit as the work_struct may be
+ * subject to deallocation the moment the pending bit is cleared.
+ *
+ * In such a case, this should be called in the work function after it has
+ * fetched any data it may require from the container of the work_struct.
+ * After this function has been called, the work_struct may be scheduled for
+ * further execution or it may be deallocated unless other precautions are
+ * taken.
+ *
+ * This should also be used to release a delayed work item.
+ */
+#define work_release(work) \
+       clear_bit(WORK_STRUCT_PENDING, &(work)->management)
+
+
 extern struct workqueue_struct *__create_workqueue(const char *name,
-                                                   int singlethread);
-#define create_workqueue(name) __create_workqueue((name), 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1)
+                                                   int singlethread,
+                                                   int freezeable);
+#define create_workqueue(name) __create_workqueue((name), 0, 0)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
 extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-       struct work_struct *work, unsigned long delay);
+       struct delayed_work *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
-extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int schedule_on_each_cpu(work_func_t func);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
-                                      struct work_struct *);
-int execute_in_process_context(void (*fn)(void *), void *,
-                              struct execute_work *);
+                                      struct delayed_work *);
+int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 /*
  * Kill off a pending schedule_delayed_work().  Note that the work callback
  * function may still be running on return from cancel_delayed_work().  Run
  * flush_scheduled_work() to wait on it.
  */
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
 {
        int ret;
 
        ret = del_timer_sync(&work->timer);
        if (ret)
-               clear_bit(0, &work->pending);
+               clear_bit(WORK_STRUCT_PENDING, &work->work.management);
        return ret;
 }
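The workqueue rework above changes the work callback signature from void (*)(void *) to void (*)(struct work_struct *) and splits delayed items into struct delayed_work, so handlers recover their context with container_of() instead of a void * data cookie. A conversion sketch with illustrative driver names (not from this patch), assuming <linux/workqueue.h> and the usual kernel headers:

struct my_dev {
	struct delayed_work poll_work;
	int channel;
};

/* New-style handler: the work_struct itself is passed in and the
 * owning object is recovered via container_of(). */
static void my_dev_poll(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);
	/* ... poll dev->channel, possibly re-arm the work ... */
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll);
	schedule_delayed_work(&dev->poll_work, HZ);
}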
 
index e156e38e4ac37db61db754822e59877299e2f9a0..62b7e7598e9adfbb246abb5a692515fbf0e99e0e 100644 (file)
@@ -98,7 +98,7 @@ struct dst_ops
        int                     entry_size;
 
        atomic_t                entries;
-       kmem_cache_t            *kmem_cachep;
+       struct kmem_cache               *kmem_cachep;
 };
 
 #ifdef __KERNEL__
index 617b672b1132e7fa3ff5f9c940b1692520dc8483..89119277553dbb8dbe90085088d4242c27f3c023 100644 (file)
@@ -108,8 +108,8 @@ struct ieee80211softmac_assoc_info {
        /* Scan retries remaining */
        int scan_retry;
 
-       struct work_struct work;
-       struct work_struct timeout;
+       struct delayed_work work;
+       struct delayed_work timeout;
 };
 
 struct ieee80211softmac_bss_info {
index a9eb2eaf094eaf1e9d54c61f070359118309aac8..34cc76e3ddb42139bdd7d9b48971bb5d174c7130 100644 (file)
@@ -125,7 +125,7 @@ struct inet_hashinfo {
        rwlock_t                        lhash_lock ____cacheline_aligned;
        atomic_t                        lhash_users;
        wait_queue_head_t               lhash_wait;
-       kmem_cache_t                    *bind_bucket_cachep;
+       struct kmem_cache                       *bind_bucket_cachep;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -136,10 +136,10 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
 }
 
 extern struct inet_bind_bucket *
-                   inet_bind_bucket_create(kmem_cache_t *cachep,
+                   inet_bind_bucket_create(struct kmem_cache *cachep,
                                            struct inet_bind_hashbucket *head,
                                            const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                                     struct inet_bind_bucket *tb);
 
 static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
index 5f48748fe017557e0ec61f1d0a5bce47800387c8..f7be1ac736016e404f30e462b80aa6f42c35e922 100644 (file)
@@ -84,7 +84,7 @@ struct inet_timewait_death_row {
 };
 
 extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(void *data);
+extern void inet_twdr_twkill_work(struct work_struct *work);
 extern void inet_twdr_twcal_tick(unsigned long data);
 
 #if (BITS_PER_LONG == 64)
index 492dedaa8ac16dacd4cbab84484293a61e636f72..1720539ac2c1b0270e1358e6a6541e34dc709005 100644 (file)
@@ -28,6 +28,8 @@
 void irlan_check_command_param(struct irlan_cb *self, char *param, 
                               char *value);
 void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
+#ifdef CONFIG_PROC_FS
 void irlan_print_filter(struct seq_file *seq, int filter_type);
+#endif
 
 #endif /* IRLAN_FILTER_H */
index c8aacbd2e3331198fbde8125b1829e61f779a043..23967031ddb7f4f9bffbdb6974aed85b31d9f91c 100644 (file)
@@ -160,7 +160,7 @@ struct neigh_table
        atomic_t                entries;
        rwlock_t                lock;
        unsigned long           last_rand;
-       kmem_cache_t            *kmem_cachep;
+       struct kmem_cache               *kmem_cachep;
        struct neigh_statistics *stats;
        struct neighbour        **hash_buckets;
        unsigned int            hash_mask;
index cef3136e22a32b0799ca7bbbb50012dd9c93e90a..41bcc9eb42062044a3bb47c7442884002e0f1136 100644 (file)
@@ -7,7 +7,7 @@
 #include <net/netfilter/nf_conntrack.h>
 
 extern struct list_head nf_conntrack_expect_list;
-extern kmem_cache_t *nf_conntrack_expect_cachep;
+extern struct kmem_cache *nf_conntrack_expect_cachep;
 extern struct file_operations exp_file_ops;
 
 struct nf_conntrack_expect
index e37baaf2080b24c780662fa59a7c1da5f6030ea8..7aed02ce2b65b502dd97f72f91417ed5c78e5ca8 100644 (file)
@@ -29,7 +29,7 @@ struct proto;
 struct request_sock_ops {
        int             family;
        int             obj_size;
-       kmem_cache_t    *slab;
+       struct kmem_cache       *slab;
        int             (*rtx_syn_ack)(struct sock *sk,
                                       struct request_sock *req,
                                       struct dst_entry *dst);
@@ -60,7 +60,7 @@ struct request_sock {
 
 static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
 {
-       struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+       struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
 
        if (req != NULL)
                req->rsk_ops = ops;
index f8cbe40f52c00aa7215a35af4ee49ec082ce3ba7..c089f93ba591e24d674fe9292a2c5a5c9639f034 100644 (file)
@@ -1030,7 +1030,7 @@ void sctp_inq_init(struct sctp_inq *);
 void sctp_inq_free(struct sctp_inq *);
 void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet);
 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *);
-void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
+void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t);
 
 /* This is the structure we use to hold outbound chunks.  You push
  * chunks in and they automatically pop out the other end as bundled
index fe3a33fad03fcd1a553babe753a427e057269123..03684e702d13464cbc0a48b95354dc540341f504 100644 (file)
@@ -571,7 +571,7 @@ struct proto {
        int                     *sysctl_rmem;
        int                     max_header;
 
-       kmem_cache_t            *slab;
+       struct kmem_cache               *slab;
        unsigned int            obj_size;
 
        atomic_t                *orphan_count;
@@ -746,6 +746,25 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
  */
 #define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
 
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key)      \
+do {                                                                   \
+       sk->sk_lock.owner = NULL;                                       \
+       init_waitqueue_head(&sk->sk_lock.wq);                           \
+       spin_lock_init(&(sk)->sk_lock.slock);                           \
+       debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
+                       sizeof((sk)->sk_lock));                         \
+       lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
+                       (skey), (sname));                               \
+       lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
+} while (0)
+
 extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
 
 static inline void lock_sock(struct sock *sk)
index d7a306ea560d96b94eb8c28c11c8ee958d1f0655..1e1ee3253fd8fd9a46d24778f9534e7c042f98a4 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 
 struct timewait_sock_ops {
-       kmem_cache_t    *twsk_slab;
+       struct kmem_cache       *twsk_slab;
        unsigned int    twsk_obj_size;
        int             (*twsk_unique)(struct sock *sk,
                                       struct sock *sktw, void *twp);
index 15ec19dcf9c886864374a044e636c941ab8e0903..e4765413cf80c7ee9a5f4997796f6ee7ef93a206 100644 (file)
@@ -392,6 +392,20 @@ extern int xfrm_unregister_km(struct xfrm_mgr *km);
 
 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
 
+/* Audit Information */
+struct xfrm_audit
+{
+       uid_t   loginuid;
+       u32     secid;
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
+                   struct xfrm_policy *xp, struct xfrm_state *x);
+#else
+#define xfrm_audit_log(a,s,t,r,p,x) do { ; } while (0)
+#endif /* CONFIG_AUDITSYSCALL */
+
 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
 {
        if (likely(policy != NULL))
@@ -906,7 +920,7 @@ static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **s
 #endif
 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
-extern void xfrm_state_flush(u8 proto);
+extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
 extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
@@ -959,13 +973,13 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
                                          struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete);
 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
-void xfrm_policy_flush(u8 type);
+void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
 void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi);
-struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto, 
-                                 xfrm_address_t *daddr, xfrm_address_t *saddr, 
+struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
+                                 xfrm_address_t *daddr, xfrm_address_t *saddr,
                                  int create, unsigned short family);
-extern void xfrm_policy_flush(u8 type);
+extern void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
                          struct flowi *fl, int family, int strict);
index ede639812f8a534a66925ff804c26dc0c601effd..623a0fc0dae1a21adbb4221d816c87022a782412 100644 (file)
@@ -262,9 +262,10 @@ struct pcmcia_socket {
                u8                      present:1,      /* PCMCIA card is present in socket */
                                        busy:1,         /* "master" ioctl is used */
                                        dead:1,         /* pcmcia module is being unloaded */
-                                       device_add_pending:1, /* a pseudo-multifunction-device
+                                       device_add_pending:1, /* a multifunction-device
                                                               * add event is pending */
-                                       reserved:4;
+                                       mfc_pfc:1,      /* the pending event adds a mfc (1) or pfc (0) */
+                                       reserved:3;
        }                               pcmcia_state;
 
        struct work_struct              device_add;     /* for adding further pseudo-multifunction
index 1d77b63c5ea4c64882b3e7d37d0c650941729f4a..0c775fceb675b052bece78b80c885aaf64f0723e 100644 (file)
@@ -201,9 +201,14 @@ struct domain_device {
         void *lldd_dev;
 };
 
+struct sas_discovery_event {
+       struct work_struct work;
+       struct asd_sas_port *port;
+};
+
 struct sas_discovery {
        spinlock_t disc_event_lock;
-       struct work_struct disc_work[DISC_NUM_EVENTS];
+       struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
        unsigned long    pending;
        u8     fanout_sas_addr[8];
        u8     eeds_a[8];
@@ -249,14 +254,19 @@ struct asd_sas_port {
        void *lldd_port;          /* not touched by the sas class code */
 };
 
+struct asd_sas_event {
+       struct work_struct work;
+       struct asd_sas_phy *phy;
+};
+
 /* The phy pretty much is controlled by the LLDD.
  * The class only reads those fields.
  */
 struct asd_sas_phy {
 /* private: */
        /* protected by ha->event_lock */
-       struct work_struct   port_events[PORT_NUM_EVENTS];
-       struct work_struct   phy_events[PHY_NUM_EVENTS];
+       struct asd_sas_event   port_events[PORT_NUM_EVENTS];
+       struct asd_sas_event   phy_events[PHY_NUM_EVENTS];
 
        unsigned long port_events_pending;
        unsigned long phy_events_pending;
@@ -308,10 +318,15 @@ struct scsi_core {
        int               queue_thread_kill;
 };
 
+struct sas_ha_event {
+       struct work_struct work;
+       struct sas_ha_struct *ha;
+};
+
 struct sas_ha_struct {
 /* private: */
        spinlock_t       event_lock;
-       struct work_struct ha_events[HA_NUM_EVENTS];
+       struct sas_ha_event ha_events[HA_NUM_EVENTS];
        unsigned long    pending;
 
        struct scsi_core core;
@@ -339,6 +354,8 @@ struct sas_ha_struct {
        void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
 
        void *lldd_ha;            /* not touched by sas class code */
+
+       struct list_head eh_done_q;
 };
 
 #define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -527,17 +544,20 @@ struct sas_task {
 
        void   *lldd_task;        /* for use by LLDDs */
        void   *uldd_task;
+
+       struct work_struct abort_work;
 };
 
 
 
-#define SAS_TASK_STATE_PENDING  1
-#define SAS_TASK_STATE_DONE     2
-#define SAS_TASK_STATE_ABORTED  4
+#define SAS_TASK_STATE_PENDING      1
+#define SAS_TASK_STATE_DONE         2
+#define SAS_TASK_STATE_ABORTED      4
+#define SAS_TASK_INITIATOR_ABORTED  8
 
 static inline struct sas_task *sas_alloc_task(gfp_t flags)
 {
-       extern kmem_cache_t *sas_task_cache;
+       extern struct kmem_cache *sas_task_cache;
        struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
 
        if (task) {
@@ -555,7 +575,7 @@ static inline struct sas_task *sas_alloc_task(gfp_t flags)
 static inline void sas_free_task(struct sas_task *task)
 {
        if (task) {
-               extern kmem_cache_t *sas_task_cache;
+               extern struct kmem_cache *sas_task_cache;
                BUG_ON(!list_empty(&task->list));
                kmem_cache_free(sas_task_cache, task);
        }
@@ -593,6 +613,7 @@ struct sas_domain_function_template {
 extern int sas_register_ha(struct sas_ha_struct *);
 extern int sas_unregister_ha(struct sas_ha_struct *);
 
+int sas_phy_reset(struct sas_phy *phy, int hard_reset);
 extern int sas_queuecommand(struct scsi_cmnd *,
                     void (*scsi_done)(struct scsi_cmnd *));
 extern int sas_target_alloc(struct scsi_target *);
@@ -625,4 +646,6 @@ void sas_unregister_dev(struct domain_device *);
 
 void sas_init_dev(struct domain_device *);
 
+void sas_task_abort(struct work_struct *);
+
 #endif /* _SASLIB_H_ */
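
The libsas hunks above replace plain work_struct arrays with small wrappers (sas_discovery_event, asd_sas_event, sas_ha_event) that carry a back-pointer to the object the event belongs to. A minimal sketch of why, assuming the reworked workqueue callback signature used throughout this merge, where the handler receives only the work_struct pointer and recovers its context with container_of(); the handler name below is invented for illustration:

#include <linux/workqueue.h>

struct asd_sas_phy;                         /* opaque here; defined in the header above */

struct asd_sas_event {                      /* mirrors the wrapper added above */
	struct work_struct work;
	struct asd_sas_phy *phy;
};

static void example_phy_event_worker(struct work_struct *work)
{
	/* recover the wrapper, and from it the phy this event refers to */
	struct asd_sas_event *ev = container_of(work, struct asd_sas_event, work);
	struct asd_sas_phy *phy = ev->phy;

	(void)phy;                          /* ... handle the phy event ... */
}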
diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h
new file mode 100644 (file)
index 0000000..d143171
--- /dev/null
@@ -0,0 +1,77 @@
+#ifndef __LIBSRP_H__
+#define __LIBSRP_H__
+
+#include <linux/list.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/srp.h>
+
+enum iue_flags {
+       V_DIOVER,
+       V_WRITE,
+       V_LINKED,
+       V_FLYING,
+};
+
+struct srp_buf {
+       dma_addr_t dma;
+       void *buf;
+};
+
+struct srp_queue {
+       void *pool;
+       void *items;
+       struct kfifo *queue;
+       spinlock_t lock;
+};
+
+struct srp_target {
+       struct Scsi_Host *shost;
+       struct device *dev;
+
+       spinlock_t lock;
+       struct list_head cmd_queue;
+
+       size_t srp_iu_size;
+       struct srp_queue iu_queue;
+       size_t rx_ring_size;
+       struct srp_buf **rx_ring;
+
+       void *ldata;
+};
+
+struct iu_entry {
+       struct srp_target *target;
+
+       struct list_head ilist;
+       dma_addr_t remote_token;
+       unsigned long flags;
+
+       struct srp_buf *sbuf;
+};
+
+typedef int (srp_rdma_t)(struct scsi_cmnd *, struct scatterlist *, int,
+                        struct srp_direct_buf *, int,
+                        enum dma_data_direction, unsigned int);
+extern int srp_target_alloc(struct srp_target *, struct device *, size_t, size_t);
+extern void srp_target_free(struct srp_target *);
+
+extern struct iu_entry *srp_iu_get(struct srp_target *);
+extern void srp_iu_put(struct iu_entry *);
+
+extern int srp_cmd_queue(struct Scsi_Host *, struct srp_cmd *, void *, u64);
+extern int srp_transfer_data(struct scsi_cmnd *, struct srp_cmd *,
+                            srp_rdma_t, int, int);
+
+
+static inline struct srp_target *host_to_srp_target(struct Scsi_Host *host)
+{
+       return (struct srp_target *) host->hostdata;
+}
+
+static inline int srp_cmd_direction(struct srp_cmd *cmd)
+{
+       return (cmd->buf_fmt >> 4) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
+#endif
index be117f812deb95c8064d98c3e9125223b88160a2..d6948d0e8cdb47d2347f2f2e9c9b991e7788b804 100644 (file)
@@ -8,6 +8,7 @@
 
 struct request;
 struct scatterlist;
+struct Scsi_Host;
 struct scsi_device;
 
 
@@ -72,6 +73,9 @@ struct scsi_cmnd {
        unsigned short use_sg;  /* Number of pieces of scatter-gather */
        unsigned short sglist_len;      /* size of malloc'd scatter-gather list */
 
+       /* offset in cmd we are at (for multi-transfer tgt cmds) */
+       unsigned offset;
+
        unsigned underflow;     /* Return error if less than
                                   this amount is transferred */
 
@@ -119,7 +123,10 @@ struct scsi_cmnd {
 };
 
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
+extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
+extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
+                              struct device *);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
 extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
@@ -128,4 +135,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
                                 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);
 
+extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
+extern void scsi_free_sgtable(struct scatterlist *, int);
+
 #endif /* _SCSI_SCSI_CMND_H */
index b401c82036be216edf1dadf0f1ce01c0b1301891..ebf31b16dc49eee41786123bad30f36405faae20 100644 (file)
@@ -223,13 +223,13 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
                                                  struct scsi_device *);
 
 /**
- * shost_for_each_device  -  iterate over all devices of a host
- * @sdev:      iterator
- * @host:      host whiches devices we want to iterate over
+ * shost_for_each_device - iterate over all devices of a host
+ * @sdev: the &struct scsi_device to use as a cursor
+ * @shost: the &struct scsi_host to iterate over
  *
- * This traverses over each devices of @shost.  The devices have
- * a reference that must be released by scsi_host_put when breaking
- * out of the loop.
+ * Iterator that returns each device attached to @shost.  This loop
+ * takes a reference on each device and releases it at the end.  If
+ * you break out of the loop, you must call scsi_device_put(sdev).
  */
 #define shost_for_each_device(sdev, shost) \
        for ((sdev) = __scsi_iterate_devices((shost), NULL); \
@@ -237,17 +237,17 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
             (sdev) = __scsi_iterate_devices((shost), (sdev)))
 
 /**
- * __shost_for_each_device  -  iterate over all devices of a host (UNLOCKED)
- * @sdev:      iterator
- * @host:      host whiches devices we want to iterate over
+ * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
+ * @sdev: the &struct scsi_device to use as a cursor
+ * @shost: the &struct scsi_host to iterate over
  *
- * This traverses over each devices of @shost.  It does _not_ take a
- * reference on the scsi_device, thus it the whole loop must be protected
- * by shost->host_lock.
+ * Iterator that returns each device attached to @shost.  It does _not_
+ * take a reference on the scsi_device, so the whole loop must be
+ * protected by shost->host_lock.
  *
- * Note:  The only reason why drivers would want to use this is because
- * they're need to access the device list in irq context.  Otherwise you
- * really want to use shost_for_each_device instead.
+ * Note: The only reason to use this is because you need to access the
+ * device list in interrupt context.  Otherwise you really want to use
+ * shost_for_each_device instead.
  */
 #define __shost_for_each_device(sdev, shost) \
        list_for_each_entry((sdev), &((shost)->__devices), siblings)
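
The reworded kernel-doc above spells out the reference-counting rule for shost_for_each_device(): each iteration holds a reference, and anyone leaving the loop early owns the one taken on the current device. A hypothetical example of honouring that rule (the helper name and the channel-0 criterion are made up):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* Return the first device on channel 0.  The iterator's reference on the
 * returned device is deliberately kept; the caller must scsi_device_put()
 * it when finished.  NULL is returned if nothing matched.
 */
static struct scsi_device *example_first_channel0_device(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		if (sdev->channel == 0)
			return sdev;        /* leaving the loop: reference stays held */
	}
	return NULL;
}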
index 39c6f8cc20c3c7984484d0ee5f57d3e5e1fd7646..7f1f411d07af04c47f1faf7ce6400b57c197fef2 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 
+struct request_queue;
 struct block_device;
 struct completion;
 struct module;
@@ -123,6 +124,39 @@ struct scsi_host_template {
        int (* queuecommand)(struct scsi_cmnd *,
                             void (*done)(struct scsi_cmnd *));
 
+       /*
+        * The transfer functions are used to queue a scsi command to
+        * the LLD. When the driver is finished processing the command,
+        * the done callback is invoked.
+        *
+        * return values: see queuecommand
+        *
+        * If the LLD accepts the cmd, it should set the result to an
+        * appropriate value when completed before calling the done function.
+        *
+        * STATUS: REQUIRED FOR TARGET DRIVERS
+        */
+       /* TODO: rename */
+       int (* transfer_response)(struct scsi_cmnd *,
+                                 void (*done)(struct scsi_cmnd *));
+       /*
+        * This is called to inform the LLD to transfer cmd->request_bufflen
+        * bytes of the cmd at cmd->offset in the cmd. The cmd->use_sg
+        * specifies the number of scatterlist entries in the command
+        * and cmd->request_buffer contains the scatterlist.
+        *
+        * If the command cannot be processed in one transfer_data call
+        * because a scatterlist within the LLD's limits cannot be
+        * created then transfer_data will be called multiple times.
+        * It is initially called from process context, and later
+        * calls are from interrupt context.
+        */
+       int (* transfer_data)(struct scsi_cmnd *,
+                             void (*done)(struct scsi_cmnd *));
+
+       /* Used as callback for the completion of task management request. */
+       int (* tsk_mgmt_response)(u64 mid, int result);
+
        /*
         * This is an error handling strategy routine.  You don't need to
         * define one of these if you don't want to - there is a default
@@ -240,6 +274,24 @@ struct scsi_host_template {
         */
        void (* target_destroy)(struct scsi_target *);
 
+       /*
+        * If a host has the ability to discover targets on its own instead
+        * of scanning the entire bus, it can fill in this function and
+        * call scsi_scan_host().  This function will then be called
+        * periodically, with the Scsi_Host and the elapsed scan time in
+        * jiffies, until it returns 1.
+        *
+        * Status: OPTIONAL
+        */
+       int (* scan_finished)(struct Scsi_Host *, unsigned long);
+
+       /*
+        * If the host wants to be called before the scan starts, but
+        * after the midlayer has set itself up for the scan, it can fill
+        * in this function.
+        */
+       void (* scan_start)(struct Scsi_Host *);
+
        /*
         * fill in this function to allow the queue depth of this host
         * to be changeable (on a per device basis).  returns either
@@ -552,6 +604,9 @@ struct Scsi_Host {
        /* task mgmt function in progress */
        unsigned tmf_in_progress:1;
 
+       /* Asynchronous scan in progress */
+       unsigned async_scan:1;
+
        /*
         * Optional work queue to be utilized by the transport
         */
@@ -568,6 +623,12 @@ struct Scsi_Host {
         */
        unsigned int max_host_blocked;
 
+       /*
+        * q used for scsi_tgt msgs, async events or any other requests that
+        * need to be processed in userspace
+        */
+       struct request_queue *uspace_req_q;
+
        /* legacy crap */
        unsigned long base;
        unsigned long io_port;
@@ -648,11 +709,6 @@ extern const char *scsi_host_state_name(enum scsi_host_state);
 
 extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);
 
-static inline void scsi_assign_lock(struct Scsi_Host *shost, spinlock_t *lock)
-{
-       shost->host_lock = lock;
-}
-
 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
 {
         return shost->shost_gendev.parent;
@@ -671,6 +727,9 @@ extern void scsi_unblock_requests(struct Scsi_Host *);
 extern void scsi_block_requests(struct Scsi_Host *);
 
 struct class_container;
+
+extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+                                               void (*) (struct request_queue *));
 /*
  * These two functions are used to allocate and free a pseudo device
  * which will connect to the host adapter itself rather than any
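
The scan_start()/scan_finished() hooks documented above let a host run its own asynchronous scan instead of having the midlayer walk the whole bus. A hedged sketch of how a low-level driver might wire them up; everything prefixed example_ is invented, and the ten-second cut-off is only an illustration:

#include <linux/jiffies.h>
#include <scsi/scsi_host.h>

static int example_discovery_done;          /* set by the driver's own discovery path */

static void example_scan_start(struct Scsi_Host *shost)
{
	example_discovery_done = 0;
	/* kick off firmware or fabric discovery here */
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
	/* report completion, or give up after roughly ten seconds */
	return example_discovery_done || elapsed > 10 * HZ;
}

static struct scsi_host_template example_template = {
	.name          = "example",
	.scan_start    = example_scan_start,
	.scan_finished = example_scan_finished,
	/* ... queuecommand and the other required methods ... */
};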
diff --git a/include/scsi/scsi_tgt.h b/include/scsi/scsi_tgt.h
new file mode 100644 (file)
index 0000000..4f44279
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * SCSI target definitions
+ */
+
+#include <linux/dma-mapping.h>
+
+struct Scsi_Host;
+struct scsi_cmnd;
+struct scsi_lun;
+
+extern struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *);
+extern int scsi_tgt_alloc_queue(struct Scsi_Host *);
+extern void scsi_tgt_free_queue(struct Scsi_Host *);
+extern int scsi_tgt_queue_command(struct scsi_cmnd *, struct scsi_lun *, u64);
+extern int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *, int, u64, struct scsi_lun *,
+                                    void *);
+extern struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *,
+                                              enum dma_data_direction, gfp_t);
+extern void scsi_host_put_command(struct Scsi_Host *, struct scsi_cmnd *);
diff --git a/include/scsi/scsi_tgt_if.h b/include/scsi/scsi_tgt_if.h
new file mode 100644 (file)
index 0000000..46d5e70
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * SCSI target kernel/user interface
+ *
+ * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
+ * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#ifndef __SCSI_TARGET_IF_H
+#define __SCSI_TARGET_IF_H
+
+/* user -> kernel */
+#define        TGT_UEVENT_CMD_RSP      0x0001
+#define        TGT_UEVENT_TSK_MGMT_RSP 0x0002
+
+/* kernel -> user */
+#define        TGT_KEVENT_CMD_REQ      0x1001
+#define        TGT_KEVENT_CMD_DONE     0x1002
+#define        TGT_KEVENT_TSK_MGMT_REQ 0x1003
+
+struct tgt_event_hdr {
+       uint16_t version;
+       uint16_t status;
+       uint16_t type;
+       uint16_t len;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
+struct tgt_event {
+       struct tgt_event_hdr hdr;
+
+       union {
+               /* user-> kernel */
+               struct {
+                       int host_no;
+                       uint32_t len;
+                       int result;
+                       aligned_u64 uaddr;
+                       uint8_t rw;
+                       aligned_u64 tag;
+               } cmd_rsp;
+               struct {
+                       int host_no;
+                       aligned_u64 mid;
+                       int result;
+               } tsk_mgmt_rsp;
+
+
+               /* kernel -> user */
+               struct {
+                       int host_no;
+                       uint32_t data_len;
+                       uint8_t scb[16];
+                       uint8_t lun[8];
+                       int attribute;
+                       aligned_u64 tag;
+               } cmd_req;
+               struct {
+                       int host_no;
+                       aligned_u64 tag;
+                       int result;
+               } cmd_done;
+               struct {
+                       int host_no;
+                       int function;
+                       aligned_u64 tag;
+                       uint8_t lun[8];
+                       aligned_u64 mid;
+               } tsk_mgmt_req;
+       } p;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
+#define TGT_RING_SIZE (1UL << 16)
+#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
+#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
+#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
+
+#endif
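
The sizing macros above pin the event ring at 64 KiB and derive the page and event counts from PAGE_SIZE and sizeof(struct tgt_event). A small user-space arithmetic check; the 4 KiB page size and 64-byte event size are assumptions for the example only, the real values depend on the architecture and structure layout:

#include <stdio.h>

int main(void)
{
	unsigned long page_size  = 4096;        /* assumed PAGE_SIZE */
	unsigned long event_size = 64;          /* assumed sizeof(struct tgt_event) */
	unsigned long ring_size  = 1UL << 16;   /* TGT_RING_SIZE */
	unsigned long ring_pages = ring_size / page_size;   /* TGT_RING_PAGES */
	unsigned long per_page   = page_size / event_size;  /* TGT_EVENT_PER_PAGE */

	printf("%lu pages, %lu events/page, %lu events max\n",
	       ring_pages, per_page, ring_pages * per_page); /* 16, 64, 1024 */
	return 0;
}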
index fd352323378bdb4421e73c352d8e299f2458aa45..798f7c7ee426d72033321d55af20594a53c865fc 100644 (file)
@@ -206,9 +206,9 @@ struct fc_rport {   /* aka fc_starget_attrs */
        u8 flags;
        struct list_head peers;
        struct device dev;
-       struct work_struct dev_loss_work;
+       struct delayed_work dev_loss_work;
        struct work_struct scan_work;
-       struct work_struct fail_io_work;
+       struct delayed_work fail_io_work;
        struct work_struct stgt_delete_work;
        struct work_struct rport_delete_work;
 } __attribute__((aligned(sizeof(unsigned long))));
index 4b95c89c95c9f14ad30a27e22f48b1740d2c7498..d5c218ddc527c457d58e410b52f71f4e189569f5 100644 (file)
@@ -176,7 +176,7 @@ struct iscsi_cls_session {
 
        /* recovery fields */
        int recovery_tmo;
-       struct work_struct recovery_work;
+       struct delayed_work recovery_work;
 
        int target_id;
 
index 53024377f3b8496e6c0cf3fe366903c4245feb44..59633a82de47cd54dc6fa2cb6fcf09b9a1e270d4 100644 (file)
@@ -73,6 +73,8 @@ struct sas_phy {
 
        /* for the list of phys belonging to a port */
        struct list_head        port_siblings;
+
+       struct work_struct      reset_work;
 };
 
 #define dev_to_phy(d) \
index 4c43521cc493bd382815193f1315e3c3c088cedf..33720397a904861434f05cace1952ae474d428b5 100644 (file)
@@ -511,7 +511,7 @@ struct snd_ac97 {
 #ifdef CONFIG_SND_AC97_POWER_SAVE
        unsigned int power_up;  /* power states */
        struct workqueue_struct *power_workq;
-       struct work_struct power_work;
+       struct delayed_work power_work;
 #endif
        struct device dev;
 };
index 11702aa0bea97a3c5448199aa7bd5f32b76bc267..2ee061625fd0e615cb331ecca276ee4a9ddc8b35 100644 (file)
@@ -182,7 +182,7 @@ struct ak4114 {
        unsigned char rcs0;
        unsigned char rcs1;
        struct workqueue_struct *workqueue;
-       struct work_struct work;
+       struct delayed_work work;
        void *change_callback_private;
        void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1);
 };
index 919a80cb322e6f57dfa2be0a3bb727251bcd7e0e..2cfd7cb36e7963bee6987c826c39a3411459666d 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/romfs_fs.h>
 #include <linux/initrd.h>
 #include <linux/sched.h>
+#include <linux/freezer.h>
 
 #include "do_mounts.h"
 
index d28c1094d7e5ca8be38b50109161470ef2b27537..85f04037ade137c6ce452f3183db87527f80e6cf 100644 (file)
@@ -182,6 +182,10 @@ static int __init do_collect(void)
 
 static int __init do_header(void)
 {
+       if (memcmp(collected, "070707", 6)==0) {
+               error("incorrect cpio method used: use -H newc option");
+               return 1;
+       }
        if (memcmp(collected, "070701", 6)) {
                error("no cpio magic");
                return 1;
index 36f608a7cfbaf0fe1e2d3b1f37ff42e945bc100d..1174ae3aec8c0741f7ec3303c241816613ce3ff0 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/percpu.h>
 #include <linux/kmod.h>
 #include <linux/kernel_stat.h>
+#include <linux/start_kernel.h>
 #include <linux/security.h>
 #include <linux/workqueue.h>
 #include <linux/profile.h>
 #error Sorry, your GCC is too old. It builds incorrect kernels.
 #endif
 
+#if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0
+#warning gcc-4.1.0 is known to miscompile the kernel.  A different compiler version is recommended.
+#endif
+
 static int init(void *);
 
 extern void init_IRQ(void);
index 4d20cfd38f0abbc6c9311deaa829e7b79aa12839..fa18141539fb38d4f6980c5b1f74b4c4dafa66e9 100644 (file)
@@ -115,7 +115,6 @@ struct compat_shm_info {
 
 extern int sem_ctls[];
 #define sc_semopm      (sem_ctls[2])
-#define MAXBUF (64*1024)
 
 static inline int compat_ipc_parse_version(int *cmd)
 {
@@ -307,35 +306,30 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
 
 long compat_sys_msgsnd(int first, int second, int third, void __user *uptr)
 {
-       struct msgbuf __user *p;
        struct compat_msgbuf __user *up = uptr;
        long type;
 
        if (first < 0)
                return -EINVAL;
-       if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
+       if (second < 0)
                return -EINVAL;
 
-       p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-       if (get_user(type, &up->mtype) ||
-           put_user(type, &p->mtype) ||
-           copy_in_user(p->mtext, up->mtext, second))
+       if (get_user(type, &up->mtype))
                return -EFAULT;
 
-       return sys_msgsnd(first, p, second, third);
+       return do_msgsnd(first, type, up->mtext, second, third);
 }
 
 long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
                           int version, void __user *uptr)
 {
-       struct msgbuf __user *p;
        struct compat_msgbuf __user *up;
        long type;
        int err;
 
        if (first < 0)
                return -EINVAL;
-       if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
+       if (second < 0)
                return -EINVAL;
 
        if (!version) {
@@ -349,14 +343,11 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
                uptr = compat_ptr(ipck.msgp);
                msgtyp = ipck.msgtyp;
        }
-       p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-       err = sys_msgrcv(first, p, second, msgtyp, third);
+       up = uptr;
+       err = do_msgrcv(first, &type, up->mtext, second, msgtyp, third);
        if (err < 0)
                goto out;
-       up = uptr;
-       if (get_user(type, &p->mtype) ||
-           put_user(type, &up->mtype) ||
-           copy_in_user(up->mtext, p->mtext, err))
+       if (put_user(type, &up->mtype))
                err = -EFAULT;
 out:
        return err;
index 7c274002c9f5911d21a5f6a704e85be40d1e1777..3acc1661e517674a7237431e3d361255a0a81164 100644 (file)
@@ -90,7 +90,7 @@ static struct super_operations mqueue_super_ops;
 static void remove_notification(struct mqueue_inode_info *info);
 
 static spinlock_t mq_lock;
-static kmem_cache_t *mqueue_inode_cachep;
+static struct kmem_cache *mqueue_inode_cachep;
 static struct vfsmount *mqueue_mnt;
 
 static unsigned int queues_count;
@@ -211,7 +211,7 @@ static int mqueue_get_sb(struct file_system_type *fs_type,
        return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
 }
 
-static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 
@@ -224,7 +224,7 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb)
 {
        struct mqueue_inode_info *ei;
 
-       ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
+       ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
index 1266b1d0c8e33d47f25e46a692b53afd5186d5e6..a388824740e7e9c1e770dde9d3082609c0e8f017 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -626,12 +626,11 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
        return 0;
 }
 
-asmlinkage long
-sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+long do_msgsnd(int msqid, long mtype, void __user *mtext,
+               size_t msgsz, int msgflg)
 {
        struct msg_queue *msq;
        struct msg_msg *msg;
-       long mtype;
        int err;
        struct ipc_namespace *ns;
 
@@ -639,12 +638,10 @@ sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
 
        if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
-       if (get_user(mtype, &msgp->mtype))
-               return -EFAULT;
        if (mtype < 1)
                return -EINVAL;
 
-       msg = load_msg(msgp->mtext, msgsz);
+       msg = load_msg(mtext, msgsz);
        if (IS_ERR(msg))
                return PTR_ERR(msg);
 
@@ -723,6 +720,16 @@ out_free:
        return err;
 }
 
+asmlinkage long
+sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
+{
+       long mtype;
+
+       if (get_user(mtype, &msgp->mtype))
+               return -EFAULT;
+       return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
+}
+
 static inline int convert_mode(long *msgtyp, int msgflg)
 {
        /*
@@ -742,8 +749,8 @@ static inline int convert_mode(long *msgtyp, int msgflg)
        return SEARCH_EQUAL;
 }
 
-asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-                          long msgtyp, int msgflg)
+long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+               size_t msgsz, long msgtyp, int msgflg)
 {
        struct msg_queue *msq;
        struct msg_msg *msg;
@@ -889,15 +896,30 @@ out_unlock:
                return PTR_ERR(msg);
 
        msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
-       if (put_user (msg->m_type, &msgp->mtype) ||
-           store_msg(msgp->mtext, msg, msgsz)) {
+       *pmtype = msg->m_type;
+       if (store_msg(mtext, msg, msgsz))
                msgsz = -EFAULT;
-       }
+
        free_msg(msg);
 
        return msgsz;
 }
 
+asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+                          long msgtyp, int msgflg)
+{
+       long err, mtype;
+
+       err =  do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
+       if (err < 0)
+               goto out;
+
+       if (put_user(mtype, &msgp->mtype))
+               err = -EFAULT;
+out:
+       return err;
+}
+
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 {
index 21b3289d640c1eb0989457f42f1eb4b62b675aff..d3e12efd55cb39e6cdce7cd48ac73833100f60cf 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1070,14 +1070,13 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
        ipc_rcu_getref(sma);
        sem_unlock(sma);
 
-       new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+       new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
                ipc_lock_by_ptr(&sma->sem_perm);
                ipc_rcu_putref(sma);
                sem_unlock(sma);
                return ERR_PTR(-ENOMEM);
        }
-       memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
        new->semadj = (short *) &new[1];
        new->semid = semid;
 
index cd8bb14a431f648a1327b3e84deb338e806b6a60..a9b7a227b8d4c7bde79998048206b96fef1f57ed 100644 (file)
@@ -514,6 +514,11 @@ void ipc_rcu_getref(void *ptr)
        container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
 }
 
+static void ipc_do_vfree(struct work_struct *work)
+{
+       vfree(container_of(work, struct ipc_rcu_sched, work));
+}
+
 /**
  * ipc_schedule_free - free ipc + rcu space
  * @head: RCU callback structure for queued work
@@ -528,7 +533,7 @@ static void ipc_schedule_free(struct rcu_head *head)
        struct ipc_rcu_sched *sched =
                        container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);
 
-       INIT_WORK(&sched->work, vfree, sched);
+       INIT_WORK(&sched->work, ipc_do_vfree);
        schedule_work(&sched->work);
 }
 
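The ipc_do_vfree() wrapper above is needed because vfree() can no longer be queued directly with a data pointer: the work callback now receives only the work_struct and has to find its payload itself. A minimal sketch of the same deferred-free pattern under that assumption; the example_ names are illustrative, not kernel API:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

struct example_deferred_vfree {
	void *buf;                              /* vmalloc()ed region to release later */
	struct work_struct work;
};

static void example_do_vfree(struct work_struct *work)
{
	struct example_deferred_vfree *d =
		container_of(work, struct example_deferred_vfree, work);

	vfree(d->buf);
	kfree(d);
}

static void example_schedule_vfree(struct example_deferred_vfree *d)
{
	INIT_WORK(&d->work, example_do_vfree);  /* two-argument form: no data pointer */
	schedule_work(&d->work);
}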
index 248e1c396f8b062f9a0e30b3e607336be40cd967..4af15802ccd46e960a857c04db9afe2c0ba49dc3 100644 (file)
@@ -7,7 +7,7 @@ choice
        default HZ_250
        help
         Allows the configuration of the timer frequency. It is customary
-        to have the timer interrupt run at 1000 HZ but 100 HZ may be more
+        to have the timer interrupt run at 1000 Hz but 100 Hz may be more
         beneficial for servers and NUMA systems that do not need to have
         a fast response for user interaction and that may experience bus
         contention and cacheline bounces as a result of timer interrupts.
@@ -19,21 +19,30 @@ choice
        config HZ_100
                bool "100 HZ"
        help
-         100 HZ is a typical choice for servers, SMP and NUMA systems
+         100 Hz is a typical choice for servers, SMP and NUMA systems
          with lots of processors that may show reduced performance if
          too many timer interrupts are occurring.
 
        config HZ_250
                bool "250 HZ"
        help
-        250 HZ is a good compromise choice allowing server performance
+        250 Hz is a good compromise choice allowing server performance
         while also showing good interactive responsiveness even
-        on SMP and NUMA systems.
+        on SMP and NUMA systems. If you are going to be using NTSC video
+        or multimedia, select 300 Hz instead.
+
+       config HZ_300
+               bool "300 HZ"
+       help
+        300 Hz is a good compromise choice allowing server performance
+        while also showing good interactive responsiveness even
+        on SMP and NUMA systems, and divides exactly by both the PAL
+        and NTSC frame rates, which suits video and multimedia work.
 
        config HZ_1000
                bool "1000 HZ"
        help
-        1000 HZ is the preferred choice for desktop systems and other
+        1000 Hz is the preferred choice for desktop systems and other
         systems requiring fast interactive responses to events.
 
 endchoice
@@ -42,5 +51,6 @@ config HZ
        int
        default 100 if HZ_100
        default 250 if HZ_250
+       default 300 if HZ_300
        default 1000 if HZ_1000
 
index 0aad5ca36a8125e205e85978aa32e8712ddc957a..dc12db8600e7818e96dfb072cd5bdc549ec0eef3 100644 (file)
@@ -89,7 +89,8 @@ struct acct_glbs {
        struct timer_list       timer;
 };
 
-static struct acct_glbs acct_globals __cacheline_aligned = {SPIN_LOCK_UNLOCKED};
+static struct acct_glbs acct_globals __cacheline_aligned =
+       {__SPIN_LOCK_UNLOCKED(acct_globals.lock)};
 
 /*
  * Called whenever the timer says to check the free space.
index 98106f6078b0005a89ef889574441c663f40732f..d9b690ac684b9c6a638cee37d11949d62ef22992 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/netlink.h>
 #include <linux/selinux.h>
 #include <linux/inotify.h>
+#include <linux/freezer.h>
 
 #include "audit.h"
 
index 4f40d923af8ea2a349736c793d2164c72aa4fa63..2e896f8ae29e4ce81886f115395dcedd9f898acf 100644 (file)
@@ -636,10 +636,9 @@ static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
        struct audit_rule *rule;
        int i;
 
-       rule = kmalloc(sizeof(*rule), GFP_KERNEL);
+       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
        if (unlikely(!rule))
                return NULL;
-       memset(rule, 0, sizeof(*rule));
 
        rule->flags = krule->flags | krule->listnr;
        rule->action = krule->action;
index ab97e51012325b24191ec5a6f2c1e2514968dd02..40722e26de98e285c1f90ff810b20aff6d3cb9a2 100644 (file)
@@ -731,7 +731,7 @@ static inline void audit_free_context(struct audit_context *context)
                printk(KERN_ERR "audit: freed %d contexts\n", count);
 }
 
-static void audit_log_task_context(struct audit_buffer *ab)
+void audit_log_task_context(struct audit_buffer *ab)
 {
        char *ctx = NULL;
        ssize_t len = 0;
@@ -760,6 +760,8 @@ error_path:
        return;
 }
 
+EXPORT_SYMBOL(audit_log_task_context);
+
 static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 {
        char name[sizeof(tsk->comm)];
@@ -1488,6 +1490,8 @@ uid_t audit_get_loginuid(struct audit_context *ctx)
        return ctx ? ctx->loginuid : -1;
 }
 
+EXPORT_SYMBOL(audit_get_loginuid);
+
 /**
  * __audit_mq_open - record audit data for a POSIX MQ open
  * @oflag: open flag
index f9e31974f4addf262cee19d1049f6b1fd55bc47e..8fa1fb28f8a79b895d6438a2485466d2ac8efda1 100644 (file)
@@ -75,7 +75,7 @@ ikconfig_read_current(struct file *file, char __user *buf,
        return count;
 }
 
-static struct file_operations ikconfig_file_ops = {
+static const struct file_operations ikconfig_file_ops = {
        .owner = THIS_MODULE,
        .read = ikconfig_read_current,
 };
index 272254f20d9744b4844cde0c6da1dc13210283c4..9124669f45860cac40c7b03060ef582b94d595ec 100644 (file)
@@ -270,11 +270,7 @@ int disable_nonboot_cpus(void)
                        goto out;
                }
        }
-       error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu));
-       if (error) {
-               printk(KERN_ERR "Could not run on CPU%d\n", first_cpu);
-               goto out;
-       }
+
        /* We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
index 6313c38c930e62025c0e76a057d83023b77a6bb5..0a6b4d89f9a031c5e394fb1487530e9ce7dc806b 100644 (file)
@@ -729,9 +729,11 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
        }
 
        /* Remaining checks don't apply to root cpuset */
-       if ((par = cur->parent) == NULL)
+       if (cur == &top_cpuset)
                return 0;
 
+       par = cur->parent;
+
        /* We must be a subset of our parent cpuset */
        if (!is_cpuset_subset(trial, par))
                return -EACCES;
@@ -1060,10 +1062,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
        cpu_exclusive_changed =
                (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
        mutex_lock(&callback_mutex);
-       if (turning_on)
-               set_bit(bit, &cs->flags);
-       else
-               clear_bit(bit, &cs->flags);
+       cs->flags = trialcs.flags;
        mutex_unlock(&callback_mutex);
 
        if (cpu_exclusive_changed)
@@ -1281,7 +1280,8 @@ typedef enum {
        FILE_TASKLIST,
 } cpuset_filetype_t;
 
-static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
+static ssize_t cpuset_common_file_write(struct file *file,
+                                       const char __user *userbuf,
                                        size_t nbytes, loff_t *unused_ppos)
 {
        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
@@ -1292,7 +1292,7 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
        int retval = 0;
 
        /* Crude upper limit on largest legitimate cpulist user might write. */
-       if (nbytes > 100 + 6 * NR_CPUS)
+       if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
                return -E2BIG;
 
        /* +1 for nul-terminator */
@@ -1532,7 +1532,7 @@ static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
        return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
 }
 
-static struct file_operations cpuset_file_operations = {
+static const struct file_operations cpuset_file_operations = {
        .read = cpuset_file_read,
        .write = cpuset_file_write,
        .llseek = generic_file_llseek,
@@ -2045,7 +2045,6 @@ out:
        return err;
 }
 
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
 /*
  * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
  * or memory nodes, we need to walk over the cpuset hierarchy,
@@ -2109,9 +2108,7 @@ static void common_cpu_mem_hotplug_unplug(void)
        mutex_unlock(&callback_mutex);
        mutex_unlock(&manage_mutex);
 }
-#endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * The top_cpuset tracks what CPUs and Memory Nodes are online,
  * period.  This is necessary in order to make cpusets transparent
@@ -2128,7 +2125,6 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb,
        common_cpu_mem_hotplug_unplug();
        return 0;
 }
-#endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
@@ -2610,7 +2606,7 @@ static int cpuset_open(struct inode *inode, struct file *file)
        return single_open(file, proc_cpuset_show, pid);
 }
 
-struct file_operations proc_cpuset_operations = {
+const struct file_operations proc_cpuset_operations = {
        .open           = cpuset_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index 66a0ea48751d647abb56f7e4741e3f0d02dd7b4e..766d5912b26a4533edd2b07ac767dd36db581ca3 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/delayacct.h>
 
 int delayacct_on __read_mostly = 1;    /* Delay accounting turned on/off */
-kmem_cache_t *delayacct_cache;
+struct kmem_cache *delayacct_cache;
 
 static int __init delayacct_setup_disable(char *str)
 {
@@ -41,7 +41,7 @@ void delayacct_init(void)
 
 void __delayacct_tsk_init(struct task_struct *tsk)
 {
-       tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
+       tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
        if (tsk->delays)
                spin_lock_init(&tsk->delays->lock);
 }
index 2020644c938a48d0f34670765e788b32fed8900d..937b13ca33baaac6a3ab01437b4016f9995a11f9 100644 (file)
@@ -140,7 +140,7 @@ static int proc_dma_open(struct inode *inode, struct file *file)
        return single_open(file, proc_dma_show, NULL);
 }
 
-static struct file_operations proc_dma_operations = {
+static const struct file_operations proc_dma_operations = {
        .open           = proc_dma_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index 06de6c4e8ca3a66937ce8763d33ca42852fbb698..4e3f919edc4810eae59bd32a9369875febb36835 100644 (file)
@@ -850,9 +850,7 @@ static void exit_notify(struct task_struct *tsk)
 fastcall NORET_TYPE void do_exit(long code)
 {
        struct task_struct *tsk = current;
-       struct taskstats *tidstats;
        int group_dead;
-       unsigned int mycpu;
 
        profile_task_exit(tsk);
 
@@ -890,8 +888,6 @@ fastcall NORET_TYPE void do_exit(long code)
                                current->comm, current->pid,
                                preempt_count());
 
-       taskstats_exit_alloc(&tidstats, &mycpu);
-
        acct_update_integrals(tsk);
        if (tsk->mm) {
                update_hiwater_rss(tsk->mm);
@@ -911,8 +907,8 @@ fastcall NORET_TYPE void do_exit(long code)
 #endif
        if (unlikely(tsk->audit_context))
                audit_free(tsk);
-       taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
-       taskstats_exit_free(tidstats);
+
+       taskstats_exit(tsk, group_dead);
 
        exit_mm(tsk);
 
index 8cdd3e72ba55f9ee52b098e67b1b7d4fcd04aa58..7f2e31ba33af16b2e6cde516b9e51c63bcc64383 100644 (file)
@@ -82,26 +82,26 @@ int nr_processes(void)
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 # define alloc_task_struct()   kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
 # define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk))
-static kmem_cache_t *task_struct_cachep;
+static struct kmem_cache *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-static kmem_cache_t *signal_cachep;
+static struct kmem_cache *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
-kmem_cache_t *sighand_cachep;
+struct kmem_cache *sighand_cachep;
 
 /* SLAB cache for files_struct structures (tsk->files) */
-kmem_cache_t *files_cachep;
+struct kmem_cache *files_cachep;
 
 /* SLAB cache for fs_struct structures (tsk->fs) */
-kmem_cache_t *fs_cachep;
+struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-kmem_cache_t *vm_area_cachep;
+struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
-static kmem_cache_t *mm_cachep;
+static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
@@ -237,7 +237,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                                goto fail_nomem;
                        charge = len;
                }
-               tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
@@ -319,7 +319,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
 
  __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
 
-#define allocate_mm()  (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
+#define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)    (kmem_cache_free(mm_cachep, (mm)))
 
 #include <linux/init_task.h>
@@ -448,7 +448,16 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }
-       if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
+
+       /*
+        * If we're exiting normally, clear a user-space tid field if
+        * requested.  We leave this alone when dying by signal, to leave
+        * the value intact in a core dump, and to save the unnecessary
+        * trouble otherwise.  Userland only wants this done for a sys_exit.
+        */
+       if (tsk->clear_child_tid
+           && !(tsk->flags & PF_SIGNALED)
+           && atomic_read(&mm->mm_users) > 1) {
                u32 __user * tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;
 
@@ -479,6 +488,10 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 
        memcpy(mm, oldmm, sizeof(*mm));
 
+       /* Initializing for Swap token stuff */
+       mm->token_priority = 0;
+       mm->last_interval = 0;
+
        if (!mm_init(mm))
                goto fail_nomem;
 
@@ -542,6 +555,10 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
                goto fail_nomem;
 
 good_mm:
+       /* Initializing for Swap token stuff */
+       mm->token_priority = 0;
+       mm->last_interval = 0;
+
        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;
@@ -613,7 +630,7 @@ static struct files_struct *alloc_files(void)
        struct files_struct *newf;
        struct fdtable *fdt;
 
-       newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
+       newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;
 
@@ -830,7 +847,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
-               taskstats_tgid_alloc(current);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -1303,7 +1319,7 @@ fork_out:
        return ERR_PTR(retval);
 }
 
-struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 {
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
@@ -1413,7 +1429,7 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
-static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
+static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags)
 {
        struct sighand_struct *sighand = data;
 
index 93ef30ba209fc865c7507459f0995849d5d6e336..95989a3b4168d15d8ac2c42d3e0b779d8c739b2c 100644 (file)
@@ -282,9 +282,9 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
        int ret;
 
-       inc_preempt_count();
+       pagefault_disable();
        ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-       dec_preempt_count();
+       pagefault_enable();
 
        return ret ? -EFAULT : 0;
 }
@@ -324,12 +324,11 @@ static int refill_pi_state_cache(void)
        if (likely(current->pi_state_cache))
                return 0;
 
-       pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
+       pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
 
        if (!pi_state)
                return -ENOMEM;
 
-       memset(pi_state, 0, sizeof(*pi_state));
        INIT_LIST_HEAD(&pi_state->list);
        /* pi_mutex gets initialized later */
        pi_state->owner = NULL;
@@ -553,7 +552,7 @@ static void wake_futex(struct futex_q *q)
         * at the end of wake_up_all() does not prevent this store from
         * moving.
         */
-       wmb();
+       smp_wmb();
        q->lock_ptr = NULL;
 }
 
@@ -585,9 +584,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
        if (!(uval & FUTEX_OWNER_DIED)) {
                newval = FUTEX_WAITERS | new_owner->pid;
 
-               inc_preempt_count();
+               pagefault_disable();
                curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-               dec_preempt_count();
+               pagefault_enable();
                if (curval == -EFAULT)
                        return -EFAULT;
                if (curval != uval)
@@ -618,9 +617,9 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
         * There is no waiter, so we unlock the futex. The owner died
         * bit has not to be preserved here. We are the owner:
         */
-       inc_preempt_count();
+       pagefault_disable();
        oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-       dec_preempt_count();
+       pagefault_enable();
 
        if (oldval == -EFAULT)
                return oldval;
@@ -1158,9 +1157,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
         */
        newval = current->pid;
 
-       inc_preempt_count();
+       pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-       dec_preempt_count();
+       pagefault_enable();
 
        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
@@ -1183,9 +1182,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
        uval = curval;
        newval = uval | FUTEX_WAITERS;
 
-       inc_preempt_count();
+       pagefault_disable();
        curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-       dec_preempt_count();
+       pagefault_enable();
 
        if (unlikely(curval == -EFAULT))
                goto uaddr_faulted;
@@ -1215,10 +1214,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
                        newval = current->pid |
                                FUTEX_OWNER_DIED | FUTEX_WAITERS;
 
-                       inc_preempt_count();
+                       pagefault_disable();
                        curval = futex_atomic_cmpxchg_inatomic(uaddr,
                                                               uval, newval);
-                       dec_preempt_count();
+                       pagefault_enable();
 
                        if (unlikely(curval == -EFAULT))
                                goto uaddr_faulted;
@@ -1390,9 +1389,9 @@ retry_locked:
         * anyone else up:
         */
        if (!(uval & FUTEX_OWNER_DIED)) {
-               inc_preempt_count();
+               pagefault_disable();
                uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-               dec_preempt_count();
+               pagefault_enable();
        }
 
        if (unlikely(uval == -EFAULT))
@@ -1493,7 +1492,7 @@ static unsigned int futex_poll(struct file *filp,
        return ret;
 }
 
-static struct file_operations futex_fops = {
+static const struct file_operations futex_fops = {
        .release        = futex_close,
        .poll           = futex_poll,
 };
@@ -1858,10 +1857,16 @@ static struct file_system_type futex_fs_type = {
 
 static int __init init(void)
 {
-       unsigned int i;
+       int i = register_filesystem(&futex_fs_type);
+
+       if (i)
+               return i;
 
-       register_filesystem(&futex_fs_type);
        futex_mnt = kern_mount(&futex_fs_type);
+       if (IS_ERR(futex_mnt)) {
+               unregister_filesystem(&futex_fs_type);
+               return PTR_ERR(futex_mnt);
+       }
 
        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                INIT_LIST_HEAD(&futex_queues[i].chain);
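
The futex hunks above replace the inc_preempt_count()/dec_preempt_count() pairs with pagefault_disable()/pagefault_enable() around the *_inatomic user-space accesses: inside that window a fault is not handled, the access simply returns -EFAULT, and the caller retries after faulting the page in with its locks dropped. A hedged sketch of that retry pattern, not the futex code itself:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int example_read_user_u32(u32 __user *uaddr, u32 *val, spinlock_t *lock)
{
	u32 dummy;

	for (;;) {
		int ret;

		spin_lock(lock);
		pagefault_disable();            /* faults in this window just fail */
		ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
		pagefault_enable();
		spin_unlock(lock);

		if (!ret)
			return 0;
		if (get_user(dummy, uaddr))     /* fault the page in, locks dropped */
			return -EFAULT;
	}
}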
index a681912bc89a10388e6a4eeecd44dfe05416aff9..aff1f0fabb0df0b462c0d83ea4eb35d0ecc24fe0 100644 (file)
@@ -54,7 +54,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned = {
                .chip = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth = 1,
-               .lock = SPIN_LOCK_UNLOCKED,
+               .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
 #ifdef CONFIG_SMP
                .affinity = CPU_MASK_ALL
 #endif
index eeac3e313b2bfa41f26a4ccd9235232593fb947c..ab63cfc429929ff9e93461ebe0aad8b280929d65 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/proc_fs.h>
 #include <linux/sched.h>       /* for cond_resched */
 #include <linux/mm.h>
+#include <linux/ctype.h>
 
 #include <asm/sections.h>
 
@@ -301,13 +302,6 @@ struct kallsym_iter
        char name[KSYM_NAME_LEN+1];
 };
 
-/* Only label it "global" if it is exported. */
-static void upcase_if_global(struct kallsym_iter *iter)
-{
-       if (is_exported(iter->name, iter->owner))
-               iter->type += 'A' - 'a';
-}
-
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
        iter->owner = module_get_kallsym(iter->pos - kallsyms_num_syms,
@@ -316,7 +310,10 @@ static int get_ksymbol_mod(struct kallsym_iter *iter)
        if (iter->owner == NULL)
                return 0;
 
-       upcase_if_global(iter);
+       /* Label it "global" if it is exported, "local" if not exported. */
+       iter->type = is_exported(iter->name, iter->owner)
+               ? toupper(iter->type) : tolower(iter->type);
+
        return 1;
 }
 
@@ -401,7 +398,7 @@ static int s_show(struct seq_file *m, void *p)
        return 0;
 }
 
-static struct seq_operations kallsyms_op = {
+static const struct seq_operations kallsyms_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
@@ -436,7 +433,7 @@ static int kallsyms_release(struct inode *inode, struct file *file)
        return seq_release(inode, file);
 }
 
-static struct file_operations kallsyms_operations = {
+static const struct file_operations kallsyms_operations = {
        .open = kallsyms_open,
        .read = seq_read,
        .llseek = seq_lseek,
index fcdd5d2bc3f4b47b57854c41a1cec882939c02ed..afbbbe981be288ecef8112b95a53321ee1971959 100644 (file)
@@ -20,6 +20,8 @@
 #include <linux/syscalls.h>
 #include <linux/ioport.h>
 #include <linux/hardirq.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -108,11 +110,10 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 
        /* Allocate a controlling structure */
        result = -ENOMEM;
-       image = kmalloc(sizeof(*image), GFP_KERNEL);
+       image = kzalloc(sizeof(*image), GFP_KERNEL);
        if (!image)
                goto out;
 
-       memset(image, 0, sizeof(*image));
        image->head = 0;
        image->entry = &image->head;
        image->last_entry = &image->head;
@@ -1067,6 +1068,60 @@ void crash_kexec(struct pt_regs *regs)
        }
 }
 
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+                           size_t data_len)
+{
+       struct elf_note note;
+
+       note.n_namesz = strlen(name) + 1;
+       note.n_descsz = data_len;
+       note.n_type   = type;
+       memcpy(buf, &note, sizeof(note));
+       buf += (sizeof(note) + 3)/4;
+       memcpy(buf, name, note.n_namesz);
+       buf += (note.n_namesz + 3)/4;
+       memcpy(buf, data, note.n_descsz);
+       buf += (note.n_descsz + 3)/4;
+
+       return buf;
+}
+
+static void final_note(u32 *buf)
+{
+       struct elf_note note;
+
+       note.n_namesz = 0;
+       note.n_descsz = 0;
+       note.n_type   = 0;
+       memcpy(buf, &note, sizeof(note));
+}
+
+void crash_save_cpu(struct pt_regs *regs, int cpu)
+{
+       struct elf_prstatus prstatus;
+       u32 *buf;
+
+       if ((cpu < 0) || (cpu >= NR_CPUS))
+               return;
+
+       /* Using ELF notes here is opportunistic.
+        * I need a well defined structure format
+        * for the data I pass, and I need tags
+        * on the data to indicate what information I have
+        * squirrelled away.  ELF notes happen to provide
+        * all of that, so there is no need to invent something new.
+        */
+       buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+       if (!buf)
+               return;
+       memset(&prstatus, 0, sizeof(prstatus));
+       prstatus.pr_pid = current->pid;
+       elf_core_copy_regs(&prstatus.pr_reg, regs);
+       buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+                               sizeof(prstatus));
+       final_note(buf);
+}
+
 static int __init crash_notes_memory_init(void)
 {
        /* Allocate memory for saving cpu registers. */
index 2b76dee284964c82ca42a81b2ddd1463f72d001d..8d2bea09a4ec9b44f86f5574defa6af2a043917d 100644 (file)
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module);
 #endif /* CONFIG_KMOD */
 
 struct subprocess_info {
+       struct work_struct work;
        struct completion *complete;
        char *path;
        char **argv;
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data)
 }
 
 /* This is run by khelper thread  */
-static void __call_usermodehelper(void *data)
+static void __call_usermodehelper(struct work_struct *work)
 {
-       struct subprocess_info *sub_info = data;
+       struct subprocess_info *sub_info =
+               container_of(work, struct subprocess_info, work);
        pid_t pid;
        int wait = sub_info->wait;
 
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
 {
        DECLARE_COMPLETION_ONSTACK(done);
        struct subprocess_info sub_info = {
+               .work           = __WORK_INITIALIZER(sub_info.work,
+                                                    __call_usermodehelper),
                .complete       = &done,
                .path           = path,
                .argv           = argv,
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
                .wait           = wait,
                .retval         = 0,
        };
-       DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
        if (!khelper_wq)
                return -EBUSY;
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp,
        if (path[0] == '\0')
                return 0;
 
-       queue_work(khelper_wq, &work);
+       queue_work(khelper_wq, &sub_info.work);
        wait_for_completion(&done);
        return sub_info.retval;
 }
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 {
        DECLARE_COMPLETION(done);
        struct subprocess_info sub_info = {
+               .work           = __WORK_INITIALIZER(sub_info.work,
+                                                    __call_usermodehelper),
                .complete       = &done,
                .path           = path,
                .argv           = argv,
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
                .retval         = 0,
        };
        struct file *f;
-       DECLARE_WORK(work, __call_usermodehelper, &sub_info);
 
        if (!khelper_wq)
                return -EBUSY;
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
        }
        sub_info.stdin = f;
 
-       queue_work(khelper_wq, &work);
+       queue_work(khelper_wq, &sub_info.work);
        wait_for_completion(&done);
        return sub_info.retval;
 }
index 610c837ad9e0aa8923f48541aaa7ea903c6b2fd2..17ec4afb0994e48adc1c7a9cfde971f04b08d512 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/moduleloader.h>
 #include <linux/kallsyms.h>
+#include <linux/freezer.h>
 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -83,9 +84,36 @@ struct kprobe_insn_page {
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
+       int ngarbage;
 };
 
 static struct hlist_head kprobe_insn_pages;
+static int kprobe_garbage_slots;
+static int collect_garbage_slots(void);
+
+static int __kprobes check_safety(void)
+{
+       int ret = 0;
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+       ret = freeze_processes();
+       if (ret == 0) {
+               struct task_struct *p, *q;
+               do_each_thread(p, q) {
+                       if (p != current && p->state == TASK_RUNNING &&
+                           p->pid != 0) {
+                               printk("Check failed: %s is running\n",p->comm);
+                               ret = -1;
+                               goto loop_end;
+                       }
+               } while_each_thread(p, q);
+       }
+loop_end:
+       thaw_processes();
+#else
+       synchronize_sched();
+#endif
+       return ret;
+}
 
 /**
  * get_insn_slot() - Find a slot on an executable page for an instruction.
@@ -96,6 +124,7 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;
 
+      retry:
        hlist_for_each(pos, &kprobe_insn_pages) {
                kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
                if (kip->nused < INSNS_PER_PAGE) {
@@ -112,7 +141,11 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
                }
        }
 
-       /* All out of space.  Need to allocate a new page. Use slot 0.*/
+       /* If there are any garbage slots, collect them and try again. */
+       if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+               goto retry;
+       }
+       /* All out of space.  Need to allocate a new page. Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip) {
                return NULL;
@@ -133,10 +166,62 @@ kprobe_opcode_t __kprobes *get_insn_slot(void)
        memset(kip->slot_used, 0, INSNS_PER_PAGE);
        kip->slot_used[0] = 1;
        kip->nused = 1;
+       kip->ngarbage = 0;
        return kip->insns;
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t *slot)
+/* Return 1 if all garbage slots are collected, otherwise 0. */
+static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
+{
+       kip->slot_used[idx] = 0;
+       kip->nused--;
+       if (kip->nused == 0) {
+               /*
+                * Page is no longer in use.  Free it unless
+                * it's the last one.  We keep the last one
+                * so as not to have to set it up again the
+                * next time somebody inserts a probe.
+                */
+               hlist_del(&kip->hlist);
+               if (hlist_empty(&kprobe_insn_pages)) {
+                       INIT_HLIST_NODE(&kip->hlist);
+                       hlist_add_head(&kip->hlist,
+                                      &kprobe_insn_pages);
+               } else {
+                       module_free(NULL, kip->insns);
+                       kfree(kip);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+static int __kprobes collect_garbage_slots(void)
+{
+       struct kprobe_insn_page *kip;
+       struct hlist_node *pos, *next;
+
+       /* Ensure no-one is preempted on the garbage slots */
+       if (check_safety() != 0)
+               return -EAGAIN;
+
+       hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
+               int i;
+               kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+               if (kip->ngarbage == 0)
+                       continue;
+       kip->ngarbage = 0;      /* we will collect all garbage slots */
+               for (i = 0; i < INSNS_PER_PAGE; i++) {
+                       if (kip->slot_used[i] == -1 &&
+                           collect_one_slot(kip, i))
+                               break;
+               }
+       }
+       kprobe_garbage_slots = 0;
+       return 0;
+}
+
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
 {
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;
@@ -146,28 +231,18 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
-                       kip->slot_used[i] = 0;
-                       kip->nused--;
-                       if (kip->nused == 0) {
-                               /*
-                                * Page is no longer in use.  Free it unless
-                                * it's the last one.  We keep the last one
-                                * so as not to have to set it up again the
-                                * next time somebody inserts a probe.
-                                */
-                               hlist_del(&kip->hlist);
-                               if (hlist_empty(&kprobe_insn_pages)) {
-                                       INIT_HLIST_NODE(&kip->hlist);
-                                       hlist_add_head(&kip->hlist,
-                                               &kprobe_insn_pages);
-                               } else {
-                                       module_free(NULL, kip->insns);
-                                       kfree(kip);
-                               }
+                       if (dirty) {
+                               kip->slot_used[i] = -1;
+                               kip->ngarbage++;
+                       } else {
+                               collect_one_slot(kip, i);
                        }
-                       return;
+                       break;
                }
        }
+       if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE)) {
+               collect_garbage_slots();
+       }
 }
 #endif
 
index 4f9c60ef95e83d9dd08aaeff106d602ad7d41051..1db8c72d0d380c637b18e3951df45f32291ceeda 100644 (file)
@@ -31,6 +31,8 @@ struct kthread_create_info
        /* Result passed back to kthread_create() from keventd. */
        struct task_struct *result;
        struct completion done;
+
+       struct work_struct work;
 };
 
 struct kthread_stop_info
@@ -111,9 +113,10 @@ static int kthread(void *_create)
 }
 
 /* We are keventd: create a thread. */
-static void keventd_create_kthread(void *_create)
+static void keventd_create_kthread(struct work_struct *work)
 {
-       struct kthread_create_info *create = _create;
+       struct kthread_create_info *create =
+               container_of(work, struct kthread_create_info, work);
        int pid;
 
        /* We want our own signal handler (we take no signals by default). */
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   ...)
 {
        struct kthread_create_info create;
-       DECLARE_WORK(work, keventd_create_kthread, &create);
 
        create.threadfn = threadfn;
        create.data = data;
        init_completion(&create.started);
        init_completion(&create.done);
+       INIT_WORK(&create.work, keventd_create_kthread);
 
        /*
         * The workqueue needs to start up first:
         */
        if (!helper_wq)
-               work.func(work.data);
+               create.work.func(&create.work);
        else {
-               queue_work(helper_wq, &work);
+               queue_work(helper_wq, &create.work);
                wait_for_completion(&create.done);
        }
        if (!IS_ERR(create.result)) {
index c9fefdb1a7db45df9658dd32d963e7f28bbdfdf4..b02032476dc2e88967940060a84b5bc5742f1a5e 100644 (file)
@@ -140,13 +140,6 @@ void lockdep_on(void)
 
 EXPORT_SYMBOL(lockdep_on);
 
-int lockdep_internal(void)
-{
-       return current->lockdep_recursion != 0;
-}
-
-EXPORT_SYMBOL(lockdep_internal);
-
 /*
  * Debugging switches:
  */
@@ -228,17 +221,15 @@ static int save_trace(struct stack_trace *trace)
        trace->skip = 3;
        trace->all_contexts = 0;
 
-       /* Make sure to not recurse in case the unwinder needs to take locks. */
-       lockdep_off();
        save_stack_trace(trace, NULL);
-       lockdep_on();
 
        trace->max_entries = trace->nr_entries;
 
        nr_stack_trace_entries += trace->nr_entries;
-       if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
+       if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
+               __raw_spin_unlock(&hash_lock);
                return 0;
+       }
 
        if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
                __raw_spin_unlock(&hash_lock);
@@ -357,7 +348,7 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
 
 static void print_lock_name(struct lock_class *class)
 {
-       char str[128], c1, c2, c3, c4;
+       char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
        const char *name;
 
        get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -379,7 +370,7 @@ static void print_lock_name(struct lock_class *class)
 static void print_lockdep_cache(struct lockdep_map *lock)
 {
        const char *name;
-       char str[128];
+       char str[KSYM_NAME_LEN + 1];
 
        name = lock->name;
        if (!name)
@@ -449,7 +440,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
        print_lock_class_header(class, depth);
 
        list_for_each_entry(entry, &class->locks_after, entry) {
-               DEBUG_LOCKS_WARN_ON(!entry->class);
+               if (DEBUG_LOCKS_WARN_ON(!entry->class))
+                       return;
+
                print_lock_dependencies(entry->class, depth + 1);
 
                printk("%*s ... acquired at:\n",depth,"");
@@ -474,7 +467,8 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
                return 0;
 
        entry->class = this;
-       save_trace(&entry->trace);
+       if (!save_trace(&entry->trace))
+               return 0;
 
        /*
         * Since we never remove from the dependency list, the list can
@@ -562,8 +556,12 @@ static noinline int print_circular_bug_tail(void)
        if (debug_locks_silent)
                return 0;
 
+       /* hash_lock unlocked by the header */
+       __raw_spin_lock(&hash_lock);
        this.class = check_source->class;
-       save_trace(&this.trace);
+       if (!save_trace(&this.trace))
+               return 0;
+       __raw_spin_unlock(&hash_lock);
        print_circular_bug_entry(&this, 0);
 
        printk("\nother info that might help us debug this:\n\n");
@@ -966,14 +964,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                               &prev->class->locks_after, next->acquire_ip);
        if (!ret)
                return 0;
-       /*
-        * Return value of 2 signals 'dependency already added',
-        * in that case we dont have to add the backlink either.
-        */
-       if (ret == 2)
-               return 2;
+
        ret = add_lock_to_list(next->class, prev->class,
                               &next->class->locks_before, next->acquire_ip);
+       if (!ret)
+               return 0;
 
        /*
         * Debugging printouts:
@@ -1025,7 +1020,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                 * added:
                 */
                if (hlock->read != 2) {
-                       check_prev_add(curr, hlock, next);
+                       if (!check_prev_add(curr, hlock, next))
+                               return 0;
                        /*
                         * Stop after the first non-trylock entry,
                         * as non-trylock entries have added their
@@ -1182,6 +1178,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;
+       unsigned long flags;
 
        class = look_up_lock_class(lock, subclass);
        if (likely(class))
@@ -1203,6 +1200,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        key = lock->key->subkeys + subclass;
        hash_head = classhashentry(key);
 
+       raw_local_irq_save(flags);
        __raw_spin_lock(&hash_lock);
        /*
         * We have to do the hash-walk again, to avoid races
@@ -1217,6 +1215,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                __raw_spin_unlock(&hash_lock);
+               raw_local_irq_restore(flags);
                debug_locks_off();
                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
@@ -1239,15 +1238,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
        if (verbose(class)) {
                __raw_spin_unlock(&hash_lock);
+               raw_local_irq_restore(flags);
                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                printk("\n");
                dump_stack();
+               raw_local_irq_save(flags);
                __raw_spin_lock(&hash_lock);
        }
 out_unlock_set:
        __raw_spin_unlock(&hash_lock);
+       raw_local_irq_restore(flags);
 
        if (!subclass || force)
                lock->class_cache = class;
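
register_lock_class() now brackets every hash_lock critical section with raw_local_irq_save()/raw_local_irq_restore(), and re-disables interrupts before re-taking the lock after the verbose printout. Stripped of the surrounding logic, the pattern added here is:

        unsigned long flags;

        raw_local_irq_save(flags);       /* no interrupts while hash_lock is held */
        __raw_spin_lock(&hash_lock);

        /* ... look up or register the lock class ... */

        __raw_spin_unlock(&hash_lock);
        raw_local_irq_restore(flags);
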
@@ -1728,6 +1730,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
                debug_atomic_dec(&nr_unused_locks);
                break;
        default:
+               __raw_spin_unlock(&hash_lock);
                debug_locks_off();
                WARN_ON(1);
                return 0;
@@ -2645,6 +2648,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
        }
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(struct task_struct *curr)
 {
index eab043c83bb2d1859e2e1c295737f8ac9386bcca..8ce09bc4613dc1ea72566bb0a2a7822ac802930f 100644 (file)
@@ -20,7 +20,7 @@
 #define MAX_LOCKDEP_KEYS_BITS  11
 #define MAX_LOCKDEP_KEYS       (1UL << MAX_LOCKDEP_KEYS_BITS)
 
-#define MAX_LOCKDEP_CHAINS_BITS        13
+#define MAX_LOCKDEP_CHAINS_BITS        14
 #define MAX_LOCKDEP_CHAINS     (1UL << MAX_LOCKDEP_CHAINS_BITS)
 
 /*
index f6e72eaab3faf08b18c65092ef1906ed696a1dd7..b554b40a4aa6ca1872f7225a47bc2829300e24b5 100644 (file)
@@ -113,7 +113,7 @@ static int l_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static struct seq_operations lockdep_ops = {
+static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
@@ -135,7 +135,7 @@ static int lockdep_open(struct inode *inode, struct file *file)
        return res;
 }
 
-static struct file_operations proc_lockdep_operations = {
+static const struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -319,7 +319,7 @@ static int lockdep_stats_open(struct inode *inode, struct file *file)
        return single_open(file, lockdep_stats_show, NULL);
 }
 
-static struct file_operations proc_lockdep_stats_operations = {
+static const struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index e2d09d604ca08a7f7e73cd20eb972dcb02525115..d9eae45d0145a077d6b32d2b29156f9d3b18bfdd 100644 (file)
@@ -2209,7 +2209,7 @@ static int m_show(struct seq_file *m, void *p)
    Where refcount is a number or -, and deps is a comma-separated list
    of depends or -.
 */
-struct seq_operations modules_op = {
+const struct seq_operations modules_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
index 18651641a7b5aa6482e0aa73ce03b5133e6ffa91..841539d72c55cdbb674b7a91786847c37ec5acdb 100644 (file)
@@ -77,6 +77,9 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
+       if (unlikely(!debug_locks))
+               return;
+
        DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
        DEBUG_LOCKS_WARN_ON(lock->magic != lock);
        DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
index b914392085f9a171f08a3e202dc14145e1563929..a48879b0b9212a14042e686b0518d79968519df8 100644 (file)
@@ -31,7 +31,7 @@
 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
 static struct hlist_head *pid_hash;
 static int pidhash_shift;
-static kmem_cache_t *pid_cachep;
+static struct kmem_cache *pid_cachep;
 
 int pid_max = PID_MAX_DEFAULT;
 
index 9cbb5d1be06f7420005bdf32538777deb844af03..5fe87de10ff07ae1afbecc8e076360c4f6005424 100644 (file)
@@ -70,7 +70,7 @@
 /*
  * Lets keep our timers in a slab cache :-)
  */
-static kmem_cache_t *posix_timers_cache;
+static struct kmem_cache *posix_timers_cache;
 static struct idr posix_timers_id;
 static DEFINE_SPINLOCK(idr_lock);
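
The pid.c and posix-timers.c hunks replace the deprecated kmem_cache_t typedef with the underlying struct kmem_cache; the slab calls themselves are unchanged. A hypothetical user now reads:

static struct kmem_cache *example_cachep;       /* hypothetical cache pointer */

static void *example_alloc(void)
{
        return kmem_cache_alloc(example_cachep, GFP_KERNEL);
}

static void example_free(void *obj)
{
        kmem_cache_free(example_cachep, obj);
}
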
 
index 825068ca347994570680e15f8858d590d244e61c..710ed084e7c5667b96da2362f67025fb8e521d50 100644 (file)
@@ -78,7 +78,7 @@ config PM_SYSFS_DEPRECATED
 
 config SOFTWARE_SUSPEND
        bool "Software Suspend"
-       depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP) && !X86_PAE) || ((FRV || PPC32) && !SMP))
+       depends on PM && SWAP && ((X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP))
        ---help---
          Enable the possibility of suspending the machine.
          It doesn't need ACPI or APM.
index b1fb7866b0b31d65ccea5946aa22e9c2881e0322..0b00f56c2ad0ad369fdaa42d40e24c6f56720881 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/pm.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 
 #include "power.h"
 
 static int noresume = 0;
 char resume_file[256] = CONFIG_PM_STD_PARTITION;
 dev_t swsusp_resume_device;
+sector_t swsusp_resume_block;
+
+/**
+ *     platform_prepare - prepare the machine for hibernation using the
+ *     platform driver if so configured and return an error code if it fails
+ */
+
+static inline int platform_prepare(void)
+{
+       int error = 0;
+
+       if (pm_disk_mode == PM_DISK_PLATFORM) {
+               if (pm_ops && pm_ops->prepare)
+                       error = pm_ops->prepare(PM_SUSPEND_DISK);
+       }
+       return error;
+}
 
 /**
  *     power_down - Shut machine down for hibernate.
@@ -40,12 +58,10 @@ dev_t swsusp_resume_device;
 
 static void power_down(suspend_disk_method_t mode)
 {
-       int error = 0;
-
        switch(mode) {
        case PM_DISK_PLATFORM:
                kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
-               error = pm_ops->enter(PM_SUSPEND_DISK);
+               pm_ops->enter(PM_SUSPEND_DISK);
                break;
        case PM_DISK_SHUTDOWN:
                kernel_power_off();
@@ -90,12 +106,18 @@ static int prepare_processes(void)
                goto thaw;
        }
 
+       error = platform_prepare();
+       if (error)
+               goto thaw;
+
        /* Free memory before shutting down devices. */
        if (!(error = swsusp_shrink_memory()))
                return 0;
-thaw:
+
+       platform_finish();
+ thaw:
        thaw_processes();
-enable_cpus:
+ enable_cpus:
        enable_nonboot_cpus();
        pm_restore_console();
        return error;
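
prepare_processes() gains a platform_prepare() step and undoes it with platform_finish() when swsusp_shrink_memory() fails, following the usual one-label-per-step unwind idiom. In isolation, with hypothetical step helpers, the idiom is:

static int step_one(void)  { return 0; }         /* hypothetical setup steps */
static int step_two(void)  { return 0; }
static void undo_one(void) { }

static int example_prepare(void)
{
        int error;

        error = step_one();
        if (error)
                return error;            /* nothing to undo yet */

        error = step_two();
        if (error)
                goto undo1;              /* roll back step one, then report */

        return 0;

 undo1:
        undo_one();
        return error;
}
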
@@ -127,7 +149,7 @@ int pm_suspend_disk(void)
                return error;
 
        if (pm_disk_mode == PM_DISK_TESTPROC)
-               goto Thaw;
+               return 0;
 
        suspend_console();
        error = device_suspend(PMSG_FREEZE);
@@ -189,10 +211,10 @@ static int software_resume(void)
 {
        int error;
 
-       down(&pm_sem);
+       mutex_lock(&pm_mutex);
        if (!swsusp_resume_device) {
                if (!strlen(resume_file)) {
-                       up(&pm_sem);
+                       mutex_unlock(&pm_mutex);
                        return -ENOENT;
                }
                swsusp_resume_device = name_to_dev_t(resume_file);
@@ -207,7 +229,7 @@ static int software_resume(void)
                 * FIXME: If noresume is specified, we need to find the partition
                 * and reset it back to normal swap space.
                 */
-               up(&pm_sem);
+               mutex_unlock(&pm_mutex);
                return 0;
        }
 
@@ -251,7 +273,7 @@ static int software_resume(void)
        unprepare_processes();
  Done:
        /* For success case, the suspend path will release the lock */
-       up(&pm_sem);
+       mutex_unlock(&pm_mutex);
        pr_debug("PM: Resume from disk failed.\n");
        return 0;
 }
@@ -312,7 +334,7 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
        p = memchr(buf, '\n', n);
        len = p ? p - buf : n;
 
-       down(&pm_sem);
+       mutex_lock(&pm_mutex);
        for (i = PM_DISK_FIRMWARE; i < PM_DISK_MAX; i++) {
                if (!strncmp(buf, pm_disk_modes[i], len)) {
                        mode = i;
@@ -336,7 +358,7 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
 
        pr_debug("PM: suspend-to-disk mode set to '%s'\n",
                 pm_disk_modes[mode]);
-       up(&pm_sem);
+       mutex_unlock(&pm_mutex);
        return error ? error : n;
 }
 
@@ -361,14 +383,14 @@ static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n)
        if (maj != MAJOR(res) || min != MINOR(res))
                goto out;
 
-       down(&pm_sem);
+       mutex_lock(&pm_mutex);
        swsusp_resume_device = res;
-       up(&pm_sem);
+       mutex_unlock(&pm_mutex);
        printk("Attempting manual resume\n");
        noresume = 0;
        software_resume();
        ret = n;
-out:
+ out:
        return ret;
 }
 
@@ -423,6 +445,19 @@ static int __init resume_setup(char *str)
        return 1;
 }
 
+static int __init resume_offset_setup(char *str)
+{
+       unsigned long long offset;
+
+       if (noresume)
+               return 1;
+
+       if (sscanf(str, "%llu", &offset) == 1)
+               swsusp_resume_block = offset;
+
+       return 1;
+}
+
 static int __init noresume_setup(char *str)
 {
        noresume = 1;
@@ -430,4 +465,5 @@ static int __init noresume_setup(char *str)
 }
 
 __setup("noresume", noresume_setup);
+__setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
index 873228c71dabdf700abcf2e6c19dcb3cc96315b7..500eb87f643dd9ff56d08079a29c506e7140b777 100644 (file)
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/kobject.h>
 #include <linux/string.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/resume-trace.h>
+#include <linux/freezer.h>
 
 #include "power.h"
 
 /*This is just an arbitrary number */
 #define FREE_PAGE_NUMBER (100)
 
-DECLARE_MUTEX(pm_sem);
+DEFINE_MUTEX(pm_mutex);
 
 struct pm_ops *pm_ops;
 suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
@@ -36,9 +38,9 @@ suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
 
 void pm_set_ops(struct pm_ops * ops)
 {
-       down(&pm_sem);
+       mutex_lock(&pm_mutex);
        pm_ops = ops;
-       up(&pm_sem);
+       mutex_unlock(&pm_mutex);
 }
 
 
@@ -182,7 +184,7 @@ static int enter_state(suspend_state_t state)
 
        if (!valid_state(state))
                return -ENODEV;
-       if (down_trylock(&pm_sem))
+       if (!mutex_trylock(&pm_mutex))
                return -EBUSY;
 
        if (state == PM_SUSPEND_DISK) {
@@ -200,7 +202,7 @@ static int enter_state(suspend_state_t state)
        pr_debug("PM: Finishing wakeup.\n");
        suspend_finish(state);
  Unlock:
-       up(&pm_sem);
+       mutex_unlock(&pm_mutex);
        return error;
 }
 
@@ -229,7 +231,7 @@ int pm_suspend(suspend_state_t state)
        return -EINVAL;
 }
 
-
+EXPORT_SYMBOL(pm_suspend);
 
 decl_subsys(power,NULL,NULL);
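
Note the inverted test in enter_state(): down_trylock() returns 0 when it takes the semaphore, whereas mutex_trylock() returns 1 on success, so the conversion flips the condition. A minimal sketch of the new form:

static DEFINE_MUTEX(example_mutex);

static int example_enter(void)
{
        if (!mutex_trylock(&example_mutex))      /* nonzero means acquired */
                return -EBUSY;

        /* ... critical section ... */

        mutex_unlock(&example_mutex);
        return 0;
}
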
 
index bfe999f7b2720b9b3fa1b0a667103e35282922d1..eb461b816bf430cd4336a249ce16812e260a19b1 100644 (file)
@@ -22,7 +22,9 @@ static inline int pm_suspend_disk(void)
        return -EPERM;
 }
 #endif
-extern struct semaphore pm_sem;
+
+extern struct mutex pm_mutex;
+
 #define power_attr(_name) \
 static struct subsys_attribute _name##_attr = {        \
        .attr   = {                             \
@@ -42,6 +44,7 @@ extern const void __nosave_begin, __nosave_end;
 extern unsigned long image_size;
 extern int in_suspend;
 extern dev_t swsusp_resume_device;
+extern sector_t swsusp_resume_block;
 
 extern asmlinkage int swsusp_arch_suspend(void);
 extern asmlinkage int swsusp_arch_resume(void);
@@ -102,8 +105,18 @@ struct snapshot_handle {
 extern unsigned int snapshot_additional_pages(struct zone *zone);
 extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
 extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
+extern void snapshot_write_finalize(struct snapshot_handle *handle);
 extern int snapshot_image_loaded(struct snapshot_handle *handle);
-extern void snapshot_free_unused_memory(struct snapshot_handle *handle);
+
+/*
+ * This structure is used to pass the values needed for the identification
+ * of the resume swap area from a user space to the kernel via the
+ * SNAPSHOT_SET_SWAP_AREA ioctl
+ */
+struct resume_swap_area {
+       loff_t offset;
+       u_int32_t dev;
+} __attribute__((packed));
 
 #define SNAPSHOT_IOC_MAGIC     '3'
 #define SNAPSHOT_FREEZE                        _IO(SNAPSHOT_IOC_MAGIC, 1)
@@ -117,7 +130,14 @@ extern void snapshot_free_unused_memory(struct snapshot_handle *handle);
 #define SNAPSHOT_FREE_SWAP_PAGES       _IO(SNAPSHOT_IOC_MAGIC, 9)
 #define SNAPSHOT_SET_SWAP_FILE         _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
 #define SNAPSHOT_S2RAM                 _IO(SNAPSHOT_IOC_MAGIC, 11)
-#define SNAPSHOT_IOC_MAXNR     11
+#define SNAPSHOT_PMOPS                 _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+#define SNAPSHOT_SET_SWAP_AREA         _IOW(SNAPSHOT_IOC_MAGIC, 13, \
+                                                       struct resume_swap_area)
+#define SNAPSHOT_IOC_MAXNR     13
+
+#define PMOPS_PREPARE  1
+#define PMOPS_ENTER    2
+#define PMOPS_FINISH   3
 
 /**
  *     The bitmap is used for tracing allocated swap pages
@@ -141,7 +161,7 @@ struct bitmap_page {
 
 extern void free_bitmap(struct bitmap_page *bitmap);
 extern struct bitmap_page *alloc_bitmap(unsigned int nr_bits);
-extern unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap);
+extern sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap);
 extern void free_all_swap_pages(int swap, struct bitmap_page *bitmap);
 
 extern int swsusp_check(void);
@@ -153,3 +173,7 @@ extern int swsusp_read(void);
 extern int swsusp_write(void);
 extern void swsusp_close(void);
 extern int suspend_enter(suspend_state_t state);
+
+struct timeval;
+extern void swsusp_show_speed(struct timeval *, struct timeval *,
+                               unsigned int, char *);
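
SNAPSHOT_SET_SWAP_AREA lets the user-space suspend tool pass a swap device plus an offset (needed for swap files) instead of just a device number. A sketch of a user-space caller, assuming the tool mirrors the definitions above and that the snapshot device node is /dev/snapshot (both assumptions, not taken from this patch):

#include <stdint.h>
#include <sys/ioctl.h>

/* mirrors the kernel structure above, with user-space types */
struct resume_swap_area {
        int64_t  offset;
        uint32_t dev;
} __attribute__((packed));

#define SNAPSHOT_IOC_MAGIC      '3'
#define SNAPSHOT_SET_SWAP_AREA  _IOW(SNAPSHOT_IOC_MAGIC, 13, struct resume_swap_area)

/* snapshot_fd: an open file descriptor on the snapshot device */
static int set_swap_area(int snapshot_fd, uint32_t dev, int64_t offset)
{
        struct resume_swap_area swap_area = {
                .offset = offset,        /* 0 when resuming from a whole partition */
                .dev    = dev,
        };

        return ioctl(snapshot_fd, SNAPSHOT_SET_SWAP_AREA, &swap_area);
}
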
index f1f900ac31640c2072778e9a4ece504fe8200d68..678ec736076b1395b4a16949cb5f595b72bdd616 100644 (file)
  * callback we use.
  */
 
-static void do_poweroff(void *dummy)
+static void do_poweroff(struct work_struct *dummy)
 {
        kernel_power_off();
 }
 
-static DECLARE_WORK(poweroff_work, do_poweroff, NULL);
+static DECLARE_WORK(poweroff_work, do_poweroff);
 
 static void handle_poweroff(int key, struct tty_struct *tty)
 {
index 72e72d2c61e6e412106c2e58733087b9ba16ac81..99eeb119b06db1d350d04de044fdb624f4627a19 100644 (file)
 #include <linux/suspend.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/freezer.h>
 
 /* 
  * Timeout for stopping processes
  */
 #define TIMEOUT        (20 * HZ)
 
+#define FREEZER_KERNEL_THREADS 0
+#define FREEZER_USER_SPACE 1
 
 static inline int freezeable(struct task_struct * p)
 {
@@ -39,7 +42,6 @@ void refrigerator(void)
        long save;
        save = current->state;
        pr_debug("%s entered refrigerator\n", current->comm);
-       printk("=");
 
        frozen_process(current);
        spin_lock_irq(&current->sighand->siglock);
@@ -79,96 +81,136 @@ static void cancel_freezing(struct task_struct *p)
        }
 }
 
-/* 0 = success, else # of processes that we failed to stop */
-int freeze_processes(void)
+static inline int is_user_space(struct task_struct *p)
+{
+       return p->mm && !(p->flags & PF_BORROWED_MM);
+}
+
+static unsigned int try_to_freeze_tasks(int freeze_user_space)
 {
-       int todo, nr_user, user_frozen;
-       unsigned long start_time;
        struct task_struct *g, *p;
+       unsigned long end_time;
+       unsigned int todo;
 
-       printk( "Stopping tasks: " );
-       start_time = jiffies;
-       user_frozen = 0;
+       end_time = jiffies + TIMEOUT;
        do {
-               nr_user = todo = 0;
+               todo = 0;
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        if (!freezeable(p))
                                continue;
+
                        if (frozen(p))
                                continue;
-                       if (p->state == TASK_TRACED && frozen(p->parent)) {
+
+                       if (p->state == TASK_TRACED &&
+                           (frozen(p->parent) ||
+                            p->parent->state == TASK_STOPPED)) {
                                cancel_freezing(p);
                                continue;
                        }
-                       if (p->mm && !(p->flags & PF_BORROWED_MM)) {
-                               /* The task is a user-space one.
-                                * Freeze it unless there's a vfork completion
-                                * pending
+                       if (is_user_space(p)) {
+                               if (!freeze_user_space)
+                                       continue;
+
+                               /* Freeze the task unless there is a vfork
+                                * completion pending
                                 */
                                if (!p->vfork_done)
                                        freeze_process(p);
-                               nr_user++;
                        } else {
-                               /* Freeze only if the user space is frozen */
-                               if (user_frozen)
-                                       freeze_process(p);
-                               todo++;
+                               if (freeze_user_space)
+                                       continue;
+
+                               freeze_process(p);
                        }
+                       todo++;
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
-               todo += nr_user;
-               if (!user_frozen && !nr_user) {
-                       sys_sync();
-                       start_time = jiffies;
-               }
-               user_frozen = !nr_user;
                yield();                        /* Yield is okay here */
-               if (todo && time_after(jiffies, start_time + TIMEOUT))
+               if (todo && time_after(jiffies, end_time))
                        break;
-       } while(todo);
+       } while (todo);
 
-       /* This does not unfreeze processes that are already frozen
-        * (we have slightly ugly calling convention in that respect,
-        * and caller must call thaw_processes() if something fails),
-        * but it cleans up leftover PF_FREEZE requests.
-        */
        if (todo) {
-               printk( "\n" );
-               printk(KERN_ERR " stopping tasks timed out "
-                       "after %d seconds (%d tasks remaining):\n",
-                       TIMEOUT / HZ, todo);
+               /* This does not unfreeze processes that are already frozen
+                * (we have slightly ugly calling convention in that respect,
+                * and caller must call thaw_processes() if something fails),
+                * but it cleans up leftover PF_FREEZE requests.
+                */
+               printk("\n");
+               printk(KERN_ERR "Stopping %s timed out after %d seconds "
+                               "(%d tasks refusing to freeze):\n",
+                               freeze_user_space ? "user space processes" :
+                                       "kernel threads",
+                               TIMEOUT / HZ, todo);
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
+                       if (is_user_space(p) == !freeze_user_space)
+                               continue;
+
                        if (freezeable(p) && !frozen(p))
-                               printk(KERN_ERR "  %s\n", p->comm);
+                               printk(KERN_ERR " %s\n", p->comm);
+
                        cancel_freezing(p);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
-               return todo;
        }
 
-       printk( "|\n" );
+       return todo;
+}
+
+/**
+ *     freeze_processes - tell processes to enter the refrigerator
+ *
+ *     Returns 0 on success, or the number of processes that didn't freeze,
+ *     although they were told to.
+ */
+int freeze_processes(void)
+{
+       unsigned int nr_unfrozen;
+
+       printk("Stopping tasks ... ");
+       nr_unfrozen = try_to_freeze_tasks(FREEZER_USER_SPACE);
+       if (nr_unfrozen)
+               return nr_unfrozen;
+
+       sys_sync();
+       nr_unfrozen = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
+       if (nr_unfrozen)
+               return nr_unfrozen;
+
+       printk("done.\n");
        BUG_ON(in_atomic());
        return 0;
 }
 
-void thaw_processes(void)
+static void thaw_tasks(int thaw_user_space)
 {
        struct task_struct *g, *p;
 
-       printk( "Restarting tasks..." );
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                if (!freezeable(p))
                        continue;
+
+               if (is_user_space(p) == !thaw_user_space)
+                       continue;
+
                if (!thaw_process(p))
-                       printk(KERN_INFO " Strange, %s not stopped\n", p->comm );
+                       printk(KERN_WARNING " Strange, %s not stopped\n",
+                               p->comm );
        } while_each_thread(g, p);
-
        read_unlock(&tasklist_lock);
+}
+
+void thaw_processes(void)
+{
+       printk("Restarting tasks ... ");
+       thaw_tasks(FREEZER_KERNEL_THREADS);
+       thaw_tasks(FREEZER_USER_SPACE);
        schedule();
-       printk( " done\n" );
+       printk("done.\n");
 }
 
 EXPORT_SYMBOL(refrigerator);
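
freeze_processes() now runs in two passes: user-space tasks are frozen first, sys_sync() runs while they are safely stopped, and only then are the freezeable kernel threads frozen. A kernel thread that wants to participate typically cooperates along these lines (a sketch, not code from this patch):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread(void *unused)
{
        while (!kthread_should_stop()) {
                try_to_freeze();         /* park in the refrigerator if asked */

                /* ... do one unit of work ... */

                schedule_timeout_interruptible(HZ);
        }
        return 0;
}
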
index 99f9b7d177d6a843f5bcb3ade50e67379e169013..c024606221c4f9d6dd7ba35f3634f9f944fda23e 100644 (file)
@@ -1,15 +1,15 @@
 /*
  * linux/kernel/power/snapshot.c
  *
- * This file provide system snapshot/restore functionality.
+ * This file provides system snapshot/restore functionality for swsusp.
  *
  * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  *
- * This file is released under the GPLv2, and is based on swsusp.c.
+ * This file is released under the GPLv2.
  *
  */
 
-
 #include <linux/version.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 
 #include "power.h"
 
-/* List of PBEs used for creating and restoring the suspend image */
+/* List of PBEs needed for restoring the pages that were allocated before
+ * the suspend and included in the suspend image, but have also been
+ * allocated by the "resume" kernel, so their contents cannot be written
+ * directly to their "original" page frames.
+ */
 struct pbe *restore_pblist;
 
-static unsigned int nr_copy_pages;
-static unsigned int nr_meta_pages;
+/* Pointer to an auxiliary buffer (1 page) */
 static void *buffer;
 
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void)
-{
-       struct zone *zone;
-       unsigned long zone_pfn;
-       unsigned int n = 0;
-
-       for_each_zone (zone)
-               if (is_highmem(zone)) {
-                       mark_free_pages(zone);
-                       for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
-                               struct page *page;
-                               unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-                               if (!pfn_valid(pfn))
-                                       continue;
-                               page = pfn_to_page(pfn);
-                               if (PageReserved(page))
-                                       continue;
-                               if (PageNosaveFree(page))
-                                       continue;
-                               n++;
-                       }
-               }
-       return n;
-}
-
-struct highmem_page {
-       char *data;
-       struct page *page;
-       struct highmem_page *next;
-};
-
-static struct highmem_page *highmem_copy;
-
-static int save_highmem_zone(struct zone *zone)
-{
-       unsigned long zone_pfn;
-       mark_free_pages(zone);
-       for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
-               struct page *page;
-               struct highmem_page *save;
-               void *kaddr;
-               unsigned long pfn = zone_pfn + zone->zone_start_pfn;
-
-               if (!(pfn%10000))
-                       printk(".");
-               if (!pfn_valid(pfn))
-                       continue;
-               page = pfn_to_page(pfn);
-               /*
-                * This condition results from rvmalloc() sans vmalloc_32()
-                * and architectural memory reservations. This should be
-                * corrected eventually when the cases giving rise to this
-                * are better understood.
-                */
-               if (PageReserved(page))
-                       continue;
-               BUG_ON(PageNosave(page));
-               if (PageNosaveFree(page))
-                       continue;
-               save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
-               if (!save)
-                       return -ENOMEM;
-               save->next = highmem_copy;
-               save->page = page;
-               save->data = (void *) get_zeroed_page(GFP_ATOMIC);
-               if (!save->data) {
-                       kfree(save);
-                       return -ENOMEM;
-               }
-               kaddr = kmap_atomic(page, KM_USER0);
-               memcpy(save->data, kaddr, PAGE_SIZE);
-               kunmap_atomic(kaddr, KM_USER0);
-               highmem_copy = save;
-       }
-       return 0;
-}
-
-int save_highmem(void)
-{
-       struct zone *zone;
-       int res = 0;
-
-       pr_debug("swsusp: Saving Highmem");
-       drain_local_pages();
-       for_each_zone (zone) {
-               if (is_highmem(zone))
-                       res = save_highmem_zone(zone);
-               if (res)
-                       return res;
-       }
-       printk("\n");
-       return 0;
-}
-
-int restore_highmem(void)
-{
-       printk("swsusp: Restoring Highmem\n");
-       while (highmem_copy) {
-               struct highmem_page *save = highmem_copy;
-               void *kaddr;
-               highmem_copy = save->next;
-
-               kaddr = kmap_atomic(save->page, KM_USER0);
-               memcpy(kaddr, save->data, PAGE_SIZE);
-               kunmap_atomic(kaddr, KM_USER0);
-               free_page((long) save->data);
-               kfree(save);
-       }
-       return 0;
-}
-#else
-static inline unsigned int count_highmem_pages(void) {return 0;}
-static inline int save_highmem(void) {return 0;}
-static inline int restore_highmem(void) {return 0;}
-#endif
-
 /**
  *     @safe_needed - on resume, for storing the PBE list and the image,
  *     we can only use memory pages that do not conflict with the pages
- *     used before suspend.
+ *     used before suspend.  The unsafe pages have PageNosaveFree set
+ *     and we count them using unsafe_pages.
  *
- *     The unsafe pages are marked with the PG_nosave_free flag
- *     and we count them using unsafe_pages
+ *     Each allocated image page is marked as PageNosave and PageNosaveFree
+ *     so that swsusp_free() can release it.
  */
 
 #define PG_ANY         0
@@ -174,7 +61,7 @@ static inline int restore_highmem(void) {return 0;}
 
 static unsigned int allocated_unsafe_pages;
 
-static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
+static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 {
        void *res;
 
@@ -195,20 +82,39 @@ static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
 
 unsigned long get_safe_page(gfp_t gfp_mask)
 {
-       return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE);
+       return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
+}
+
+static struct page *alloc_image_page(gfp_t gfp_mask)
+{
+       struct page *page;
+
+       page = alloc_page(gfp_mask);
+       if (page) {
+               SetPageNosave(page);
+               SetPageNosaveFree(page);
+       }
+       return page;
 }
 
 /**
  *     free_image_page - free page represented by @addr, allocated with
- *     alloc_image_page (page flags set by it must be cleared)
+ *     get_image_page (page flags set by it must be cleared)
  */
 
 static inline void free_image_page(void *addr, int clear_nosave_free)
 {
-       ClearPageNosave(virt_to_page(addr));
+       struct page *page;
+
+       BUG_ON(!virt_addr_valid(addr));
+
+       page = virt_to_page(addr);
+
+       ClearPageNosave(page);
        if (clear_nosave_free)
-               ClearPageNosaveFree(virt_to_page(addr));
-       free_page((unsigned long)addr);
+               ClearPageNosaveFree(page);
+
+       __free_page(page);
 }
 
 /* struct linked_page is used to build chains of pages */
@@ -269,7 +175,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;
 
-               lp = alloc_image_page(ca->gfp_mask, ca->safe_needed);
+               lp = get_image_page(ca->gfp_mask, ca->safe_needed);
                if (!lp)
                        return NULL;
 
@@ -446,8 +352,8 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 
        /* Compute the number of zones */
        nr = 0;
-       for_each_zone (zone)
-               if (populated_zone(zone) && !is_highmem(zone))
+       for_each_zone(zone)
+               if (populated_zone(zone))
                        nr++;
 
        /* Allocate the list of zones bitmap objects */
@@ -459,10 +365,10 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
        }
 
        /* Initialize the zone bitmap objects */
-       for_each_zone (zone) {
+       for_each_zone(zone) {
                unsigned long pfn;
 
-               if (!populated_zone(zone) || is_highmem(zone))
+               if (!populated_zone(zone))
                        continue;
 
                zone_bm->start_pfn = zone->zone_start_pfn;
@@ -481,7 +387,7 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
                while (bb) {
                        unsigned long *ptr;
 
-                       ptr = alloc_image_page(gfp_mask, safe_needed);
+                       ptr = get_image_page(gfp_mask, safe_needed);
                        bb->data = ptr;
                        if (!ptr)
                                goto Free;
@@ -505,7 +411,7 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
        memory_bm_position_reset(bm);
        return 0;
 
-Free:
+ Free:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        return -ENOMEM;
@@ -651,7 +557,7 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
        memory_bm_position_reset(bm);
        return BM_END_OF_MAP;
 
-Return_pfn:
+ Return_pfn:
        bm->cur.chunk = chunk;
        bm->cur.bit = bit;
        return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
@@ -669,9 +575,81 @@ unsigned int snapshot_additional_pages(struct zone *zone)
 
        res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
-       return res;
+       return 2 * res;
+}
+
+#ifdef CONFIG_HIGHMEM
+/**
+ *     count_free_highmem_pages - compute the total number of free highmem
+ *     pages, system-wide.
+ */
+
+static unsigned int count_free_highmem_pages(void)
+{
+       struct zone *zone;
+       unsigned int cnt = 0;
+
+       for_each_zone(zone)
+               if (populated_zone(zone) && is_highmem(zone))
+                       cnt += zone->free_pages;
+
+       return cnt;
+}
+
+/**
+ *     saveable_highmem_page - Determine whether a highmem page should be
+ *     included in the suspend image.
+ *
+ *     We should save the page if it isn't Nosave or NosaveFree, or Reserved,
+ *     and it isn't a part of a free chunk of pages.
+ */
+
+static struct page *saveable_highmem_page(unsigned long pfn)
+{
+       struct page *page;
+
+       if (!pfn_valid(pfn))
+               return NULL;
+
+       page = pfn_to_page(pfn);
+
+       BUG_ON(!PageHighMem(page));
+
+       if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
+               return NULL;
+
+       return page;
 }
 
+/**
+ *     count_highmem_pages - compute the total number of saveable highmem
+ *     pages.
+ */
+
+unsigned int count_highmem_pages(void)
+{
+       struct zone *zone;
+       unsigned int n = 0;
+
+       for_each_zone(zone) {
+               unsigned long pfn, max_zone_pfn;
+
+               if (!is_highmem(zone))
+                       continue;
+
+               mark_free_pages(zone);
+               max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+               for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
+                       if (saveable_highmem_page(pfn))
+                               n++;
+       }
+       return n;
+}
+#else
+static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
+static inline unsigned int count_highmem_pages(void) { return 0; }
+#endif /* CONFIG_HIGHMEM */
+
 /**
  *     pfn_is_nosave - check if given pfn is in the 'nosave' section
  */
@@ -684,12 +662,12 @@ static inline int pfn_is_nosave(unsigned long pfn)
 }
 
 /**
- *     saveable - Determine whether a page should be cloned or not.
- *     @pfn:   The page
+ *     saveable - Determine whether a non-highmem page should be included in
+ *     the suspend image.
  *
- *     We save a page if it isn't Nosave, and is not in the range of pages
- *     statically defined as 'unsaveable', and it
- *     isn't a part of a free chunk of pages.
+ *     We should save the page if it isn't Nosave, and is not in the range
+ *     of pages statically defined as 'unsaveable', and it isn't a part of
+ *     a free chunk of pages.
  */
 
 static struct page *saveable_page(unsigned long pfn)
@@ -701,76 +679,130 @@ static struct page *saveable_page(unsigned long pfn)
 
        page = pfn_to_page(pfn);
 
-       if (PageNosave(page))
+       BUG_ON(PageHighMem(page));
+
+       if (PageNosave(page) || PageNosaveFree(page))
                return NULL;
+
        if (PageReserved(page) && pfn_is_nosave(pfn))
                return NULL;
-       if (PageNosaveFree(page))
-               return NULL;
 
        return page;
 }
 
+/**
+ *     count_data_pages - compute the total number of saveable non-highmem
+ *     pages.
+ */
+
 unsigned int count_data_pages(void)
 {
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;
 
-       for_each_zone (zone) {
+       for_each_zone(zone) {
                if (is_highmem(zone))
                        continue;
+
                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-                       n += !!saveable_page(pfn);
+                       if (saveable_page(pfn))
+                               n++;
        }
        return n;
 }
 
-static inline void copy_data_page(long *dst, long *src)
+/* This is needed, because copy_page and memcpy are not usable for copying
+ * task structs.
+ */
+static inline void do_copy_page(long *dst, long *src)
 {
        int n;
 
-       /* copy_page and memcpy are not usable for copying task structs. */
        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
 }
 
+#ifdef CONFIG_HIGHMEM
+static inline struct page *
+page_is_saveable(struct zone *zone, unsigned long pfn)
+{
+       return is_highmem(zone) ?
+                       saveable_highmem_page(pfn) : saveable_page(pfn);
+}
+
+static inline void
+copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+{
+       struct page *s_page, *d_page;
+       void *src, *dst;
+
+       s_page = pfn_to_page(src_pfn);
+       d_page = pfn_to_page(dst_pfn);
+       if (PageHighMem(s_page)) {
+               src = kmap_atomic(s_page, KM_USER0);
+               dst = kmap_atomic(d_page, KM_USER1);
+               do_copy_page(dst, src);
+               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(dst, KM_USER1);
+       } else {
+               src = page_address(s_page);
+               if (PageHighMem(d_page)) {
+                       /* Page pointed to by src may contain some kernel
+                        * data modified by kmap_atomic()
+                        */
+                       do_copy_page(buffer, src);
+                       dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
+                       memcpy(dst, buffer, PAGE_SIZE);
+                       kunmap_atomic(dst, KM_USER0);
+               } else {
+                       dst = page_address(d_page);
+                       do_copy_page(dst, src);
+               }
+       }
+}
+#else
+#define page_is_saveable(zone, pfn)    saveable_page(pfn)
+
+static inline void
+copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+{
+       do_copy_page(page_address(pfn_to_page(dst_pfn)),
+                       page_address(pfn_to_page(src_pfn)));
+}
+#endif /* CONFIG_HIGHMEM */
+
 static void
 copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
 {
        struct zone *zone;
        unsigned long pfn;
 
-       for_each_zone (zone) {
+       for_each_zone(zone) {
                unsigned long max_zone_pfn;
 
-               if (is_highmem(zone))
-                       continue;
-
                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-                       if (saveable_page(pfn))
+                       if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        do {
                pfn = memory_bm_next_pfn(orig_bm);
-               if (likely(pfn != BM_END_OF_MAP)) {
-                       struct page *page;
-                       void *src;
-
-                       page = pfn_to_page(pfn);
-                       src = page_address(page);
-                       page = pfn_to_page(memory_bm_next_pfn(copy_bm));
-                       copy_data_page(page_address(page), src);
-               }
+               if (likely(pfn != BM_END_OF_MAP))
+                       copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        } while (pfn != BM_END_OF_MAP);
 }
 
+/* Total number of image pages */
+static unsigned int nr_copy_pages;
+/* Number of pages needed for saving the original pfns of the image pages */
+static unsigned int nr_meta_pages;
+
 /**
  *     swsusp_free - free pages allocated for the suspend.
  *
@@ -792,7 +824,7 @@ void swsusp_free(void)
                                if (PageNosave(page) && PageNosaveFree(page)) {
                                        ClearPageNosave(page);
                                        ClearPageNosaveFree(page);
-                                       free_page((long) page_address(page));
+                                       __free_page(page);
                                }
                        }
        }
@@ -802,34 +834,108 @@ void swsusp_free(void)
        buffer = NULL;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+  *    count_pages_for_highmem - compute the number of non-highmem pages
+  *    that will be necessary for creating copies of highmem pages.
+  */
+
+static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
+{
+       unsigned int free_highmem = count_free_highmem_pages();
+
+       if (free_highmem >= nr_highmem)
+               nr_highmem = 0;
+       else
+               nr_highmem -= free_highmem;
+
+       return nr_highmem;
+}
+#else
+static unsigned int
+count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
+#endif /* CONFIG_HIGHMEM */
 
 /**
- *     enough_free_mem - Make sure we enough free memory to snapshot.
- *
- *     Returns TRUE or FALSE after checking the number of available
- *     free pages.
+ *     enough_free_mem - Make sure we have enough free memory for the
+ *     snapshot image.
  */
 
-static int enough_free_mem(unsigned int nr_pages)
+static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
 {
        struct zone *zone;
        unsigned int free = 0, meta = 0;
 
-       for_each_zone (zone)
-               if (!is_highmem(zone)) {
+       for_each_zone(zone) {
+               meta += snapshot_additional_pages(zone);
+               if (!is_highmem(zone))
                        free += zone->free_pages;
-                       meta += snapshot_additional_pages(zone);
-               }
+       }
 
-       pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n",
+       nr_pages += count_pages_for_highmem(nr_highmem);
+       pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
                nr_pages, PAGES_FOR_IO, meta, free);
 
        return free > nr_pages + PAGES_FOR_IO + meta;
 }
 
+#ifdef CONFIG_HIGHMEM
+/**
+ *     get_highmem_buffer - if there are some highmem pages in the suspend
+ *     image, we may need the buffer to copy them and/or load their data.
+ */
+
+static inline int get_highmem_buffer(int safe_needed)
+{
+       buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
+       return buffer ? 0 : -ENOMEM;
+}
+
+/**
+ *     alloc_highmem_image_pages - allocate some highmem pages for the image.
+ *     Try to allocate as many pages as needed, but if the number of free
+ *     highmem pages is smaller than that, allocate them all.
+ */
+
+static inline unsigned int
+alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
+{
+       unsigned int to_alloc = count_free_highmem_pages();
+
+       if (to_alloc > nr_highmem)
+               to_alloc = nr_highmem;
+
+       nr_highmem -= to_alloc;
+       while (to_alloc-- > 0) {
+               struct page *page;
+
+               page = alloc_image_page(__GFP_HIGHMEM);
+               memory_bm_set_bit(bm, page_to_pfn(page));
+       }
+       return nr_highmem;
+}
+#else
+static inline int get_highmem_buffer(int safe_needed) { return 0; }
+
+static inline unsigned int
+alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
+#endif /* CONFIG_HIGHMEM */
+
+/**
+ *     swsusp_alloc - allocate memory for the suspend image
+ *
+ *     We first try to allocate as many highmem pages as there are
+ *     saveable highmem pages in the system.  If that fails, we allocate
+ *     non-highmem pages for the copies of the remaining highmem ones.
+ *
+ *     In this approach it is likely that the copies of highmem pages will
+ *     also be located in the high memory, because of the way in which
+ *     copy_data_pages() works.
+ */
+
 static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
-               unsigned int nr_pages)
+               unsigned int nr_pages, unsigned int nr_highmem)
 {
        int error;
 
@@ -841,46 +947,61 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
        if (error)
                goto Free;
 
+       if (nr_highmem > 0) {
+               error = get_highmem_buffer(PG_ANY);
+               if (error)
+                       goto Free;
+
+               nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
+       }
        while (nr_pages-- > 0) {
-               struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+               struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
+
                if (!page)
                        goto Free;
 
-               SetPageNosave(page);
-               SetPageNosaveFree(page);
                memory_bm_set_bit(copy_bm, page_to_pfn(page));
        }
        return 0;
 
-Free:
+ Free:
        swsusp_free();
        return -ENOMEM;
 }
 
-/* Memory bitmap used for marking saveable pages */
+/* Memory bitmap used for marking saveable pages (during suspend) or the
+ * suspend image pages (during resume)
+ */
 static struct memory_bitmap orig_bm;
-/* Memory bitmap used for marking allocated pages that will contain the copies
- * of saveable pages
+/* Memory bitmap used on suspend for marking allocated pages that will contain
+ * the copies of saveable pages.  During resume it is initially used for
+ * marking the suspend image pages, but then its set bits are duplicated in
+ * @orig_bm and it is released.  Next, on systems with high memory, it may be
+ * used for marking "safe" highmem pages, but it has to be reinitialized for
+ * this purpose.
  */
 static struct memory_bitmap copy_bm;
 
 asmlinkage int swsusp_save(void)
 {
-       unsigned int nr_pages;
+       unsigned int nr_pages, nr_highmem;
 
-       pr_debug("swsusp: critical section: \n");
+       printk("swsusp: critical section: \n");
 
        drain_local_pages();
        nr_pages = count_data_pages();
-       printk("swsusp: Need to copy %u pages\n", nr_pages);
+       nr_highmem = count_highmem_pages();
+       printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
 
-       if (!enough_free_mem(nr_pages)) {
+       if (!enough_free_mem(nr_pages, nr_highmem)) {
                printk(KERN_ERR "swsusp: Not enough free memory\n");
                return -ENOMEM;
        }
 
-       if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages))
+       if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
+               printk(KERN_ERR "swsusp: Memory allocation failed\n");
                return -ENOMEM;
+       }
 
        /* During allocating of suspend pagedir, new cold pages may appear.
         * Kill them.
@@ -894,10 +1015,12 @@ asmlinkage int swsusp_save(void)
         * touch swap space! Except we must write out our image of course.
         */
 
+       nr_pages += nr_highmem;
        nr_copy_pages = nr_pages;
-       nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;
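+       /* Each meta page holds PAGE_SIZE / sizeof(long) packed image PFNs */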
+       nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
 
        printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages);
+
        return 0;
 }
 
@@ -960,7 +1083,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
 
        if (!buffer) {
                /* This makes the buffer be freed by swsusp_free() */
-               buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
+               buffer = get_image_page(GFP_ATOMIC, PG_ANY);
                if (!buffer)
                        return -ENOMEM;
        }
@@ -975,9 +1098,23 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
                        memset(buffer, 0, PAGE_SIZE);
                        pack_pfns(buffer, &orig_bm);
                } else {
-                       unsigned long pfn = memory_bm_next_pfn(&copy_bm);
+                       struct page *page;
 
-                       handle->buffer = page_address(pfn_to_page(pfn));
+                       page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
+                       if (PageHighMem(page)) {
+                               /* Highmem pages are copied to the buffer,
+                                * because we can't return with a kmapped
+                                * highmem page (we may not be called again).
+                                */
+                               void *kaddr;
+
+                               kaddr = kmap_atomic(page, KM_USER0);
+                               memcpy(buffer, kaddr, PAGE_SIZE);
+                               kunmap_atomic(kaddr, KM_USER0);
+                               handle->buffer = buffer;
+                       } else {
+                               handle->buffer = page_address(page);
+                       }
                }
                handle->prev = handle->cur;
        }
@@ -1005,7 +1142,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
        unsigned long pfn, max_zone_pfn;
 
        /* Clear page flags */
-       for_each_zone (zone) {
+       for_each_zone(zone) {
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (pfn_valid(pfn))
@@ -1101,6 +1238,218 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
        }
 }
 
+/* List of "safe" pages that may be used to store data loaded from the suspend
+ * image
+ */
+static struct linked_page *safe_pages_list;
+
+#ifdef CONFIG_HIGHMEM
+/* struct highmem_pbe is used for creating the list of highmem pages that
+ * should be restored atomically during the resume from disk, because the page
+ * frames they have occupied before the suspend are in use.
+ */
+struct highmem_pbe {
+       struct page *copy_page; /* data is here now */
+       struct page *orig_page; /* data was here before the suspend */
+       struct highmem_pbe *next;
+};
+
+/* List of highmem PBEs needed for restoring the highmem pages that were
+ * allocated before the suspend and included in the suspend image, but have
+ * also been allocated by the "resume" kernel, so their contents cannot be
+ * written directly to their "original" page frames.
+ */
+static struct highmem_pbe *highmem_pblist;
+
+/**
+ *     count_highmem_image_pages - compute the number of highmem pages in the
+ *     suspend image.  The bits in the memory bitmap @bm that correspond to the
+ *     image pages are assumed to be set.
+ */
+
+static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
+{
+       unsigned long pfn;
+       unsigned int cnt = 0;
+
+       memory_bm_position_reset(bm);
+       pfn = memory_bm_next_pfn(bm);
+       while (pfn != BM_END_OF_MAP) {
+               if (PageHighMem(pfn_to_page(pfn)))
+                       cnt++;
+
+               pfn = memory_bm_next_pfn(bm);
+       }
+       return cnt;
+}
+
+/**
+ *     prepare_highmem_image - try to allocate as many highmem pages as
+ *     there are highmem image pages (@nr_highmem_p points to the variable
+ *     containing the number of highmem image pages).  The pages that are
+ *     "safe" (ie. will not be overwritten when the suspend image is
+ *     restored) have the corresponding bits set in @bm (it must be
+ *     uninitialized).
+ *
+ *     NOTE: This function should not be called if there are no highmem
+ *     image pages.
+ */
+
+static unsigned int safe_highmem_pages;
+
+static struct memory_bitmap *safe_highmem_bm;
+
+static int
+prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+{
+       unsigned int to_alloc;
+
+       if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
+               return -ENOMEM;
+
+       if (get_highmem_buffer(PG_SAFE))
+               return -ENOMEM;
+
+       to_alloc = count_free_highmem_pages();
+       if (to_alloc > *nr_highmem_p)
+               to_alloc = *nr_highmem_p;
+       else
+               *nr_highmem_p = to_alloc;
+
+       safe_highmem_pages = 0;
+       while (to_alloc-- > 0) {
+               struct page *page;
+
+               page = alloc_page(__GFP_HIGHMEM);
+               if (!PageNosaveFree(page)) {
+                       /* The page is "safe", set its bit the bitmap */
+                       memory_bm_set_bit(bm, page_to_pfn(page));
+                       safe_highmem_pages++;
+               }
+               /* Mark the page as allocated */
+               SetPageNosave(page);
+               SetPageNosaveFree(page);
+       }
+       memory_bm_position_reset(bm);
+       safe_highmem_bm = bm;
+       return 0;
+}
+
+/**
+ *     get_highmem_page_buffer - for given highmem image page find the buffer
+ *     that suspend_write_next() should set for its caller to write to.
+ *
+ *     If the page is to be saved to its "original" page frame or a copy of
+ *     the page is to be made in the highmem, @buffer is returned.  Otherwise,
+ *     the copy of the page is to be made in normal memory, so the address of
+ *     the copy is returned.
+ *
+ *     If @buffer is returned, the caller of suspend_write_next() will write
+ *     the page's contents to @buffer, so they will have to be copied to the
+ *     right location on the next call to suspend_write_next() and it is done
+ *     with the help of copy_last_highmem_page().  For this purpose, if
+ *     @buffer is returned, @last_highmem_page is set to the page to which
+ *     the data will have to be copied from @buffer.
+ */
+
+static struct page *last_highmem_page;
+
+static void *
+get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+{
+       struct highmem_pbe *pbe;
+       void *kaddr;
+
+       if (PageNosave(page) && PageNosaveFree(page)) {
+               /* We have allocated the "original" page frame and we can
+                * use it directly to store the loaded page.
+                */
+               last_highmem_page = page;
+               return buffer;
+       }
+       /* The "original" page frame has not been allocated and we have to
+        * use a "safe" page frame to store the loaded page.
+        */
+       pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
+       if (!pbe) {
+               swsusp_free();
+               return NULL;
+       }
+       pbe->orig_page = page;
+       if (safe_highmem_pages > 0) {
+               struct page *tmp;
+
+               /* Copy of the page will be stored in high memory */
+               kaddr = buffer;
+               tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
+               safe_highmem_pages--;
+               last_highmem_page = tmp;
+               pbe->copy_page = tmp;
+       } else {
+               /* Copy of the page will be stored in normal memory */
+               kaddr = safe_pages_list;
+               safe_pages_list = safe_pages_list->next;
+               pbe->copy_page = virt_to_page(kaddr);
+       }
+       pbe->next = highmem_pblist;
+       highmem_pblist = pbe;
+       return kaddr;
+}
+
+/**
+ *     copy_last_highmem_page - copy the contents of a highmem image page
+ *     from @buffer, where the caller of snapshot_write_next() has placed
+ *     them, to the right location represented by @last_highmem_page.
+ */
+
+static void copy_last_highmem_page(void)
+{
+       if (last_highmem_page) {
+               void *dst;
+
+               dst = kmap_atomic(last_highmem_page, KM_USER0);
+               memcpy(dst, buffer, PAGE_SIZE);
+               kunmap_atomic(dst, KM_USER0);
+               last_highmem_page = NULL;
+       }
+}
+
+static inline int last_highmem_page_copied(void)
+{
+       return !last_highmem_page;
+}
+
+static inline void free_highmem_data(void)
+{
+       if (safe_highmem_bm)
+               memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
+
+       if (buffer)
+               free_image_page(buffer, PG_UNSAFE_CLEAR);
+}
+#else
+static inline int get_safe_write_buffer(void) { return 0; }
+
+static unsigned int
+count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
+
+static inline int
+prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
+{
+       return 0;
+}
+
+static inline void *
+get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
+{
+       return NULL;
+}
+
+static inline void copy_last_highmem_page(void) {}
+static inline int last_highmem_page_copied(void) { return 1; }
+static inline void free_highmem_data(void) {}
+#endif /* CONFIG_HIGHMEM */
+
 /**
  *     prepare_image - use the memory bitmap @bm to mark the pages that will
  *     be overwritten in the process of restoring the system memory state
@@ -1110,20 +1459,25 @@ unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
  *     The idea is to allocate a new memory bitmap first and then allocate
  *     as many pages as needed for the image data, but not to assign these
  *     pages to specific tasks initially.  Instead, we just mark them as
- *     allocated and create a list of "safe" pages that will be used later.
+ *     allocated and create a list of "safe" pages that will be used
+ *     later.  On systems with high memory a list of "safe" highmem pages is
+ *     also created.
  */
 
 #define PBES_PER_LINKED_PAGE   (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
 
-static struct linked_page *safe_pages_list;
-
 static int
 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 {
-       unsigned int nr_pages;
+       unsigned int nr_pages, nr_highmem;
        struct linked_page *sp_list, *lp;
        int error;
 
+       /* If there is no highmem, the buffer will not be necessary */
+       free_image_page(buffer, PG_UNSAFE_CLEAR);
+       buffer = NULL;
+
+       nr_highmem = count_highmem_image_pages(bm);
        error = mark_unsafe_pages(bm);
        if (error)
                goto Free;
@@ -1134,6 +1488,11 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
 
        duplicate_memory_bitmap(new_bm, bm);
        memory_bm_free(bm, PG_UNSAFE_KEEP);
+       if (nr_highmem > 0) {
+               error = prepare_highmem_image(bm, &nr_highmem);
+               if (error)
+                       goto Free;
+       }
        /* Reserve some safe pages for potential later use.
         *
         * NOTE: This way we make sure there will be enough safe pages for the
@@ -1142,10 +1501,10 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
         */
        sp_list = NULL;
        /* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
-       nr_pages = nr_copy_pages - allocated_unsafe_pages;
+       nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
        nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
        while (nr_pages > 0) {
-               lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
+               lp = get_image_page(GFP_ATOMIC, PG_SAFE);
                if (!lp) {
                        error = -ENOMEM;
                        goto Free;
@@ -1156,7 +1515,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
        }
        /* Preallocate memory for the image */
        safe_pages_list = NULL;
-       nr_pages = nr_copy_pages - allocated_unsafe_pages;
+       nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
        while (nr_pages > 0) {
                lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
                if (!lp) {
@@ -1181,7 +1540,7 @@ prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
        }
        return 0;
 
-Free:
+ Free:
        swsusp_free();
        return error;
 }
@@ -1196,6 +1555,9 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
        struct pbe *pbe;
        struct page *page = pfn_to_page(memory_bm_next_pfn(bm));
 
+       if (PageHighMem(page))
+               return get_highmem_page_buffer(page, ca);
+
        if (PageNosave(page) && PageNosaveFree(page))
                /* We have allocated the "original" page frame and we can
                 * use it directly to store the loaded page.
@@ -1210,12 +1572,12 @@ static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
                swsusp_free();
                return NULL;
        }
-       pbe->orig_address = (unsigned long)page_address(page);
-       pbe->address = (unsigned long)safe_pages_list;
+       pbe->orig_address = page_address(page);
+       pbe->address = safe_pages_list;
        safe_pages_list = safe_pages_list->next;
        pbe->next = restore_pblist;
        restore_pblist = pbe;
-       return (void *)pbe->address;
+       return pbe->address;
 }
 
 /**
@@ -1249,14 +1611,16 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
        if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;
 
-       if (!buffer) {
-               /* This makes the buffer be freed by swsusp_free() */
-               buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
+       if (handle->offset == 0) {
+               if (!buffer)
+                       /* This makes the buffer be freed by swsusp_free() */
+                       buffer = get_image_page(GFP_ATOMIC, PG_ANY);
+
                if (!buffer)
                        return -ENOMEM;
-       }
-       if (!handle->offset)
+
                handle->buffer = buffer;
+       }
        handle->sync_read = 1;
        if (handle->prev < handle->cur) {
                if (handle->prev == 0) {
@@ -1284,8 +1648,10 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
                                        return -ENOMEM;
                        }
                } else {
+                       copy_last_highmem_page();
                        handle->buffer = get_buffer(&orig_bm, &ca);
-                       handle->sync_read = 0;
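+                       /* If the data lands in the bounce buffer, the read
+                        * must stay synchronous, so that it can be copied to
+                        * its highmem page before the buffer is reused
+                        */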
+                       if (handle->buffer != buffer)
+                               handle->sync_read = 0;
                }
                handle->prev = handle->cur;
        }
@@ -1301,15 +1667,73 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count)
        return count;
 }
 
+/**
+ *     snapshot_write_finalize - must be called after the last call to
+ *     snapshot_write_next() in case the last page in the image happens
+ *     to be a highmem page and its contents should be stored in the
+ *     highmem.  Additionally, it releases the memory that will not be
+ *     used any more.
+ */
+
+void snapshot_write_finalize(struct snapshot_handle *handle)
+{
+       copy_last_highmem_page();
+       /* Free only if we have loaded the image entirely */
+       if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
+               memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+               free_highmem_data();
+       }
+}
+
 int snapshot_image_loaded(struct snapshot_handle *handle)
 {
-       return !(!nr_copy_pages ||
+       return !(!nr_copy_pages || !last_highmem_page_copied() ||
                        handle->cur <= nr_meta_pages + nr_copy_pages);
 }
 
-void snapshot_free_unused_memory(struct snapshot_handle *handle)
+#ifdef CONFIG_HIGHMEM
+/* Assumes that @buf is ready and points to a "safe" page */
+static inline void
+swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
 {
-       /* Free only if we have loaded the image entirely */
-       if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
-               memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
+       void *kaddr1, *kaddr2;
+
+       kaddr1 = kmap_atomic(p1, KM_USER0);
+       kaddr2 = kmap_atomic(p2, KM_USER1);
+       memcpy(buf, kaddr1, PAGE_SIZE);
+       memcpy(kaddr1, kaddr2, PAGE_SIZE);
+       memcpy(kaddr2, buf, PAGE_SIZE);
+       kunmap_atomic(kaddr1, KM_USER0);
+       kunmap_atomic(kaddr2, KM_USER1);
+}
+
+/**
+ *     restore_highmem - for each highmem page that was allocated before
+ *     the suspend and included in the suspend image, and also has been
+ *     allocated by the "resume" kernel swap its current (ie. "before
+ *     resume") contents with the previous (ie. "before suspend") one.
+ *
+ *     If the resume eventually fails, we can call this function once
+ *     again and restore the "before resume" highmem state.
+ */
+
+int restore_highmem(void)
+{
+       struct highmem_pbe *pbe = highmem_pblist;
+       void *buf;
+
+       if (!pbe)
+               return 0;
+
+       buf = get_image_page(GFP_ATOMIC, PG_SAFE);
+       if (!buf)
+               return -ENOMEM;
+
+       while (pbe) {
+               swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
+               pbe = pbe->next;
+       }
+       free_image_page(buf, PG_UNSAFE_CLEAR);
+       return 0;
 }
+#endif /* CONFIG_HIGHMEM */
index 1a3b0dd2c3fcc18b2db25fc7472560cb67edfb7e..f133d4a6d817bacfc2219476d72be55fb5d8face 100644 (file)
@@ -34,34 +34,123 @@ extern char resume_file[];
 #define SWSUSP_SIG     "S1SUSPEND"
 
 static struct swsusp_header {
-       char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
-       swp_entry_t image;
+       char reserved[PAGE_SIZE - 20 - sizeof(sector_t)];
+       sector_t image;
        char    orig_sig[10];
        char    sig[10];
 } __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
 
 /*
- * Saving part...
+ * General things
  */
 
 static unsigned short root_swap = 0xffff;
+static struct block_device *resume_bdev;
+
+/**
+ *     submit - submit BIO request.
+ *     @rw:    READ or WRITE.
+ *     @off:   physical offset of page.
+ *     @page:  page we're reading or writing.
+ *     @bio_chain: list of pending bios (for async reading)
+ *
+ *     Straight from the textbook - allocate and initialize the bio.
+ *     If we're reading, make sure the page is marked as dirty.
+ *     Then submit it and, if @bio_chain == NULL, wait.
+ */
+static int submit(int rw, pgoff_t page_off, struct page *page,
+                       struct bio **bio_chain)
+{
+       struct bio *bio;
+
+       bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+       if (!bio)
+               return -ENOMEM;
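+       /* Convert the page-sized offset into 512-byte sectors */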
+       bio->bi_sector = page_off * (PAGE_SIZE >> 9);
+       bio->bi_bdev = resume_bdev;
+       bio->bi_end_io = end_swap_bio_read;
+
+       if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+               printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
+               bio_put(bio);
+               return -EFAULT;
+       }
+
+       lock_page(page);
+       bio_get(bio);
+
+       if (bio_chain == NULL) {
+               submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+               wait_on_page_locked(page);
+               if (rw == READ)
+                       bio_set_pages_dirty(bio);
+               bio_put(bio);
+       } else {
+               if (rw == READ)
+                       get_page(page); /* These pages are freed later */
+               bio->bi_private = *bio_chain;
+               *bio_chain = bio;
+               submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+       }
+       return 0;
+}
+
+static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+       return submit(READ, page_off, virt_to_page(addr), bio_chain);
+}
+
+static int bio_write_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
+{
+       return submit(WRITE, page_off, virt_to_page(addr), bio_chain);
+}
+
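+/* Wait for all bios in the chain to complete; return -EIO if any page failed */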
+static int wait_on_bio_chain(struct bio **bio_chain)
+{
+       struct bio *bio;
+       struct bio *next_bio;
+       int ret = 0;
+
+       if (bio_chain == NULL)
+               return 0;
+
+       bio = *bio_chain;
+       if (bio == NULL)
+               return 0;
+       while (bio) {
+               struct page *page;
+
+               next_bio = bio->bi_private;
+               page = bio->bi_io_vec[0].bv_page;
+               wait_on_page_locked(page);
+               if (!PageUptodate(page) || PageError(page))
+                       ret = -EIO;
+               put_page(page);
+               bio_put(bio);
+               bio = next_bio;
+       }
+       *bio_chain = NULL;
+       return ret;
+}
+
+/*
+ * Saving part
+ */
 
-static int mark_swapfiles(swp_entry_t start)
+static int mark_swapfiles(sector_t start)
 {
        int error;
 
-       rw_swap_page_sync(READ, swp_entry(root_swap, 0),
-                         virt_to_page((unsigned long)&swsusp_header), NULL);
+       bio_read_page(swsusp_resume_block, &swsusp_header, NULL);
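+       /* Only replace the signature if the block really holds a swap header */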
        if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) ||
            !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
                memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
                memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
                swsusp_header.image = start;
-               error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0),
-                               virt_to_page((unsigned long)&swsusp_header),
-                               NULL);
+               error = bio_write_page(swsusp_resume_block,
+                                       &swsusp_header, NULL);
        } else {
-               pr_debug("swsusp: Partition is not swap space.\n");
+               printk(KERN_ERR "swsusp: Swap header not found!\n");
                error = -ENODEV;
        }
        return error;
@@ -74,12 +163,21 @@ static int mark_swapfiles(swp_entry_t start)
 
 static int swsusp_swap_check(void) /* This is called before saving image */
 {
-       int res = swap_type_of(swsusp_resume_device);
+       int res;
+
+       res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
+       if (res < 0)
+               return res;
+
+       root_swap = res;
+       resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_WRITE);
+       if (IS_ERR(resume_bdev))
+               return PTR_ERR(resume_bdev);
+
+       res = set_blocksize(resume_bdev, PAGE_SIZE);
+       if (res < 0)
+               blkdev_put(resume_bdev);
 
-       if (res >= 0) {
-               root_swap = res;
-               return 0;
-       }
        return res;
 }
 
@@ -90,36 +188,26 @@ static int swsusp_swap_check(void) /* This is called before saving image */
  *     @bio_chain:     Link the next write BIO here
  */
 
-static int write_page(void *buf, unsigned long offset, struct bio **bio_chain)
+static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
 {
-       swp_entry_t entry;
-       int error = -ENOSPC;
-
-       if (offset) {
-               struct page *page = virt_to_page(buf);
-
-               if (bio_chain) {
-                       /*
-                        * Whether or not we successfully allocated a copy page,
-                        * we take a ref on the page here.  It gets undone in
-                        * wait_on_bio_chain().
-                        */
-                       struct page *page_copy;
-                       page_copy = alloc_page(GFP_ATOMIC);
-                       if (page_copy == NULL) {
-                               WARN_ON_ONCE(1);
-                               bio_chain = NULL;       /* Go synchronous */
-                               get_page(page);
-                       } else {
-                               memcpy(page_address(page_copy),
-                                       page_address(page), PAGE_SIZE);
-                               page = page_copy;
-                       }
+       void *src;
+
+       if (!offset)
+               return -ENOSPC;
+
+       if (bio_chain) {
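+               /* Copy the data, so that the caller's buffer can be reused
+                * while the asynchronous write is still in flight
+                */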
+               src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+               if (src) {
+                       memcpy(src, buf, PAGE_SIZE);
+               } else {
+                       WARN_ON_ONCE(1);
+                       bio_chain = NULL;       /* Go synchronous */
+                       src = buf;
                }
-               entry = swp_entry(root_swap, offset);
-               error = rw_swap_page_sync(WRITE, entry, page, bio_chain);
+       } else {
+               src = buf;
        }
-       return error;
+       return bio_write_page(offset, src, bio_chain);
 }
 
 /*
@@ -137,11 +225,11 @@ static int write_page(void *buf, unsigned long offset, struct bio **bio_chain)
  *     at a time.
  */
 
-#define MAP_PAGE_ENTRIES       (PAGE_SIZE / sizeof(long) - 1)
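+/* One slot per map page is reserved for the link to the next map page */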
+#define MAP_PAGE_ENTRIES       (PAGE_SIZE / sizeof(sector_t) - 1)
 
 struct swap_map_page {
-       unsigned long           entries[MAP_PAGE_ENTRIES];
-       unsigned long           next_swap;
+       sector_t entries[MAP_PAGE_ENTRIES];
+       sector_t next_swap;
 };
 
 /**
@@ -151,7 +239,7 @@ struct swap_map_page {
 
 struct swap_map_handle {
        struct swap_map_page *cur;
-       unsigned long cur_swap;
+       sector_t cur_swap;
        struct bitmap_page *bitmap;
        unsigned int k;
 };
@@ -166,26 +254,6 @@ static void release_swap_writer(struct swap_map_handle *handle)
        handle->bitmap = NULL;
 }
 
-static void show_speed(struct timeval *start, struct timeval *stop,
-                       unsigned nr_pages, char *msg)
-{
-       s64 elapsed_centisecs64;
-       int centisecs;
-       int k;
-       int kps;
-
-       elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
-       do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
-       centisecs = elapsed_centisecs64;
-       if (centisecs == 0)
-               centisecs = 1;  /* avoid div-by-zero */
-       k = nr_pages * (PAGE_SIZE / 1024);
-       kps = (k * 100) / centisecs;
-       printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
-                       centisecs / 100, centisecs % 100,
-                       kps / 1000, (kps % 1000) / 10);
-}
-
 static int get_swap_writer(struct swap_map_handle *handle)
 {
        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
@@ -196,7 +264,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
                release_swap_writer(handle);
                return -ENOMEM;
        }
-       handle->cur_swap = alloc_swap_page(root_swap, handle->bitmap);
+       handle->cur_swap = alloc_swapdev_block(root_swap, handle->bitmap);
        if (!handle->cur_swap) {
                release_swap_writer(handle);
                return -ENOSPC;
@@ -205,43 +273,15 @@ static int get_swap_writer(struct swap_map_handle *handle)
        return 0;
 }
 
-static int wait_on_bio_chain(struct bio **bio_chain)
-{
-       struct bio *bio;
-       struct bio *next_bio;
-       int ret = 0;
-
-       if (bio_chain == NULL)
-               return 0;
-
-       bio = *bio_chain;
-       if (bio == NULL)
-               return 0;
-       while (bio) {
-               struct page *page;
-
-               next_bio = bio->bi_private;
-               page = bio->bi_io_vec[0].bv_page;
-               wait_on_page_locked(page);
-               if (!PageUptodate(page) || PageError(page))
-                       ret = -EIO;
-               put_page(page);
-               bio_put(bio);
-               bio = next_bio;
-       }
-       *bio_chain = NULL;
-       return ret;
-}
-
 static int swap_write_page(struct swap_map_handle *handle, void *buf,
                                struct bio **bio_chain)
 {
        int error = 0;
-       unsigned long offset;
+       sector_t offset;
 
        if (!handle->cur)
                return -EINVAL;
-       offset = alloc_swap_page(root_swap, handle->bitmap);
+       offset = alloc_swapdev_block(root_swap, handle->bitmap);
        error = write_page(buf, offset, bio_chain);
        if (error)
                return error;
@@ -250,7 +290,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
                error = wait_on_bio_chain(bio_chain);
                if (error)
                        goto out;
-               offset = alloc_swap_page(root_swap, handle->bitmap);
+               offset = alloc_swapdev_block(root_swap, handle->bitmap);
                if (!offset)
                        return -ENOSPC;
                handle->cur->next_swap = offset;
@@ -261,7 +301,7 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
                handle->cur_swap = offset;
                handle->k = 0;
        }
-out:
+ out:
        return error;
 }
 
@@ -315,7 +355,7 @@ static int save_image(struct swap_map_handle *handle,
                error = err2;
        if (!error)
                printk("\b\b\b\bdone\n");
-       show_speed(&start, &stop, nr_to_write, "Wrote");
+       swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
        return error;
 }
 
@@ -350,100 +390,50 @@ int swsusp_write(void)
        struct swsusp_info *header;
        int error;
 
-       if ((error = swsusp_swap_check())) {
+       error = swsusp_swap_check();
+       if (error) {
                printk(KERN_ERR "swsusp: Cannot find swap device, try "
                                "swapon -a.\n");
                return error;
        }
        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_read_next(&snapshot, PAGE_SIZE);
-       if (error < PAGE_SIZE)
-               return error < 0 ? error : -EFAULT;
+       if (error < PAGE_SIZE) {
+               if (error >= 0)
+                       error = -EFAULT;
+
+               goto out;
+       }
        header = (struct swsusp_info *)data_of(snapshot);
        if (!enough_swap(header->pages)) {
                printk(KERN_ERR "swsusp: Not enough free swap\n");
-               return -ENOSPC;
+               error = -ENOSPC;
+               goto out;
        }
        error = get_swap_writer(&handle);
        if (!error) {
-               unsigned long start = handle.cur_swap;
+               sector_t start = handle.cur_swap;
+
                error = swap_write_page(&handle, header, NULL);
                if (!error)
                        error = save_image(&handle, &snapshot,
                                        header->pages - 1);
+
                if (!error) {
                        flush_swap_writer(&handle);
                        printk("S");
-                       error = mark_swapfiles(swp_entry(root_swap, start));
+                       error = mark_swapfiles(start);
                        printk("|\n");
                }
        }
        if (error)
                free_all_swap_pages(root_swap, handle.bitmap);
        release_swap_writer(&handle);
+ out:
+       swsusp_close();
        return error;
 }
 
-static struct block_device *resume_bdev;
-
-/**
- *     submit - submit BIO request.
- *     @rw:    READ or WRITE.
- *     @off    physical offset of page.
- *     @page:  page we're reading or writing.
- *     @bio_chain: list of pending biod (for async reading)
- *
- *     Straight from the textbook - allocate and initialize the bio.
- *     If we're reading, make sure the page is marked as dirty.
- *     Then submit it and, if @bio_chain == NULL, wait.
- */
-static int submit(int rw, pgoff_t page_off, struct page *page,
-                       struct bio **bio_chain)
-{
-       struct bio *bio;
-
-       bio = bio_alloc(GFP_ATOMIC, 1);
-       if (!bio)
-               return -ENOMEM;
-       bio->bi_sector = page_off * (PAGE_SIZE >> 9);
-       bio->bi_bdev = resume_bdev;
-       bio->bi_end_io = end_swap_bio_read;
-
-       if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
-               printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
-               bio_put(bio);
-               return -EFAULT;
-       }
-
-       lock_page(page);
-       bio_get(bio);
-
-       if (bio_chain == NULL) {
-               submit_bio(rw | (1 << BIO_RW_SYNC), bio);
-               wait_on_page_locked(page);
-               if (rw == READ)
-                       bio_set_pages_dirty(bio);
-               bio_put(bio);
-       } else {
-               if (rw == READ)
-                       get_page(page); /* These pages are freed later */
-               bio->bi_private = *bio_chain;
-               *bio_chain = bio;
-               submit_bio(rw | (1 << BIO_RW_SYNC), bio);
-       }
-       return 0;
-}
-
-static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain)
-{
-       return submit(READ, page_off, virt_to_page(addr), bio_chain);
-}
-
-static int bio_write_page(pgoff_t page_off, void *addr)
-{
-       return submit(WRITE, page_off, virt_to_page(addr), NULL);
-}
-
 /**
  *     The following functions allow us to read data using a swap map
  *     in a file-alike way
@@ -456,17 +446,18 @@ static void release_swap_reader(struct swap_map_handle *handle)
        handle->cur = NULL;
 }
 
-static int get_swap_reader(struct swap_map_handle *handle,
-                                      swp_entry_t start)
+static int get_swap_reader(struct swap_map_handle *handle, sector_t start)
 {
        int error;
 
-       if (!swp_offset(start))
+       if (!start)
                return -EINVAL;
-       handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+
+       handle->cur = (struct swap_map_page *)get_zeroed_page(__GFP_WAIT | __GFP_HIGH);
        if (!handle->cur)
                return -ENOMEM;
-       error = bio_read_page(swp_offset(start), handle->cur, NULL);
+
+       error = bio_read_page(start, handle->cur, NULL);
        if (error) {
                release_swap_reader(handle);
                return error;
@@ -478,7 +469,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
 static int swap_read_page(struct swap_map_handle *handle, void *buf,
                                struct bio **bio_chain)
 {
-       unsigned long offset;
+       sector_t offset;
        int error;
 
        if (!handle->cur)
@@ -547,11 +538,11 @@ static int load_image(struct swap_map_handle *handle,
                error = err2;
        if (!error) {
                printk("\b\b\b\bdone\n");
-               snapshot_free_unused_memory(snapshot);
+               snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        error = -ENODATA;
        }
-       show_speed(&start, &stop, nr_to_read, "Read");
+       swsusp_show_speed(&start, &stop, nr_to_read, "Read");
        return error;
 }
 
@@ -600,12 +591,16 @@ int swsusp_check(void)
        if (!IS_ERR(resume_bdev)) {
                set_blocksize(resume_bdev, PAGE_SIZE);
                memset(&swsusp_header, 0, sizeof(swsusp_header));
-               if ((error = bio_read_page(0, &swsusp_header, NULL)))
+               error = bio_read_page(swsusp_resume_block,
+                                       &swsusp_header, NULL);
+               if (error)
                        return error;
+
                if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
                        memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
                        /* Reset swap signature now */
-                       error = bio_write_page(0, &swsusp_header);
+                       error = bio_write_page(swsusp_resume_block,
+                                               &swsusp_header, NULL);
                } else {
                        return -EINVAL;
                }
index 0b66659dc516f13eed980412dc196781ffa6892e..31aa0390c777d186652fe1b5dc3bfb4aafbaa338 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
 #include <linux/highmem.h>
+#include <linux/time.h>
 
 #include "power.h"
 
@@ -64,10 +65,8 @@ int in_suspend __nosavedata = 0;
 
 #ifdef CONFIG_HIGHMEM
 unsigned int count_highmem_pages(void);
-int save_highmem(void);
 int restore_highmem(void);
 #else
-static inline int save_highmem(void) { return 0; }
 static inline int restore_highmem(void) { return 0; }
 static inline unsigned int count_highmem_pages(void) { return 0; }
 #endif
@@ -134,18 +133,18 @@ static int bitmap_set(struct bitmap_page *bitmap, unsigned long bit)
        return 0;
 }
 
-unsigned long alloc_swap_page(int swap, struct bitmap_page *bitmap)
+sector_t alloc_swapdev_block(int swap, struct bitmap_page *bitmap)
 {
        unsigned long offset;
 
        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
-               if (bitmap_set(bitmap, offset)) {
+               if (bitmap_set(bitmap, offset))
                        swap_free(swp_entry(swap, offset));
-                       offset = 0;
-               }
+               else
+                       return swapdev_block(swap, offset);
        }
-       return offset;
+       return 0;
 }
 
 void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
@@ -165,6 +164,34 @@ void free_all_swap_pages(int swap, struct bitmap_page *bitmap)
        }
 }
 
+/**
+ *     swsusp_show_speed - print the time elapsed between two events represented by
+ *     @start and @stop
+ *
+ *     @nr_pages -     number of pages processed between @start and @stop
+ *     @msg -          introductory message to print
+ */
+
+void swsusp_show_speed(struct timeval *start, struct timeval *stop,
+                       unsigned nr_pages, char *msg)
+{
+       s64 elapsed_centisecs64;
+       int centisecs;
+       int k;
+       int kps;
+
+       elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
+       do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
+       centisecs = elapsed_centisecs64;
+       if (centisecs == 0)
+               centisecs = 1;  /* avoid div-by-zero */
+       k = nr_pages * (PAGE_SIZE / 1024);
+       kps = (k * 100) / centisecs;
+       printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
+                       centisecs / 100, centisecs % 100,
+                       kps / 1000, (kps % 1000) / 10);
+}
+
 /**
  *     swsusp_shrink_memory -  Try to free as much memory as needed
  *
@@ -184,23 +211,37 @@ static inline unsigned long __shrink_memory(long tmp)
 
 int swsusp_shrink_memory(void)
 {
-       long size, tmp;
+       long tmp;
        struct zone *zone;
        unsigned long pages = 0;
        unsigned int i = 0;
        char *p = "-\\|/";
+       struct timeval start, stop;
 
        printk("Shrinking memory...  ");
+       do_gettimeofday(&start);
        do {
-               size = 2 * count_highmem_pages();
-               size += size / 50 + count_data_pages() + PAGES_FOR_IO;
+               long size, highmem_size;
+
+               highmem_size = count_highmem_pages();
+               size = count_data_pages() + PAGES_FOR_IO;
                tmp = size;
+               size += highmem_size;
                for_each_zone (zone)
-                       if (!is_highmem(zone) && populated_zone(zone)) {
-                               tmp -= zone->free_pages;
-                               tmp += zone->lowmem_reserve[ZONE_NORMAL];
-                               tmp += snapshot_additional_pages(zone);
+                       if (populated_zone(zone)) {
+                               if (is_highmem(zone)) {
+                                       highmem_size -= zone->free_pages;
+                               } else {
+                                       tmp -= zone->free_pages;
+                                       tmp += zone->lowmem_reserve[ZONE_NORMAL];
+                                       tmp += snapshot_additional_pages(zone);
+                               }
                        }
+
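+               /* A highmem shortfall has to be made up with non-highmem pages */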
+               if (highmem_size < 0)
+                       highmem_size = 0;
+
+               tmp += highmem_size;
                if (tmp > 0) {
                        tmp = __shrink_memory(tmp);
                        if (!tmp)
@@ -212,7 +253,9 @@ int swsusp_shrink_memory(void)
                }
                printk("\b%c", p[i++%4]);
        } while (tmp > 0);
+       do_gettimeofday(&stop);
        printk("\bdone (%lu pages freed)\n", pages);
+       swsusp_show_speed(&start, &stop, pages, "Freed");
 
        return 0;
 }
@@ -223,6 +266,7 @@ int swsusp_suspend(void)
 
        if ((error = arch_prepare_suspend()))
                return error;
+
        local_irq_disable();
        /* At this point, device_suspend() has been called, but *not*
         * device_power_down(). We *must* device_power_down() now.
@@ -235,23 +279,16 @@ int swsusp_suspend(void)
                goto Enable_irqs;
        }
 
-       if ((error = save_highmem())) {
-               printk(KERN_ERR "swsusp: Not enough free pages for highmem\n");
-               goto Restore_highmem;
-       }
-
        save_processor_state();
        if ((error = swsusp_arch_suspend()))
                printk(KERN_ERR "Error %d suspending\n", error);
        /* Restore control flow magically appears here */
        restore_processor_state();
-Restore_highmem:
-       restore_highmem();
        /* NOTE:  device_power_up() is just a resume() for devices
         * that suspended with irqs off ... no overall powerup.
         */
        device_power_up();
-Enable_irqs:
+ Enable_irqs:
        local_irq_enable();
        return error;
 }
@@ -268,18 +305,23 @@ int swsusp_resume(void)
                printk(KERN_ERR "Some devices failed to power down, very bad\n");
        /* We'll ignore saved state, but this gets preempt count (etc) right */
        save_processor_state();
-       error = swsusp_arch_resume();
-       /* Code below is only ever reached in case of failure. Otherwise
-        * execution continues at place where swsusp_arch_suspend was called
-         */
-       BUG_ON(!error);
+       error = restore_highmem();
+       if (!error) {
+               error = swsusp_arch_resume();
+               /* The code below is only ever reached in case of a failure.
+                * Otherwise execution continues at the place where
+                * swsusp_arch_suspend() was called
+                */
+               BUG_ON(!error);
+               /* This call to restore_highmem() undoes the previous one */
+               restore_highmem();
+       }
        /* The only reason why swsusp_arch_resume() can fail is memory being
         * very tight, so we have to free it as soon as we can to avoid
         * subsequent failures
         */
        swsusp_free();
        restore_processor_state();
-       restore_highmem();
        touch_softlockup_watchdog();
        device_power_up();
        local_irq_enable();
index d991d3b0e5a4e326ea29b6b7fc50c4a926dd5e3e..89443b85163baa012d5a785c3491c21fe047ec75 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
+#include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
@@ -21,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/freezer.h>
 
 #include <asm/uaccess.h>
 
@@ -54,7 +56,8 @@ static int snapshot_open(struct inode *inode, struct file *filp)
        filp->private_data = data;
        memset(&data->handle, 0, sizeof(struct snapshot_handle));
        if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
-               data->swap = swsusp_resume_device ? swap_type_of(swsusp_resume_device) : -1;
+               data->swap = swsusp_resume_device ?
+                               swap_type_of(swsusp_resume_device, 0) : -1;
                data->mode = O_RDONLY;
        } else {
                data->swap = -1;
@@ -76,10 +79,10 @@ static int snapshot_release(struct inode *inode, struct file *filp)
        free_all_swap_pages(data->swap, data->bitmap);
        free_bitmap(data->bitmap);
        if (data->frozen) {
-               down(&pm_sem);
+               mutex_lock(&pm_mutex);
                thaw_processes();
                enable_nonboot_cpus();
-               up(&pm_sem);
+               mutex_unlock(&pm_mutex);
        }
        atomic_inc(&device_available);
        return 0;
@@ -124,7 +127,8 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
 {
        int error = 0;
        struct snapshot_data *data;
-       loff_t offset, avail;
+       loff_t avail;
+       sector_t offset;
 
        if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
                return -ENOTTY;
@@ -140,7 +144,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
        case SNAPSHOT_FREEZE:
                if (data->frozen)
                        break;
-               down(&pm_sem);
+               mutex_lock(&pm_mutex);
                error = disable_nonboot_cpus();
                if (!error) {
                        error = freeze_processes();
@@ -150,7 +154,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                                error = -EBUSY;
                        }
                }
-               up(&pm_sem);
+               mutex_unlock(&pm_mutex);
                if (!error)
                        data->frozen = 1;
                break;
@@ -158,10 +162,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
        case SNAPSHOT_UNFREEZE:
                if (!data->frozen)
                        break;
-               down(&pm_sem);
+               mutex_lock(&pm_mutex);
                thaw_processes();
                enable_nonboot_cpus();
-               up(&pm_sem);
+               mutex_unlock(&pm_mutex);
                data->frozen = 0;
                break;
 
@@ -170,7 +174,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                        error = -EPERM;
                        break;
                }
-               down(&pm_sem);
+               mutex_lock(&pm_mutex);
                /* Free memory before shutting down devices. */
                error = swsusp_shrink_memory();
                if (!error) {
@@ -183,7 +187,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                        }
                        resume_console();
                }
-               up(&pm_sem);
+               mutex_unlock(&pm_mutex);
                if (!error)
                        error = put_user(in_suspend, (unsigned int __user *)arg);
                if (!error)
@@ -191,13 +195,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                break;
 
        case SNAPSHOT_ATOMIC_RESTORE:
+               snapshot_write_finalize(&data->handle);
                if (data->mode != O_WRONLY || !data->frozen ||
                    !snapshot_image_loaded(&data->handle)) {
                        error = -EPERM;
                        break;
                }
-               snapshot_free_unused_memory(&data->handle);
-               down(&pm_sem);
+               mutex_lock(&pm_mutex);
                pm_prepare_console();
                suspend_console();
                error = device_suspend(PMSG_PRETHAW);
@@ -207,7 +211,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                }
                resume_console();
                pm_restore_console();
-               up(&pm_sem);
+               mutex_unlock(&pm_mutex);
                break;
 
        case SNAPSHOT_FREE:
@@ -238,10 +242,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                                break;
                        }
                }
-               offset = alloc_swap_page(data->swap, data->bitmap);
+               offset = alloc_swapdev_block(data->swap, data->bitmap);
                if (offset) {
                        offset <<= PAGE_SHIFT;
-                       error = put_user(offset, (loff_t __user *)arg);
+                       error = put_user(offset, (sector_t __user *)arg);
                } else {
                        error = -ENOSPC;
                }
@@ -264,7 +268,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                         * so we need to recode them
                         */
                        if (old_decode_dev(arg)) {
-                               data->swap = swap_type_of(old_decode_dev(arg));
+                               data->swap = swap_type_of(old_decode_dev(arg), 0);
                                if (data->swap < 0)
                                        error = -ENODEV;
                        } else {
@@ -282,7 +286,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                        break;
                }
 
-               if (down_trylock(&pm_sem)) {
+               if (!mutex_trylock(&pm_mutex)) {
                        error = -EBUSY;
                        break;
                }
@@ -309,8 +313,66 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                if (pm_ops->finish)
                        pm_ops->finish(PM_SUSPEND_MEM);
 
-OutS3:
-               up(&pm_sem);
+ OutS3:
+               mutex_unlock(&pm_mutex);
+               break;
+
+       case SNAPSHOT_PMOPS:
+               switch (arg) {
+
+               case PMOPS_PREPARE:
+                       if (pm_ops->prepare) {
+                               error = pm_ops->prepare(PM_SUSPEND_DISK);
+                       }
+                       break;
+
+               case PMOPS_ENTER:
+                       kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
+                       error = pm_ops->enter(PM_SUSPEND_DISK);
+                       break;
+
+               case PMOPS_FINISH:
+                       if (pm_ops && pm_ops->finish) {
+                               pm_ops->finish(PM_SUSPEND_DISK);
+                       }
+                       break;
+
+               default:
+                       printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
+                       error = -EINVAL;
+
+               }
+               break;
+
+       case SNAPSHOT_SET_SWAP_AREA:
+               if (data->bitmap) {
+                       error = -EPERM;
+               } else {
+                       struct resume_swap_area swap_area;
+                       dev_t swdev;
+
+                       error = copy_from_user(&swap_area, (void __user *)arg,
+                                       sizeof(struct resume_swap_area));
+                       if (error) {
+                               error = -EFAULT;
+                               break;
+                       }
+
+                       /*
+                        * User space encodes device types as two-byte values,
+                        * so we need to recode them
+                        */
+                       swdev = old_decode_dev(swap_area.dev);
+                       if (swdev) {
+                               offset = swap_area.offset;
+                               data->swap = swap_type_of(swdev, offset);
+                               if (data->swap < 0)
+                                       error = -ENODEV;
+                       } else {
+                               data->swap = -1;
+                               error = -EINVAL;
+                       }
+               }
                break;
 
        default:
@@ -321,7 +383,7 @@ OutS3:
        return error;
 }
 
-static struct file_operations snapshot_fops = {
+static const struct file_operations snapshot_fops = {
        .open = snapshot_open,
        .release = snapshot_release,
        .read = snapshot_read,
index 66426552fbfef24e49442ed08f8d595930feca32..185bb45eacf7289db6b99039634ad9f9f6fa5d96 100644 (file)
@@ -53,8 +53,6 @@ int console_printk[4] = {
        DEFAULT_CONSOLE_LOGLEVEL,       /* default_console_loglevel */
 };
 
-EXPORT_UNUSED_SYMBOL(console_printk);  /*  June 2006  */
-
 /*
  * Low level drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
@@ -335,13 +333,25 @@ static void __call_console_drivers(unsigned long start, unsigned long end)
        }
 }
 
+static int __read_mostly ignore_loglevel;
+
+int __init ignore_loglevel_setup(char *str)
+{
+       ignore_loglevel = 1;
+       printk(KERN_INFO "debug: ignoring loglevel setting.\n");
+
+       return 1;
+}
+
+__setup("ignore_loglevel", ignore_loglevel_setup);
+
 /*
  * Write out chars from start to end - 1 inclusive
  */
 static void _call_console_drivers(unsigned long start,
                                unsigned long end, int msg_log_level)
 {
-       if (msg_log_level < console_loglevel &&
+       if ((msg_log_level < console_loglevel || ignore_loglevel) &&
                        console_drivers && start != end) {
                if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
                        /* wrapped write */
@@ -631,12 +641,7 @@ EXPORT_SYMBOL(vprintk);
 
 asmlinkage long sys_syslog(int type, char __user *buf, int len)
 {
-       return 0;
-}
-
-int do_syslog(int type, char __user *buf, int len)
-{
-       return 0;
+       return -ENOSYS;
 }
 
 static void call_console_drivers(unsigned long start, unsigned long end)
@@ -777,7 +782,6 @@ int is_console_locked(void)
 {
        return console_locked;
 }
-EXPORT_UNUSED_SYMBOL(is_console_locked);  /*  June 2006  */
 
 /**
  * release_console_sem - unlock the console system
index f940b462eec9a34eb7c3cad4ee958a2ec0f1f5bc..fb5e03d57e9dac86b7aadb3b7c5496a467d5ba15 100644 (file)
@@ -40,7 +40,7 @@ int (*timer_hook)(struct pt_regs *) __read_mostly;
 
 static atomic_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
-static int prof_on __read_mostly;
+int prof_on __read_mostly;
 static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
 #ifdef CONFIG_SMP
 static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
@@ -51,9 +51,19 @@ static DEFINE_MUTEX(profile_flip_mutex);
 static int __init profile_setup(char * str)
 {
        static char __initdata schedstr[] = "schedule";
+       static char __initdata sleepstr[] = "sleep";
        int par;
 
-       if (!strncmp(str, schedstr, strlen(schedstr))) {
+       if (!strncmp(str, sleepstr, strlen(sleepstr))) {
+               prof_on = SLEEP_PROFILING;
+               if (str[strlen(sleepstr)] == ',')
+                       str += strlen(sleepstr) + 1;
+               if (get_option(&str, &par))
+                       prof_shift = par;
+               printk(KERN_INFO
+                       "kernel sleep profiling enabled (shift: %ld)\n",
+                       prof_shift);
+       } else if (!strncmp(str, schedstr, strlen(schedstr))) {
                prof_on = SCHED_PROFILING;
                if (str[strlen(schedstr)] == ',')
                        str += strlen(schedstr) + 1;
@@ -204,7 +214,8 @@ EXPORT_SYMBOL_GPL(profile_event_unregister);
  * positions to which hits are accounted during short intervals (e.g.
  * several seconds) is usually very small. Exclusion from buffer
  * flipping is provided by interrupt disablement (note that for
- * SCHED_PROFILING profile_hit() may be called from process context).
+ * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
+ * process context).
  * The hash function is meant to be lightweight as opposed to strong,
  * and was vaguely inspired by ppc64 firmware-supported inverted
  * pagetable hash functions, but uses a full hashtable full of finite
@@ -257,7 +268,7 @@ static void profile_discard_flip_buffers(void)
        mutex_unlock(&profile_flip_mutex);
 }
 
-void profile_hit(int type, void *__pc)
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
        unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
        int i, j, cpu;
@@ -274,21 +285,31 @@ void profile_hit(int type, void *__pc)
                put_cpu();
                return;
        }
+       /*
+        * We buffer the global profiler buffer into a per-CPU
+        * queue and thus reduce the number of global (and possibly
+        * NUMA-alien) accesses. The write-queue is self-coalescing:
+        */
        local_irq_save(flags);
        do {
                for (j = 0; j < PROFILE_GRPSZ; ++j) {
                        if (hits[i + j].pc == pc) {
-                               hits[i + j].hits++;
+                               hits[i + j].hits += nr_hits;
                                goto out;
                        } else if (!hits[i + j].hits) {
                                hits[i + j].pc = pc;
-                               hits[i + j].hits = 1;
+                               hits[i + j].hits = nr_hits;
                                goto out;
                        }
                }
                i = (i + secondary) & (NR_PROFILE_HIT - 1);
        } while (i != primary);
-       atomic_inc(&prof_buffer[pc]);
+
+       /*
+        * Add the current hit(s) and flush the write-queue out
+        * to the global buffer:
+        */
+       atomic_add(nr_hits, &prof_buffer[pc]);
        for (i = 0; i < NR_PROFILE_HIT; ++i) {
                atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                hits[i].pc = hits[i].hits = 0;
@@ -298,7 +319,6 @@ out:
        put_cpu();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int __devinit profile_cpu_callback(struct notifier_block *info,
                                        unsigned long action, void *__cpu)
 {
@@ -351,19 +371,19 @@ static int __devinit profile_cpu_callback(struct notifier_block *info,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()         do { } while (0)
 #define profile_discard_flip_buffers() do { } while (0)
+#define profile_cpu_callback           NULL
 
-void profile_hit(int type, void *__pc)
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
        unsigned long pc;
 
        if (prof_on != type || !prof_buffer)
                return;
        pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
-       atomic_inc(&prof_buffer[min(pc, prof_len - 1)]);
+       atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
 
@@ -442,7 +462,8 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        read = 0;
 
        while (p < sizeof(unsigned int) && count > 0) {
-               put_user(*((char *)(&sample_step)+p),buf);
+               if (put_user(*((char *)(&sample_step)+p),buf))
+                       return -EFAULT;
                buf++; p++; count--; read++;
        }
        pnt = (char *)prof_buffer + p - sizeof(atomic_t);
@@ -480,7 +501,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
        return count;
 }
 
-static struct file_operations proc_profile_operations = {
+static const struct file_operations proc_profile_operations = {
        .read           = read_profile,
        .write          = write_profile,
 };
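A standalone userspace sketch of the self-coalescing write-queue that profile_hits() flushes into prof_buffer above. The queue size, the single flat scan and main() are simplifications (the kernel hashes hits into small groups, as the hunk shows); only the coalesce-or-claim-a-slot logic is the point:

#include <stdio.h>

#define NR_HIT 16                               /* illustrative queue size */

struct hit { unsigned long pc; unsigned int hits; };

static struct hit queue[NR_HIT];                /* stands in for the per-CPU hit queue */
static unsigned int global[64];                 /* stands in for prof_buffer */

static void record(unsigned long pc, unsigned int nr_hits)
{
        int i;

        for (i = 0; i < NR_HIT; i++) {
                if (queue[i].pc == pc) {        /* pc already queued: coalesce */
                        queue[i].hits += nr_hits;
                        return;
                }
                if (!queue[i].hits) {           /* free slot: claim it */
                        queue[i].pc = pc;
                        queue[i].hits = nr_hits;
                        return;
                }
        }
        /* queue full: account directly, then flush the queue to the global buffer */
        global[pc] += nr_hits;
        for (i = 0; i < NR_HIT; i++) {
                global[queue[i].pc] += queue[i].hits;
                queue[i].pc = 0;
                queue[i].hits = 0;
        }
}

int main(void)
{
        record(5, 1);
        record(5, 1);           /* second hit on pc 5 coalesces into the same slot */
        record(7, 3);
        printf("pc 5: %u queued hits\n", queue[0].hits);        /* prints 2 */
        return 0;
}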
index 26bb5ffe1ef14ef61824180dd6163acdc104006d..3554b76da84cf08ebc2808b5f2a237a362e52330 100644 (file)
@@ -235,12 +235,14 @@ static void rcu_do_batch(struct rcu_data *rdp)
 
        list = rdp->donelist;
        while (list) {
-               next = rdp->donelist = list->next;
+               next = list->next;
+               prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
+       rdp->donelist = list;
 
        local_irq_disable();
        rdp->qlen -= count;
index e2bda18f6f42779144829a2150b4e6fb8ac4b338..c52f981ea0086e569b37c5195cf291c6be55d446 100644 (file)
@@ -401,7 +401,7 @@ static void srcu_torture_cleanup(void)
        cleanup_srcu_struct(&srcu_ctl);
 }
 
-static int srcu_torture_read_lock(void)
+static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
 {
        return srcu_read_lock(&srcu_ctl);
 }
@@ -419,7 +419,7 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
                schedule_timeout_interruptible(longdelay);
 }
 
-static void srcu_torture_read_unlock(int idx)
+static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
 {
        srcu_read_unlock(&srcu_ctl, idx);
 }
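The __acquires()/__releases() markers added above are sparse context annotations, not runtime code. The same pattern on an ordinary lock wrapper looks like this (lock and function names are invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* tells sparse that callers return from here with the lock held */
static void example_enter(void) __acquires(example_lock)
{
        spin_lock(&example_lock);
}

/* ...and that this path drops it again */
static void example_exit(void) __releases(example_lock)
{
        spin_unlock(&example_lock);
}

When the code is not run through sparse (__CHECKER__ unset) both macros expand to nothing, so the annotations cost nothing at runtime.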
index f04bbdb56ac2a9123003b45d909f9ef6e971fd5e..75a3a9a7efc215bc71ff80e61fff1441a9070b03 100644 (file)
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = {
  *     reason waking is deferred is that calling directly from write
  *     causes problems if you're writing from say the scheduler.
  */
-static void wakeup_readers(void *private)
+static void wakeup_readers(struct work_struct *work)
 {
-       struct rchan_buf *buf = private;
+       struct rchan_buf *buf =
+               container_of(work, struct rchan_buf, wake_readers.work);
        wake_up_interruptible(&buf->read_wait);
 }
 
@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
        if (init) {
                init_waitqueue_head(&buf->read_wait);
                kref_init(&buf->kref);
-               INIT_WORK(&buf->wake_readers, NULL, NULL);
+               INIT_DELAYED_WORK(&buf->wake_readers, NULL);
        } else {
                cancel_delayed_work(&buf->wake_readers);
                flush_scheduled_work();
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
                        buf->padding[old_subbuf];
                smp_mb();
                if (waitqueue_active(&buf->read_wait)) {
-                       PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
+                       PREPARE_DELAYED_WORK(&buf->wake_readers,
+                                            wakeup_readers);
                        schedule_delayed_work(&buf->wake_readers, 1);
                }
        }
@@ -1011,7 +1013,7 @@ static ssize_t relay_file_sendfile(struct file *filp,
                                       actor, &desc);
 }
 
-struct file_operations relay_file_operations = {
+const struct file_operations relay_file_operations = {
        .open           = relay_file_open,
        .poll           = relay_file_poll,
        .mmap           = relay_file_mmap,
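The relay conversion above follows the 2.6.20 workqueue rework: handlers now receive the work_struct itself and recover their context with container_of(). A hedged sketch of that pattern outside relay, with an invented owning structure (the same conversion shows up in several other files of this commit):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_dev {
        int pending_events;
        struct delayed_work poke;       /* work item embedded in its owner */
};

static void my_dev_poke(struct work_struct *work)
{
        /* recover the owning structure from the work_struct pointer */
        struct my_dev *dev = container_of(work, struct my_dev, poke.work);

        dev->pending_events = 0;
}

static void my_dev_init(struct my_dev *dev)
{
        dev->pending_events = 0;
        INIT_DELAYED_WORK(&dev->poke, my_dev_poke);
        schedule_delayed_work(&dev->poke, HZ);  /* run roughly one second later */
}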
index 6de60c12143e63f539701c3bc1687f3f21d0df9d..7b9a497419d9501631e5a524582d6b812fb5e841 100644 (file)
@@ -88,7 +88,7 @@ static int r_show(struct seq_file *m, void *v)
        return 0;
 }
 
-static struct seq_operations resource_op = {
+static const struct seq_operations resource_op = {
        .start  = r_start,
        .next   = r_next,
        .stop   = r_stop,
@@ -115,14 +115,14 @@ static int iomem_open(struct inode *inode, struct file *file)
        return res;
 }
 
-static struct file_operations proc_ioports_operations = {
+static const struct file_operations proc_ioports_operations = {
        .open           = ioports_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
 };
 
-static struct file_operations proc_iomem_operations = {
+static const struct file_operations proc_iomem_operations = {
        .open           = iomem_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
index 6dcea9dd8c94a23ffa4fcb1dd21ea6ba685692bf..015fc633c96c7338f8ffb3e01cc1ce3a17e72030 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/sysdev.h>
 #include <linux/timer.h>
+#include <linux/freezer.h>
 
 #include "rtmutex.h"
 
index 3399701c680e392f46e21829cee8da0bf5482303..f385eff4682d7fbf169accde4c93d0bf4f42fd83 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
@@ -505,7 +505,7 @@ static int schedstat_open(struct inode *inode, struct file *file)
        return res;
 }
 
-struct file_operations proc_schedstat_operations = {
+const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
@@ -948,6 +948,17 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
        }
 #endif
 
+       /*
+        * Sleep time is in units of nanosecs, so shift by 20 to get a
+        * milliseconds-range estimation of the amount of time that the task
+        * spent sleeping:
+        */
+       if (unlikely(prof_on == SLEEP_PROFILING)) {
+               if (p->state == TASK_UNINTERRUPTIBLE)
+                       profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
+                                    (now - p->timestamp) >> 20);
+       }
+
        if (!rt_task(p))
                p->prio = recalc_task_prio(p, now);
 
@@ -3333,6 +3344,7 @@ asmlinkage void __sched schedule(void)
                printk(KERN_ERR "BUG: scheduling while atomic: "
                        "%s/0x%08x/%d\n",
                        current->comm, preempt_count(), current->pid);
+               debug_show_held_locks(current);
                dump_stack();
        }
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
@@ -4804,18 +4816,18 @@ static void show_task(struct task_struct *p)
                show_stack(p, NULL);
 }
 
-void show_state(void)
+void show_state_filter(unsigned long state_filter)
 {
        struct task_struct *g, *p;
 
 #if (BITS_PER_LONG == 32)
        printk("\n"
-              "                                               sibling\n");
-       printk("  task             PC      pid father child younger older\n");
+              "                         free                        sibling\n");
+       printk("  task             PC    stack   pid father child younger older\n");
 #else
        printk("\n"
-              "                                                       sibling\n");
-       printk("  task                 PC          pid father child younger older\n");
+              "                                 free                        sibling\n");
+       printk("  task                 PC        stack   pid father child younger older\n");
 #endif
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
@@ -4824,11 +4836,16 @@ void show_state(void)
                 * console might take a lot of time:
                 */
                touch_nmi_watchdog();
-               show_task(p);
+               if (p->state & state_filter)
+                       show_task(p);
        } while_each_thread(g, p);
 
        read_unlock(&tasklist_lock);
-       debug_show_all_locks();
+       /*
+        * Only show locks if all tasks are dumped:
+        */
+       if (state_filter == -1)
+               debug_show_all_locks();
 }
 
 /**
@@ -6723,8 +6740,6 @@ SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
            sched_smt_power_savings_store);
 #endif
 
-
-#ifdef CONFIG_HOTPLUG_CPU
 /*
  * Force a reinitialization of the sched domains hierarchy.  The domains
  * and groups cannot be updated in place without racing with the balancing
@@ -6757,7 +6772,6 @@ static int update_sched_domains(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
-#endif
 
 void __init sched_init_smp(void)
 {
@@ -6867,6 +6881,7 @@ void __might_sleep(char *file, int line)
                                " context at %s:%d\n", file, line);
                printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
+               debug_show_held_locks(current);
                dump_stack();
        }
 #endif
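The sleep-profiling hook added above turns a nanosecond sleep interval into a hit count with a shift instead of a division: 2^20 = 1,048,576, so >> 20 is a cheap "divide by roughly a million", i.e. nanoseconds down to a milliseconds-range figure. A tiny standalone check of that arithmetic (the interval is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long long slept_ns = 3000000000ULL;    /* task slept for 3 seconds */

        /* exact: 3,000,000,000 ns = 3,000 ms; the shift yields 2861, which is
         * close enough for a profiling bucket count */
        printf("hits credited: %llu\n", slept_ns >> 20);
        return 0;
}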
index df18c167a2a72d67d76cabc1768b741c12ed5e7e..ec81defde33946380518215cab07e309876360ed 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ptrace.h>
 #include <linux/signal.h>
 #include <linux/capability.h>
+#include <linux/freezer.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -33,7 +34,7 @@
  * SLAB caches for signal bits.
  */
 
-static kmem_cache_t *sigqueue_cachep;
+static struct kmem_cache *sigqueue_cachep;
 
 /*
  * In POSIX a signal is sent either to a specific thread (Linux task)
@@ -1133,8 +1134,7 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
        return error;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
        int error;
        rcu_read_lock();
index bf25015dce162a541059c051567c3569069985b9..918e52df090e88ee0a03096bee651e6c3a4d6dce 100644 (file)
@@ -574,8 +574,6 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 
        switch (action) {
        case CPU_UP_PREPARE:
-               BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
-               BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
index 98489d82801be030cb3477589ed952291e19eb7e..a0c1a29a507fccc234d699093a84983f77a963fd 100644 (file)
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
        return 0;
 }
 
-static void deferred_cad(void *dummy)
+static void deferred_cad(struct work_struct *dummy)
 {
        kernel_restart(NULL);
 }
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy)
  */
 void ctrl_alt_del(void)
 {
-       static DECLARE_WORK(cad_work, deferred_cad, NULL);
+       static DECLARE_WORK(cad_work, deferred_cad);
 
        if (C_A_D)
                schedule_work(&cad_work);
@@ -1102,14 +1102,14 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
 asmlinkage long sys_setuid(uid_t uid)
 {
        int old_euid = current->euid;
-       int old_ruid, old_suid, new_ruid, new_suid;
+       int old_ruid, old_suid, new_suid;
        int retval;
 
        retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;
 
-       old_ruid = new_ruid = current->uid;
+       old_ruid = current->uid;
        old_suid = current->suid;
        new_suid = old_suid;
        
index 09e569f4792be6a80e08924e6ef98245b70f6d95..8e9f00fd6d18f0a63e40806ff066e7e0aebfae20 100644 (file)
@@ -54,6 +54,7 @@ extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
 
 #ifdef CONFIG_X86
 #include <asm/nmi.h>
+#include <asm/stacktrace.h>
 #endif
 
 #if defined(CONFIG_SYSCTL)
@@ -170,7 +171,7 @@ static ssize_t proc_readsys(struct file *, char __user *, size_t, loff_t *);
 static ssize_t proc_writesys(struct file *, const char __user *, size_t, loff_t *);
 static int proc_opensys(struct inode *, struct file *);
 
-struct file_operations proc_sys_file_operations = {
+const struct file_operations proc_sys_file_operations = {
        .open           = proc_opensys,
        .read           = proc_readsys,
        .write          = proc_writesys,
@@ -707,6 +708,14 @@ static ctl_table kern_table[] = {
                .mode           = 0444,
                .proc_handler   = &proc_dointvec,
        },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "kstack_depth_to_print",
+               .data           = &kstack_depth_to_print,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec,
+       },
 #endif
 #if defined(CONFIG_MMU)
        {
@@ -977,17 +986,6 @@ static ctl_table vm_table[] = {
                .extra1         = &zero,
        },
 #endif
-#ifdef CONFIG_SWAP
-       {
-               .ctl_name       = VM_SWAP_TOKEN_TIMEOUT,
-               .procname       = "swap_token_timeout",
-               .data           = &swap_token_default_timeout,
-               .maxlen         = sizeof(swap_token_default_timeout),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_jiffies,
-               .strategy       = &sysctl_jiffies,
-       },
-#endif
 #ifdef CONFIG_NUMA
        {
                .ctl_name       = VM_ZONE_RECLAIM_MODE,
@@ -1886,7 +1884,7 @@ static int __do_proc_dointvec(void *tbl_data, ctl_table *table,
                        p = buf;
                        if (*p == '-' && left > 1) {
                                neg = 1;
-                               left--, p++;
+                               p++;
                        }
                        if (*p < '0' || *p > '9')
                                break;
@@ -2137,7 +2135,7 @@ static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write,
                        p = buf;
                        if (*p == '-' && left > 1) {
                                neg = 1;
-                               left--, p++;
+                               p++;
                        }
                        if (*p < '0' || *p > '9')
                                break;
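The kstack_depth_to_print entry above uses CTL_UNNUMBERED, i.e. it is reachable through /proc/sys but has no binary sysctl number. A hedged sketch of registering a similar unnumbered integer knob from a module; the knob itself is invented, and the two-argument register_sysctl_table() is the form this kernel generation expects:

#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/errno.h>

static int example_depth = 8;                   /* invented knob */

static ctl_table example_vars[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,       /* no binary sysctl number */
                .procname       = "example_depth",      /* /proc/sys/kernel/example_depth */
                .data           = &example_depth,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static ctl_table example_root[] = {
        {
                .ctl_name       = CTL_KERN,
                .procname       = "kernel",
                .mode           = 0555,
                .child          = example_vars,
        },
        { .ctl_name = 0 }
};

static struct ctl_table_header *example_header;

static int __init example_init(void)
{
        /* second argument: do not insert at the head of the sysctl list */
        example_header = register_sysctl_table(example_root, 0);
        return example_header ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
        unregister_sysctl_table(example_header);
}

module_init(example_init);
module_exit(example_exit);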
index d3d28919d4b494a026701fb4c87426c99111fe7f..4c3476fa058d5627650779f0559dc17c3a7cb1b8 100644 (file)
@@ -34,7 +34,7 @@
 
 static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
 static int family_registered;
-kmem_cache_t *taskstats_cache;
+struct kmem_cache *taskstats_cache;
 
 static struct genl_family family = {
        .id             = GENL_ID_GENERATE,
@@ -69,7 +69,7 @@ enum actions {
 };
 
 static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
-                       void **replyp, size_t size)
+                               size_t size)
 {
        struct sk_buff *skb;
        void *reply;
@@ -94,7 +94,6 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
        }
 
        *skbp = skb;
-       *replyp = reply;
        return 0;
 }
 
@@ -119,10 +118,10 @@ static int send_reply(struct sk_buff *skb, pid_t pid)
 /*
  * Send taskstats data in @skb to listeners registered for @cpu's exit data
  */
-static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
+static void send_cpu_listeners(struct sk_buff *skb,
+                                       struct listener_list *listeners)
 {
        struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
-       struct listener_list *listeners;
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
@@ -135,7 +134,6 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
        }
 
        rc = 0;
-       listeners = &per_cpu(listener_array, cpu);
        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                skb_next = NULL;
@@ -186,6 +184,7 @@ static int fill_pid(pid_t pid, struct task_struct *tsk,
        } else
                get_task_struct(tsk);
 
+       memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstsats as follows
@@ -228,6 +227,8 @@ static int fill_tgid(pid_t tgid, struct task_struct *first,
 
        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
+       else
+               memset(stats, 0, sizeof(*stats));
 
        tsk = first;
        do {
@@ -344,14 +345,36 @@ static int parse(struct nlattr *na, cpumask_t *mask)
        return ret;
 }
 
+static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
+{
+       struct nlattr *na, *ret;
+       int aggr;
+
+       aggr = (type == TASKSTATS_TYPE_PID)
+                       ? TASKSTATS_TYPE_AGGR_PID
+                       : TASKSTATS_TYPE_AGGR_TGID;
+
+       na = nla_nest_start(skb, aggr);
+       if (!na)
+               goto err;
+       if (nla_put(skb, type, sizeof(pid), &pid) < 0)
+               goto err;
+       ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
+       if (!ret)
+               goto err;
+       nla_nest_end(skb, na);
+
+       return nla_data(ret);
+err:
+       return NULL;
+}
+
 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 {
        int rc = 0;
        struct sk_buff *rep_skb;
-       struct taskstats stats;
-       void *reply;
+       struct taskstats *stats;
        size_t size;
-       struct nlattr *na;
        cpumask_t mask;
 
        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
@@ -372,83 +395,71 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-       memset(&stats, 0, sizeof(stats));
-       rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
+       rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;
 
+       rc = -EINVAL;
        if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
                u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
-               rc = fill_pid(pid, NULL, &stats);
-               if (rc < 0)
+               stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
+               if (!stats)
                        goto err;
 
-               na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
-               NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
-               NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-                               stats);
+               rc = fill_pid(pid, NULL, stats);
+               if (rc < 0)
+                       goto err;
        } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
                u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
-               rc = fill_tgid(tgid, NULL, &stats);
-               if (rc < 0)
+               stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
+               if (!stats)
                        goto err;
 
-               na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
-               NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
-               NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-                               stats);
-       } else {
-               rc = -EINVAL;
+               rc = fill_tgid(tgid, NULL, stats);
+               if (rc < 0)
+                       goto err;
+       } else
                goto err;
-       }
-
-       nla_nest_end(rep_skb, na);
 
        return send_reply(rep_skb, info->snd_pid);
-
-nla_put_failure:
-       rc = genlmsg_cancel(rep_skb, reply);
 err:
        nlmsg_free(rep_skb);
        return rc;
 }
 
-void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
+static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
 {
-       struct listener_list *listeners;
-       struct taskstats *tmp;
-       /*
-        * This is the cpu on which the task is exiting currently and will
-        * be the one for which the exit event is sent, even if the cpu
-        * on which this function is running changes later.
-        */
-       *mycpu = raw_smp_processor_id();
+       struct signal_struct *sig = tsk->signal;
+       struct taskstats *stats;
 
-       *ptidstats = NULL;
-       tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-       if (!tmp)
-               return;
+       if (sig->stats || thread_group_empty(tsk))
+               goto ret;
 
-       listeners = &per_cpu(listener_array, *mycpu);
-       down_read(&listeners->sem);
-       if (!list_empty(&listeners->list)) {
-               *ptidstats = tmp;
-               tmp = NULL;
+       /* No problem if kmem_cache_zalloc() fails */
+       stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (!sig->stats) {
+               sig->stats = stats;
+               stats = NULL;
        }
-       up_read(&listeners->sem);
-       kfree(tmp);
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       if (stats)
+               kmem_cache_free(taskstats_cache, stats);
+ret:
+       return sig->stats;
 }
 
 /* Send pid data out on exit */
-void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
-                       int group_dead, unsigned int mycpu)
+void taskstats_exit(struct task_struct *tsk, int group_dead)
 {
        int rc;
+       struct listener_list *listeners;
+       struct taskstats *stats;
        struct sk_buff *rep_skb;
-       void *reply;
        size_t size;
        int is_thread_group;
-       struct nlattr *na;
 
        if (!family_registered)
                return;
@@ -459,7 +470,7 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-       is_thread_group = (tsk->signal->stats != NULL);
+       is_thread_group = !!taskstats_tgid_alloc(tsk);
        if (is_thread_group) {
                /* PID + STATS + TGID + STATS */
                size = 2 * size;
@@ -467,49 +478,39 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
                fill_tgid_exit(tsk);
        }
 
-       if (!tidstats)
+       listeners = &__raw_get_cpu_var(listener_array);
+       if (list_empty(&listeners->list))
                return;
 
-       rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
-       if (rc < 0)
-               goto ret;
-
-       rc = fill_pid(tsk->pid, tsk, tidstats);
+       rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
-               goto err_skb;
+               return;
 
-       na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
-       NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
-       NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-                       *tidstats);
-       nla_nest_end(rep_skb, na);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
+       if (!stats)
+               goto err;
 
-       if (!is_thread_group)
-               goto send;
+       rc = fill_pid(tsk->pid, tsk, stats);
+       if (rc < 0)
+               goto err;
 
        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
-       if (!group_dead)
+       if (!is_thread_group || !group_dead)
                goto send;
 
-       na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
-       NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
-       /* No locking needed for tsk->signal->stats since group is dead */
-       NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
-                       *tsk->signal->stats);
-       nla_nest_end(rep_skb, na);
+       stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
+       if (!stats)
+               goto err;
+
+       memcpy(stats, tsk->signal->stats, sizeof(*stats));
 
 send:
-       send_cpu_listeners(rep_skb, mycpu);
+       send_cpu_listeners(rep_skb, listeners);
        return;
-
-nla_put_failure:
-       genlmsg_cancel(rep_skb, reply);
-err_skb:
+err:
        nlmsg_free(rep_skb);
-ret:
-       return;
 }
 
 static struct genl_ops taskstats_ops = {
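mk_reply() above nests a PID (or TGID) attribute together with reserved space for the stats, and returns a pointer into that space so callers can fill it in place; no nla_nest_cancel() is needed on failure because both callers simply free the whole reply skb. A condensed sketch of the PID case with per-step comments (it assumes skb is an already-started genetlink reply):

#include <net/netlink.h>
#include <linux/taskstats.h>

/* returns a pointer to the reserved, not yet initialised, stats payload */
static struct taskstats *reserve_pid_stats(struct sk_buff *skb, u32 pid)
{
        struct nlattr *nest, *stats_attr;

        nest = nla_nest_start(skb, TASKSTATS_TYPE_AGGR_PID);   /* open the nest */
        if (!nest)
                return NULL;
        if (nla_put(skb, TASKSTATS_TYPE_PID, sizeof(pid), &pid) < 0)
                return NULL;                            /* caller frees the skb */
        stats_attr = nla_reserve(skb, TASKSTATS_TYPE_STATS,
                                 sizeof(struct taskstats));
        if (!stats_attr)
                return NULL;
        nla_nest_end(skb, nest);                        /* fix up the nest length */

        return nla_data(stats_attr);                    /* filled in by the caller */
}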
index ed0a21d4a902339acef700eea0e291220ce7fb2a..09c261329249dd5730d7694e214af8240385b48f 100644 (file)
 #include <linux/bootmem.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
+#include <linux/uaccess.h>
 #include <asm/sections.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
-extern char __start_unwind[], __end_unwind[];
+extern const char __start_unwind[], __end_unwind[];
 extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];
 
 #define MAX_STACK_DEPTH 8
@@ -94,6 +95,7 @@ static const struct {
 
 typedef unsigned long uleb128_t;
 typedef   signed long sleb128_t;
+#define sleb128abs __builtin_labs
 
 static struct unwind_table {
        struct {
@@ -135,6 +137,17 @@ struct unwind_state {
 
 static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
 
+static unsigned unwind_debug;
+static int __init unwind_debug_setup(char *s)
+{
+       unwind_debug = simple_strtoul(s, NULL, 0);
+       return 1;
+}
+__setup("unwind_debug=", unwind_debug_setup);
+#define dprintk(lvl, fmt, args...) \
+       ((void)(lvl > unwind_debug \
+        || printk(KERN_DEBUG "unwind: " fmt "\n", ##args)))
+
 static struct unwind_table *find_table(unsigned long pc)
 {
        struct unwind_table *table;
@@ -151,7 +164,9 @@ static struct unwind_table *find_table(unsigned long pc)
 
 static unsigned long read_pointer(const u8 **pLoc,
                                   const void *end,
-                                  signed ptrType);
+                                  signed ptrType,
+                                  unsigned long text_base,
+                                  unsigned long data_base);
 
 static void init_unwind_table(struct unwind_table *table,
                               const char *name,
@@ -176,10 +191,13 @@ static void init_unwind_table(struct unwind_table *table,
        /* See if the linker provided table looks valid. */
        if (header_size <= 4
            || header_start[0] != 1
-           || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
-           || header_start[2] == DW_EH_PE_omit
-           || read_pointer(&ptr, end, header_start[2]) <= 0
-           || header_start[3] == DW_EH_PE_omit)
+           || (void *)read_pointer(&ptr, end, header_start[1], 0, 0)
+              != table_start
+           || !read_pointer(&ptr, end, header_start[2], 0, 0)
+           || !read_pointer(&ptr, end, header_start[3], 0,
+                            (unsigned long)header_start)
+           || !read_pointer(&ptr, end, header_start[3], 0,
+                            (unsigned long)header_start))
                header_start = NULL;
        table->hdrsz = header_size;
        smp_wmb();
@@ -269,7 +287,7 @@ static void __init setup_unwind_table(struct unwind_table *table,
                ptr = (const u8 *)(fde + 2);
                if (!read_pointer(&ptr,
                                  (const u8 *)(fde + 1) + *fde,
-                                 ptrType))
+                                 ptrType, 0, 0))
                        return;
                ++n;
        }
@@ -279,6 +297,7 @@ static void __init setup_unwind_table(struct unwind_table *table,
 
        hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
                + 2 * n * sizeof(unsigned long);
+       dprintk(2, "Binary lookup table size for %s: %lu bytes", table->name, hdrSize);
        header = alloc(hdrSize);
        if (!header)
                return;
@@ -303,7 +322,7 @@ static void __init setup_unwind_table(struct unwind_table *table,
                ptr = (const u8 *)(fde + 2);
                header->table[n].start = read_pointer(&ptr,
                                                      (const u8 *)(fde + 1) + *fde,
-                                                     fde_pointer_type(cie));
+                                                     fde_pointer_type(cie), 0, 0);
                header->table[n].fde = (unsigned long)fde;
                ++n;
        }
@@ -486,7 +505,9 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
 
 static unsigned long read_pointer(const u8 **pLoc,
                                   const void *end,
-                                  signed ptrType)
+                                  signed ptrType,
+                                  unsigned long text_base,
+                                  unsigned long data_base)
 {
        unsigned long value = 0;
        union {
@@ -498,13 +519,17 @@ static unsigned long read_pointer(const u8 **pLoc,
                const unsigned long *pul;
        } ptr;
 
-       if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+       if (ptrType < 0 || ptrType == DW_EH_PE_omit) {
+               dprintk(1, "Invalid pointer encoding %02X (%p,%p).", ptrType, *pLoc, end);
                return 0;
+       }
        ptr.p8 = *pLoc;
        switch(ptrType & DW_EH_PE_FORM) {
        case DW_EH_PE_data2:
-               if (end < (const void *)(ptr.p16u + 1))
+               if (end < (const void *)(ptr.p16u + 1)) {
+                       dprintk(1, "Data16 overrun (%p,%p).", ptr.p8, end);
                        return 0;
+               }
                if(ptrType & DW_EH_PE_signed)
                        value = get_unaligned(ptr.p16s++);
                else
@@ -512,8 +537,10 @@ static unsigned long read_pointer(const u8 **pLoc,
                break;
        case DW_EH_PE_data4:
 #ifdef CONFIG_64BIT
-               if (end < (const void *)(ptr.p32u + 1))
+               if (end < (const void *)(ptr.p32u + 1)) {
+                       dprintk(1, "Data32 overrun (%p,%p).", ptr.p8, end);
                        return 0;
+               }
                if(ptrType & DW_EH_PE_signed)
                        value = get_unaligned(ptr.p32s++);
                else
@@ -525,8 +552,10 @@ static unsigned long read_pointer(const u8 **pLoc,
                BUILD_BUG_ON(sizeof(u32) != sizeof(value));
 #endif
        case DW_EH_PE_native:
-               if (end < (const void *)(ptr.pul + 1))
+               if (end < (const void *)(ptr.pul + 1)) {
+                       dprintk(1, "DataUL overrun (%p,%p).", ptr.p8, end);
                        return 0;
+               }
                value = get_unaligned(ptr.pul++);
                break;
        case DW_EH_PE_leb128:
@@ -534,10 +563,14 @@ static unsigned long read_pointer(const u8 **pLoc,
                value = ptrType & DW_EH_PE_signed
                        ? get_sleb128(&ptr.p8, end)
                        : get_uleb128(&ptr.p8, end);
-               if ((const void *)ptr.p8 > end)
+               if ((const void *)ptr.p8 > end) {
+                       dprintk(1, "DataLEB overrun (%p,%p).", ptr.p8, end);
                        return 0;
+               }
                break;
        default:
+               dprintk(2, "Cannot decode pointer type %02X (%p,%p).",
+                       ptrType, ptr.p8, end);
                return 0;
        }
        switch(ptrType & DW_EH_PE_ADJUST) {
@@ -546,12 +579,33 @@ static unsigned long read_pointer(const u8 **pLoc,
        case DW_EH_PE_pcrel:
                value += (unsigned long)*pLoc;
                break;
+       case DW_EH_PE_textrel:
+               if (likely(text_base)) {
+                       value += text_base;
+                       break;
+               }
+               dprintk(2, "Text-relative encoding %02X (%p,%p), but zero text base.",
+                       ptrType, *pLoc, end);
+               return 0;
+       case DW_EH_PE_datarel:
+               if (likely(data_base)) {
+                       value += data_base;
+                       break;
+               }
+               dprintk(2, "Data-relative encoding %02X (%p,%p), but zero data base.",
+                       ptrType, *pLoc, end);
+               return 0;
        default:
+               dprintk(2, "Cannot adjust pointer type %02X (%p,%p).",
+                       ptrType, *pLoc, end);
                return 0;
        }
        if ((ptrType & DW_EH_PE_indirect)
-           && __get_user(value, (unsigned long *)value))
+           && probe_kernel_address((unsigned long *)value, value)) {
+               dprintk(1, "Cannot read indirect value %lx (%p,%p).",
+                       value, *pLoc, end);
                return 0;
+       }
        *pLoc = ptr.p8;
 
        return value;
@@ -594,7 +648,8 @@ static signed fde_pointer_type(const u32 *cie)
                        case 'P': {
                                        signed ptrType = *ptr++;
 
-                                       if (!read_pointer(&ptr, end, ptrType) || ptr > end)
+                                       if (!read_pointer(&ptr, end, ptrType, 0, 0)
+                                           || ptr > end)
                                                return -1;
                                }
                                break;
@@ -654,7 +709,8 @@ static int processCFI(const u8 *start,
                        case DW_CFA_nop:
                                break;
                        case DW_CFA_set_loc:
-                               if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0)
+                               state->loc = read_pointer(&ptr.p8, end, ptrType, 0, 0);
+                               if (state->loc == 0)
                                        result = 0;
                                break;
                        case DW_CFA_advance_loc1:
@@ -700,8 +756,10 @@ static int processCFI(const u8 *start,
                                        state->label = NULL;
                                        return 1;
                                }
-                               if (state->stackDepth >= MAX_STACK_DEPTH)
+                               if (state->stackDepth >= MAX_STACK_DEPTH) {
+                                       dprintk(1, "State stack overflow (%p,%p).", ptr.p8, end);
                                        return 0;
+                               }
                                state->stack[state->stackDepth++] = ptr.p8;
                                break;
                        case DW_CFA_restore_state:
@@ -716,8 +774,10 @@ static int processCFI(const u8 *start,
                                        result = processCFI(start, end, 0, ptrType, state);
                                        state->loc = loc;
                                        state->label = label;
-                               } else
+                               } else {
+                                       dprintk(1, "State stack underflow (%p,%p).", ptr.p8, end);
                                        return 0;
+                               }
                                break;
                        case DW_CFA_def_cfa:
                                state->cfa.reg = get_uleb128(&ptr.p8, end);
@@ -749,6 +809,7 @@ static int processCFI(const u8 *start,
                                break;
                        case DW_CFA_GNU_window_save:
                        default:
+                               dprintk(1, "Unrecognized CFI op %02X (%p,%p).", ptr.p8[-1], ptr.p8 - 1, end);
                                result = 0;
                                break;
                        }
@@ -764,12 +825,17 @@ static int processCFI(const u8 *start,
                        set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
                        break;
                }
-               if (ptr.p8 > end)
+               if (ptr.p8 > end) {
+                       dprintk(1, "Data overrun (%p,%p).", ptr.p8, end);
                        result = 0;
+               }
                if (result && targetLoc != 0 && targetLoc < state->loc)
                        return 1;
        }
 
+       if (result && ptr.p8 < end)
+               dprintk(1, "Data underrun (%p,%p).", ptr.p8, end);
+
        return result
           && ptr.p8 == end
           && (targetLoc == 0
@@ -786,7 +852,7 @@ int unwind(struct unwind_frame_info *frame)
 #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
        const u32 *fde = NULL, *cie = NULL;
        const u8 *ptr = NULL, *end = NULL;
-       unsigned long pc = UNW_PC(frame) - frame->call_frame;
+       unsigned long pc = UNW_PC(frame) - frame->call_frame, sp;
        unsigned long startLoc = 0, endLoc = 0, cfa;
        unsigned i;
        signed ptrType = -1;
@@ -813,9 +879,9 @@ int unwind(struct unwind_frame_info *frame)
                        ptr = hdr + 4;
                        end = hdr + table->hdrsz;
                        if (tableSize
-                           && read_pointer(&ptr, end, hdr[1])
+                           && read_pointer(&ptr, end, hdr[1], 0, 0)
                               == (unsigned long)table->address
-                           && (i = read_pointer(&ptr, end, hdr[2])) > 0
+                           && (i = read_pointer(&ptr, end, hdr[2], 0, 0)) > 0
                            && i == (end - ptr) / (2 * tableSize)
                            && !((end - ptr) % (2 * tableSize))) {
                                do {
@@ -823,7 +889,8 @@ int unwind(struct unwind_frame_info *frame)
 
                                        startLoc = read_pointer(&cur,
                                                                cur + tableSize,
-                                                               hdr[3]);
+                                                               hdr[3], 0,
+                                                               (unsigned long)hdr);
                                        if (pc < startLoc)
                                                i /= 2;
                                        else {
@@ -834,13 +901,17 @@ int unwind(struct unwind_frame_info *frame)
                                if (i == 1
                                    && (startLoc = read_pointer(&ptr,
                                                                ptr + tableSize,
-                                                               hdr[3])) != 0
+                                                               hdr[3], 0,
+                                                               (unsigned long)hdr)) != 0
                                    && pc >= startLoc)
                                        fde = (void *)read_pointer(&ptr,
                                                                   ptr + tableSize,
-                                                                  hdr[3]);
+                                                                  hdr[3], 0,
+                                                                  (unsigned long)hdr);
                        }
                }
+               if(hdr && !fde)
+                       dprintk(3, "Binary lookup for %lx failed.", pc);
 
                if (fde != NULL) {
                        cie = cie_for_fde(fde, table);
@@ -851,17 +922,19 @@ int unwind(struct unwind_frame_info *frame)
                           && (ptrType = fde_pointer_type(cie)) >= 0
                           && read_pointer(&ptr,
                                           (const u8 *)(fde + 1) + *fde,
-                                          ptrType) == startLoc) {
+                                          ptrType, 0, 0) == startLoc) {
                                if (!(ptrType & DW_EH_PE_indirect))
                                        ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
                                endLoc = startLoc
                                         + read_pointer(&ptr,
                                                        (const u8 *)(fde + 1) + *fde,
-                                                       ptrType);
+                                                       ptrType, 0, 0);
                                if(pc >= endLoc)
                                        fde = NULL;
                        } else
                                fde = NULL;
+                       if(!fde)
+                               dprintk(1, "Binary lookup result for %lx discarded.", pc);
                }
                if (fde == NULL) {
                        for (fde = table->address, tableSize = table->size;
@@ -881,7 +954,7 @@ int unwind(struct unwind_frame_info *frame)
                                ptr = (const u8 *)(fde + 2);
                                startLoc = read_pointer(&ptr,
                                                        (const u8 *)(fde + 1) + *fde,
-                                                       ptrType);
+                                                       ptrType, 0, 0);
                                if (!startLoc)
                                        continue;
                                if (!(ptrType & DW_EH_PE_indirect))
@@ -889,10 +962,12 @@ int unwind(struct unwind_frame_info *frame)
                                endLoc = startLoc
                                         + read_pointer(&ptr,
                                                        (const u8 *)(fde + 1) + *fde,
-                                                       ptrType);
+                                                       ptrType, 0, 0);
                                if (pc >= startLoc && pc < endLoc)
                                        break;
                        }
+                       if(!fde)
+                               dprintk(3, "Linear lookup for %lx failed.", pc);
                }
        }
        if (cie != NULL) {
@@ -926,6 +1001,8 @@ int unwind(struct unwind_frame_info *frame)
                        if (ptr >= end || *ptr)
                                cie = NULL;
                }
+               if(!cie)
+                       dprintk(1, "CIE unusable (%p,%p).", ptr, end);
                ++ptr;
        }
        if (cie != NULL) {
@@ -935,7 +1012,12 @@ int unwind(struct unwind_frame_info *frame)
                state.dataAlign = get_sleb128(&ptr, end);
                if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
                        cie = NULL;
-               else {
+               else if (UNW_PC(frame) % state.codeAlign
+                        || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
+                       dprintk(1, "Input pointer(s) misaligned (%lx,%lx).",
+                               UNW_PC(frame), UNW_SP(frame));
+                       return -EPERM;
+               } else {
                        retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
                        /* skip augmentation */
                        if (((const char *)(cie + 2))[1] == 'z') {
@@ -949,6 +1031,8 @@ int unwind(struct unwind_frame_info *frame)
                           || reg_info[retAddrReg].width != sizeof(unsigned long))
                                cie = NULL;
                }
+               if(!cie)
+                       dprintk(1, "CIE validation failed (%p,%p).", ptr, end);
        }
        if (cie != NULL) {
                state.cieStart = ptr;
@@ -962,11 +1046,15 @@ int unwind(struct unwind_frame_info *frame)
                        if ((ptr += augSize) > end)
                                fde = NULL;
                }
+               if(!fde)
+                       dprintk(1, "FDE validation failed (%p,%p).", ptr, end);
        }
        if (cie == NULL || fde == NULL) {
 #ifdef CONFIG_FRAME_POINTER
                unsigned long top, bottom;
 
+               if ((UNW_SP(frame) | UNW_FP(frame)) % sizeof(unsigned long))
+                       return -EPERM;
                top = STACK_TOP(frame->task);
                bottom = STACK_BOTTOM(frame->task);
 # if FRAME_RETADDR_OFFSET < 0
@@ -982,18 +1070,19 @@ int unwind(struct unwind_frame_info *frame)
                        & (sizeof(unsigned long) - 1))) {
                        unsigned long link;
 
-                       if (!__get_user(link,
+                       if (!probe_kernel_address(
                                        (unsigned long *)(UNW_FP(frame)
-                                                         + FRAME_LINK_OFFSET))
+                                                         + FRAME_LINK_OFFSET),
+                                                 link)
 # if FRAME_RETADDR_OFFSET < 0
                           && link > bottom && link < UNW_FP(frame)
 # else
                           && link > UNW_FP(frame) && link < bottom
 # endif
                           && !(link & (sizeof(link) - 1))
-                          && !__get_user(UNW_PC(frame),
+                          && !probe_kernel_address(
                                          (unsigned long *)(UNW_FP(frame)
-                                                           + FRAME_RETADDR_OFFSET))) {
+                                                           + FRAME_RETADDR_OFFSET), UNW_PC(frame))) {
                                UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET
 # if FRAME_RETADDR_OFFSET < 0
                                        -
@@ -1016,8 +1105,11 @@ int unwind(struct unwind_frame_info *frame)
           || state.regs[retAddrReg].where == Nowhere
           || state.cfa.reg >= ARRAY_SIZE(reg_info)
           || reg_info[state.cfa.reg].width != sizeof(unsigned long)
-          || state.cfa.offs % sizeof(unsigned long))
+          || FRAME_REG(state.cfa.reg, unsigned long) % sizeof(unsigned long)
+          || state.cfa.offs % sizeof(unsigned long)) {
+               dprintk(1, "Unusable unwind info (%p,%p).", ptr, end);
                return -EIO;
+       }
        /* update frame */
 #ifndef CONFIG_AS_CFI_SIGNAL_FRAME
        if(frame->call_frame
@@ -1036,10 +1128,14 @@ int unwind(struct unwind_frame_info *frame)
 #else
 # define CASES CASE(8); CASE(16); CASE(32); CASE(64)
 #endif
+       pc = UNW_PC(frame);
+       sp = UNW_SP(frame);
        for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
                if (REG_INVALID(i)) {
                        if (state.regs[i].where == Nowhere)
                                continue;
+                       dprintk(1, "Cannot restore register %u (%d).",
+                               i, state.regs[i].where);
                        return -EIO;
                }
                switch(state.regs[i].where) {
@@ -1048,8 +1144,11 @@ int unwind(struct unwind_frame_info *frame)
                case Register:
                        if (state.regs[i].value >= ARRAY_SIZE(reg_info)
                           || REG_INVALID(state.regs[i].value)
-                          || reg_info[i].width > reg_info[state.regs[i].value].width)
+                          || reg_info[i].width > reg_info[state.regs[i].value].width) {
+                               dprintk(1, "Cannot restore register %u from register %lu.",
+                                       i, state.regs[i].value);
                                return -EIO;
+                       }
                        switch(reg_info[state.regs[i].value].width) {
 #define CASE(n) \
                        case sizeof(u##n): \
@@ -1059,6 +1158,9 @@ int unwind(struct unwind_frame_info *frame)
                        CASES;
 #undef CASE
                        default:
+                               dprintk(1, "Unsupported register size %u (%lu).",
+                                       reg_info[state.regs[i].value].width,
+                                       state.regs[i].value);
                                return -EIO;
                        }
                        break;
@@ -1083,12 +1185,17 @@ int unwind(struct unwind_frame_info *frame)
                        CASES;
 #undef CASE
                        default:
+                               dprintk(1, "Unsupported register size %u (%u).",
+                                       reg_info[i].width, i);
                                return -EIO;
                        }
                        break;
                case Value:
-                       if (reg_info[i].width != sizeof(unsigned long))
+                       if (reg_info[i].width != sizeof(unsigned long)) {
+                               dprintk(1, "Unsupported value size %u (%u).",
+                                       reg_info[i].width, i);
                                return -EIO;
+                       }
                        FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
                                                            * state.dataAlign;
                        break;
@@ -1100,15 +1207,20 @@ int unwind(struct unwind_frame_info *frame)
                                    % sizeof(unsigned long)
                                    || addr < startLoc
                                    || addr + sizeof(unsigned long) < addr
-                                   || addr + sizeof(unsigned long) > endLoc)
+                                   || addr + sizeof(unsigned long) > endLoc) {
+                                       dprintk(1, "Bad memory location %lx (%lx).",
+                                               addr, state.regs[i].value);
                                        return -EIO;
+                               }
                                switch(reg_info[i].width) {
 #define CASE(n)     case sizeof(u##n): \
-                                       __get_user(FRAME_REG(i, u##n), (u##n *)addr); \
+                                       probe_kernel_address((u##n *)addr, FRAME_REG(i, u##n)); \
                                        break
                                CASES;
 #undef CASE
                                default:
+                                       dprintk(1, "Unsupported memory size %u (%u).",
+                                               reg_info[i].width, i);
                                        return -EIO;
                                }
                        }
@@ -1116,6 +1228,17 @@ int unwind(struct unwind_frame_info *frame)
                }
        }
 
+       if (UNW_PC(frame) % state.codeAlign
+           || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
+               dprintk(1, "Output pointer(s) misaligned (%lx,%lx).",
+                       UNW_PC(frame), UNW_SP(frame));
+               return -EIO;
+       }
+       if (pc == UNW_PC(frame) && sp == UNW_SP(frame)) {
+               dprintk(1, "No progress (%lx,%lx).", pc, sp);
+               return -EIO;
+       }
+
        return 0;
 #undef CASES
 #undef FRAME_REG
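The unwind_debug= parameter and dprintk() macro added above gate every diagnostic on a level check done with a short-circuit || rather than an if, so the macro can be used as a plain expression statement. A userspace rendering of the same idiom (debug_level stands in for unwind_debug):

#include <stdio.h>

static unsigned int debug_level = 1;    /* what booting with unwind_debug=1 would set */

/* the (void) discards the result of ||; printf only runs when lvl is low enough */
#define dprintk(lvl, fmt, args...) \
        ((void)((lvl) > debug_level || printf("unwind: " fmt "\n", ##args)))

int main(void)
{
        dprintk(1, "level %u message: printed", debug_level);
        dprintk(3, "level %u message: suppressed", debug_level);
        return 0;
}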
index 220e586127a0ca9cd65b3ccfd6b01c4620fef2e7..4869563080e9e080954d26f34b70bbffc13de18b 100644 (file)
@@ -26,7 +26,7 @@
 #define __uidhashfn(uid)       (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
 #define uidhashentry(uid)      (uidhash_table + __uidhashfn((uid)))
 
-static kmem_cache_t *uid_cachep;
+static struct kmem_cache *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
 
 /*
@@ -132,7 +132,7 @@ struct user_struct * alloc_uid(uid_t uid)
        if (!up) {
                struct user_struct *new;
 
-               new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+               new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
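Several files in this commit (kernel/signal.c, kernel/taskstats.c and user.c here) move from the old kmem_cache_t typedef and the SLAB_KERNEL flag to struct kmem_cache and GFP_KERNEL. A hedged sketch of the updated slab usage; the object and cache are invented, and the six-argument kmem_cache_create() (with constructor/destructor slots) is the form this kernel generation still uses:

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

struct my_obj {
        int id;
};

static struct kmem_cache *my_cachep;    /* was: static kmem_cache_t *my_cachep */

static int __init my_cache_init(void)
{
        my_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                      0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        return my_cachep ? 0 : -ENOMEM;
}

static struct my_obj *my_obj_alloc(void)
{
        /* was: kmem_cache_alloc(my_cachep, SLAB_KERNEL) */
        return kmem_cache_zalloc(my_cachep, GFP_KERNEL);
}

static void my_obj_free(struct my_obj *obj)
{
        kmem_cache_free(my_cachep, obj);
}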
index 17c2f03d2c27f9928cf175db8b01a929426d130c..6b186750e9be1be41c8951160fb8db9dddd80aa8 100644 (file)
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@ struct cpu_workqueue_struct {
        struct task_struct *thread;
 
        int run_depth;          /* Detect run_workqueue() recursion depth */
+
+       int freezeable;         /* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -80,6 +85,102 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
        return list_empty(&wq->list);
 }
 
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+       unsigned long new, old, res;
+
+       /* assume the pending flag is already set and that the task has already
+        * been queued on this workqueue */
+       new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+       res = work->management;
+       if (res != new) {
+               do {
+                       old = res;
+                       new = (unsigned long) wq;
+                       new |= (old & WORK_STRUCT_FLAG_MASK);
+                       res = cmpxchg(&work->management, old, new);
+               } while (res != old);
+       }
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+       return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
+}
+
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cwq->lock, flags);
+       /*
+        * We need to re-validate the work info after we've gotten
+        * the cpu_workqueue lock. We can run the work now iff:
+        *
+        *  - the wq_data still matches the cpu_workqueue_struct
+        *  - AND the work is still marked pending
+        *  - AND the work is still on a list (which will be this
+        *    workqueue_struct list)
+        *
+        * All these conditions are important, because we
+        * need to protect against the work being run right
+        * now on another CPU (all but the last one might be
+        * true if it's currently running and has not been
+        * released yet, for example).
+        */
+       if (get_wq_data(work) == cwq
+           && work_pending(work)
+           && !list_empty(&work->entry)) {
+               work_func_t f = work->func;
+               list_del_init(&work->entry);
+               spin_unlock_irqrestore(&cwq->lock, flags);
+
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+                       work_release(work);
+               f(work);
+
+               spin_lock_irqsave(&cwq->lock, flags);
+               cwq->remove_sequence++;
+               wake_up(&cwq->work_done);
+               ret = 1;
+       }
+       spin_unlock_irqrestore(&cwq->lock, flags);
+       return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+       for (;;) {
+               struct cpu_workqueue_struct *cwq;
+
+               if (!work_pending(work))
+                       return 0;
+               if (list_empty(&work->entry))
+                       return 0;
+               /* NOTE! This depends intimately on __queue_work! */
+               cwq = get_wq_data(work);
+               if (!cwq)
+                       return 0;
+               if (__run_work(cwq, work))
+                       return 1;
+       }
+}
+EXPORT_SYMBOL(run_scheduled_work);
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
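
set_wq_data() and get_wq_data() above work because work_struct now carries a single 'management' word: the owning queue pointer lives in the high, alignment-guaranteed bits, and flags such as WORK_STRUCT_PENDING and WORK_STRUCT_NOAUTOREL live in the low bits, masked off by WORK_STRUCT_WQ_DATA_MASK. A simplified illustration of that packing (a fragment, not a complete function):

    /* given: cwq points at the cpu_workqueue_struct the work was queued on */
    unsigned long management = (unsigned long)cwq | (1UL << WORK_STRUCT_PENDING);

    /* later: mask the low flag bits off again to recover the pointer */
    cwq = (struct cpu_workqueue_struct *)(management & WORK_STRUCT_WQ_DATA_MASK);
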
@@ -87,7 +188,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
        unsigned long flags;
 
        spin_lock_irqsave(&cwq->lock, flags);
-       work->wq_data = cwq;
+       set_wq_data(work, cwq);
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
@@ -108,7 +209,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
        int ret = 0, cpu = get_cpu();
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
@@ -122,38 +223,42 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-       struct work_struct *work = (struct work_struct *)__data;
-       struct workqueue_struct *wq = work->wq_data;
+       struct delayed_work *dwork = (struct delayed_work *)__data;
+       struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();
 
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
 
-       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @work: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
+
+       if (delay == 0)
+               return queue_work(wq, work);
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                /* This stores wq for the moment, for the timer_fn */
-               work->wq_data = wq;
+               set_wq_data(work, wq);
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
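
This hunk shows the user-visible half of the work_struct rework that runs through the whole merge: handlers receive the work_struct pointer itself (there is no separate data argument any more), and delayed work becomes its own struct delayed_work bundling the work item with its timer. A driver-side sketch of the new style; my_dev, my_poll and the one-second period are illustrative, and INIT_DELAYED_WORK is the initializer that accompanies this API (its exact form is an assumption here, not shown in the diff):

    #include <linux/workqueue.h>

    struct my_dev {                         /* hypothetical driver state */
            struct delayed_work poll_work;
            int events;
    };

    static void my_poll(struct work_struct *work)
    {
            /* no void *data: recover the containing object instead */
            struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

            dev->events = 0;
            schedule_delayed_work(&dev->poll_work, HZ);     /* re-arm */
    }

    static void my_dev_start(struct my_dev *dev)
    {
            INIT_DELAYED_WORK(&dev->poll_work, my_poll);
            schedule_delayed_work(&dev->poll_work, HZ);
    }

A self-rearming handler like my_poll() is exactly the case that cancel_rearming_delayed_work(), further down, exists to stop reliably.
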
@@ -172,19 +277,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                /* This stores wq for the moment, for the timer_fn */
-               work->wq_data = wq;
+               set_wq_data(work, wq);
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
@@ -212,15 +318,26 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
-               void (*f) (void *) = work->func;
-               void *data = work->data;
+               work_func_t f = work->func;
 
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);
 
-               BUG_ON(work->wq_data != cwq);
-               clear_bit(0, &work->pending);
-               f(data);
+               BUG_ON(get_wq_data(work) != cwq);
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
+                       work_release(work);
+               f(work);
+
+               if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+                       printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+                                       "%s/0x%08x/%d\n",
+                                       current->comm, preempt_count(),
+                                       current->pid);
+                       printk(KERN_ERR "    last function: ");
+                       print_symbol("%s\n", (unsigned long)f);
+                       debug_show_held_locks(current);
+                       dump_stack();
+               }
 
                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
@@ -237,7 +354,8 @@ static int worker_thread(void *__cwq)
        struct k_sigaction sa;
        sigset_t blocked;
 
-       current->flags |= PF_NOFREEZE;
+       if (!cwq->freezeable)
+               current->flags |= PF_NOFREEZE;
 
        set_user_nice(current, -5);
 
@@ -260,6 +378,9 @@ static int worker_thread(void *__cwq)
 
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
+               if (cwq->freezeable)
+                       try_to_freeze();
+
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
@@ -336,7 +457,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-                                                  int cpu)
+                                                  int cpu, int freezeable)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;
@@ -346,6 +467,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
+       cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);
@@ -361,7 +483,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-                                           int singlethread)
+                                           int singlethread, int freezeable)
 {
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
@@ -381,7 +503,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
-               p = create_workqueue_thread(wq, singlethread_cpu);
+               p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
@@ -389,7 +511,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
-                       p = create_workqueue_thread(wq, cpu);
+                       p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
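
__create_workqueue() grows a third argument: a freezeable queue's worker threads are not marked PF_NOFREEZE and call try_to_freeze() (see the hunks above), so they are parked during suspend instead of racing with it. A fragment showing direct creation; the kernel also gained wrapper macros for this (create_freezeable_workqueue() and friends), whose exact argument mapping is assumed rather than shown in this diff:

    struct workqueue_struct *wq;

    /* args: name, singlethread = 0, freezeable = 1 */
    wq = __create_workqueue("mydrv", 0, 1);
    if (!wq)
            return -ENOMEM;         /* in the caller's init path */
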
@@ -468,38 +590,37 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work(keventd_wq, work, delay);
+       return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+       return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -508,7 +629,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
        struct work_struct *works;
@@ -519,7 +640,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 
        mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
-               INIT_WORK(per_cpu_ptr(works, cpu), func, info);
+               INIT_WORK(per_cpu_ptr(works, cpu), func);
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
                                per_cpu_ptr(works, cpu));
        }
@@ -539,12 +660,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *                     work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                      struct work_struct *work)
+                                      struct delayed_work *dwork)
 {
-       while (!cancel_delayed_work(work))
+       while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +673,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *                     work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-       cancel_rearming_delayed_workqueue(keventd_wq, work);
+       cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:                the function to execute
- * @data:      data to pass to the function
  * @ew:                guaranteed storage for the execute work structure (must
  *             be available when the work executes)
  *
@@ -573,15 +693,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:    0 - function was executed
  *             1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-                              struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
        if (!in_interrupt()) {
-               fn(data);
+               fn(&ew->work);
                return 0;
        }
 
-       INIT_WORK(&ew->work, fn, data);
+       INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);
 
        return 1;
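
execute_in_process_context() follows suit: the caller supplies storage for an execute_work, and the routine either runs immediately (process context) or is handed to keventd (interrupt context). Because the function now receives the embedded work_struct, private data is again reached through container_of(). A sketch with a hypothetical context object:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_ctx {                         /* illustrative */
            struct execute_work ew;         /* must stay valid until the work runs */
            void *resources;
    };

    static void my_release(struct work_struct *work)
    {
            struct my_ctx *ctx = container_of(work, struct my_ctx, ew.work);

            kfree(ctx->resources);
            kfree(ctx);
    }

    static void my_teardown(struct my_ctx *ctx)
    {
            /* runs my_release() now, or defers it when called from interrupt context */
            execute_in_process_context(my_release, &ctx->ew);
    }
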
@@ -609,7 +728,6 @@ int current_is_keventd(void)
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -642,7 +760,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
-                       if (!create_workqueue_thread(wq, hotcpu)) {
+                       if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
@@ -692,7 +810,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {
index d3679103a8e4eff9baafdfd30f41e9f28aefe7ef..b75fed737f2544651d58896215eccb0b80d49f73 100644 (file)
@@ -1,6 +1,7 @@
 
 config PRINTK_TIME
        bool "Show timing information on printks"
+       depends on PRINTK
        help
          Selecting this option causes timing information to be
          included in printk output.  This allows you to measure
index cf98fabaa549524d2f882d06649abfaa4c91239d..fea8f9035f07c13afb770f5781fa145d9dd6b70f 100644 (file)
@@ -25,7 +25,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
index 0331ed825ea79e568a881852020a80adc3d4d658..8a5b5303bd4f7a78187b4138eb08945c6333c10c 100644 (file)
 #include <linux/kernel.h>
 #include <linux/string.h>
 
+/*
+ *     If a hyphen was found in get_option, this will handle the
+ *     range of numbers, M-N.  This will expand the range and insert
+ *     the values [M, M+1, ..., N] into the ints array in get_options.
+ */
+
+static int get_range(char **str, int *pint)
+{
+       int x, inc_counter, upper_range;
+
+       (*str)++;
+       upper_range = simple_strtol((*str), NULL, 0);
+       inc_counter = upper_range - *pint;
+       for (x = *pint; x < upper_range; x++)
+               *pint++ = x;
+       return inc_counter;
+}
 
 /**
  *     get_option - Parse integer from an option string
@@ -29,6 +46,7 @@
  *     0 : no int in string
  *     1 : int found, no subsequent comma
  *     2 : int found including a subsequent comma
+ *     3 : hyphen found to denote a range
  */
 
 int get_option (char **str, int *pint)
@@ -44,6 +62,8 @@ int get_option (char **str, int *pint)
                (*str)++;
                return 2;
        }
+       if (**str == '-')
+               return 3;
 
        return 1;
 }
@@ -55,7 +75,8 @@ int get_option (char **str, int *pint)
  *     @ints: integer array
  *
  *     This function parses a string containing a comma-separated
- *     list of integers.  The parse halts when the array is
+ *     list of integers, a hyphen-separated range of _positive_ integers,
+ *     or a combination of both.  The parse halts when the array is
  *     full, or when no more numbers can be retrieved from the
  *     string.
  *
@@ -72,6 +93,18 @@ char *get_options(const char *str, int nints, int *ints)
                res = get_option ((char **)&str, ints + i);
                if (res == 0)
                        break;
+               if (res == 3) {
+                       int range_nums;
+                       range_nums = get_range((char **)&str, ints + i);
+                       if (range_nums < 0)
+                               break;
+                       /*
+                        * Decrement the result by one to leave out the
+                        * last number in the range.  The next iteration
+                        * will handle the upper number in the range
+                        */
+                       i += (range_nums - 1);
+               }
                i++;
                if (res == 1)
                        break;
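
With the range support in place, a parameter string such as "1-3,5" expands in the caller's array. A sketch of what a caller of get_options() sees; the count-in-ints[0] convention is get_options()' existing behaviour, not something added by this hunk:

    int ints[8];

    /*
     * after parsing "1-3,5":
     *   ints[0]    = 4           (number of values parsed)
     *   ints[1..4] = 1, 2, 3, 5  (the range is expanded in place)
     */
    get_options("1-3,5", ARRAY_SIZE(ints), ints);
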
index 16d2143fea4847e5c367e56d87db654599109713..71853531d3b0f6be6c0db0420ca8d646dd884469 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/idr.h>
 
-static kmem_cache_t *idr_layer_cache;
+static struct kmem_cache *idr_layer_cache;
 
 static struct idr_layer *alloc_layer(struct idr *idp)
 {
@@ -445,7 +445,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
+static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache,
                unsigned long flags)
 {
        memset(idr_layer, 0, sizeof(struct idr_layer));
index 55689c5d3379dbde2013f8cf3509bef6e9d74598..d6ccdd85df5311a4326bf584920a1293dc11243c 100644 (file)
        }                                                       \
 } while (0)
 
+#ifndef pio_read16be
+#define pio_read16be(port) swab16(inw(port))
+#define pio_read32be(port) swab32(inl(port))
+#endif
+
+#ifndef mmio_read16be
+#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
+#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
+#endif
+
 unsigned int fastcall ioread8(void __iomem *addr)
 {
        IO_COND(addr, return inb(port), return readb(addr));
@@ -60,7 +70,7 @@ unsigned int fastcall ioread16(void __iomem *addr)
 }
 unsigned int fastcall ioread16be(void __iomem *addr)
 {
-       IO_COND(addr, return inw(port), return be16_to_cpu(__raw_readw(addr)));
+       IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
 }
 unsigned int fastcall ioread32(void __iomem *addr)
 {
@@ -68,7 +78,7 @@ unsigned int fastcall ioread32(void __iomem *addr)
 }
 unsigned int fastcall ioread32be(void __iomem *addr)
 {
-       IO_COND(addr, return inl(port), return be32_to_cpu(__raw_readl(addr)));
+       IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
 }
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -76,6 +86,16 @@ EXPORT_SYMBOL(ioread16be);
 EXPORT_SYMBOL(ioread32);
 EXPORT_SYMBOL(ioread32be);
 
+#ifndef pio_write16be
+#define pio_write16be(val,port) outw(swab16(val),port)
+#define pio_write32be(val,port) outl(swab32(val),port)
+#endif
+
+#ifndef mmio_write16be
+#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
+#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
+#endif
+
 void fastcall iowrite8(u8 val, void __iomem *addr)
 {
        IO_COND(addr, outb(val,port), writeb(val, addr));
@@ -86,7 +106,7 @@ void fastcall iowrite16(u16 val, void __iomem *addr)
 }
 void fastcall iowrite16be(u16 val, void __iomem *addr)
 {
-       IO_COND(addr, outw(val,port), __raw_writew(cpu_to_be16(val), addr));
+       IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
 }
 void fastcall iowrite32(u32 val, void __iomem *addr)
 {
@@ -94,7 +114,7 @@ void fastcall iowrite32(u32 val, void __iomem *addr)
 }
 void fastcall iowrite32be(u32 val, void __iomem *addr)
 {
-       IO_COND(addr, outl(val,port), __raw_writel(cpu_to_be32(val), addr));
+       IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
 }
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -108,6 +128,7 @@ EXPORT_SYMBOL(iowrite32be);
  * convert to CPU byte order. We write in "IO byte
  * order" (we also don't have IO barriers).
  */
+#ifndef mmio_insb
 static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
 {
        while (--count >= 0) {
@@ -132,7 +153,9 @@ static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
                dst++;
        }
 }
+#endif
 
+#ifndef mmio_outsb
 static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
 {
        while (--count >= 0) {
@@ -154,6 +177,7 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
                src++;
        }
 }
+#endif
 
 void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
 {
index 744a4b102c7fa595b09808f8a6f949318a1f57a1..7ce6dc138e90ed391618b222847a664024daf402 100644 (file)
@@ -111,10 +111,9 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
        len = get_kobj_path_length(kobj);
        if (len == 0)
                return NULL;
-       path = kmalloc(len, gfp_mask);
+       path = kzalloc(len, gfp_mask);
        if (!path)
                return NULL;
-       memset(path, 0x00, len);
        fill_kobj_path(kobj, path, len);
 
        return path;
index 7ba9d823d388665cdef6cb647a13cc0bd41ddb75..4350ba9655bd182c966c1fc40909ace292c8a4a6 100644 (file)
@@ -21,13 +21,15 @@ void __list_add(struct list_head *new,
                              struct list_head *next)
 {
        if (unlikely(next->prev != prev)) {
-               printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
-                       prev, next->prev);
+               printk(KERN_ERR "list_add corruption. next->prev should be "
+                       "prev (%p), but was %p. (next=%p).\n",
+                       prev, next->prev, next);
                BUG();
        }
        if (unlikely(prev->next != next)) {
-               printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
-                       next, prev->next);
+               printk(KERN_ERR "list_add corruption. prev->next should be "
+                       "next (%p), but was %p. (prev=%p).\n",
+                       next, prev->next, prev);
                BUG();
        }
        next->prev = new;
index 7945787f439a2152138da4b85c1604a8bed53c81..280332c1827cbb7b1d183e349f8eb1ba35cd2f8e 100644 (file)
@@ -963,7 +963,9 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
                        printk("failed|");
                } else {
                        unexpected_testcase_failures++;
+
                        printk("FAILED|");
+                       dump_stack();
                }
        } else {
                testcase_successes++;
index aa9bfd0bdbd1bd720ed1eb8c5416b180bad3b98a..d69ddbe438655be6b65672d2c7366371baadfd85 100644 (file)
@@ -2,6 +2,7 @@
  * Copyright (C) 2001 Momchil Velikov
  * Portions Copyright (C) 2001 Christoph Hellwig
  * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2006 Nick Piggin
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
@@ -30,6 +31,7 @@
 #include <linux/gfp.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
+#include <linux/rcupdate.h>
 
 
 #ifdef __KERNEL__
@@ -45,7 +47,9 @@
        ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 struct radix_tree_node {
+       unsigned int    height;         /* Height from the bottom */
        unsigned int    count;
+       struct rcu_head rcu_head;
        void            *slots[RADIX_TREE_MAP_SIZE];
        unsigned long   tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
@@ -63,7 +67,7 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly;
 /*
  * Radix tree node cache.
  */
-static kmem_cache_t *radix_tree_node_cachep;
+static struct kmem_cache *radix_tree_node_cachep;
 
 /*
  * Per-cpu pool of preloaded nodes
@@ -100,13 +104,21 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                        rtp->nr--;
                }
        }
+       BUG_ON(radix_tree_is_direct_ptr(ret));
        return ret;
 }
 
+static void radix_tree_node_rcu_free(struct rcu_head *head)
+{
+       struct radix_tree_node *node =
+                       container_of(head, struct radix_tree_node, rcu_head);
+       kmem_cache_free(radix_tree_node_cachep, node);
+}
+
 static inline void
 radix_tree_node_free(struct radix_tree_node *node)
 {
-       kmem_cache_free(radix_tree_node_cachep, node);
+       call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 }
 
 /*
@@ -222,11 +234,12 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
        }
 
        do {
+               unsigned int newheight;
                if (!(node = radix_tree_node_alloc(root)))
                        return -ENOMEM;
 
                /* Increase the height.  */
-               node->slots[0] = root->rnode;
+               node->slots[0] = radix_tree_direct_to_ptr(root->rnode);
 
                /* Propagate the aggregated tag info into the new root */
                for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -234,9 +247,11 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
                                tag_set(node, tag, 0);
                }
 
+               newheight = root->height+1;
+               node->height = newheight;
                node->count = 1;
-               root->rnode = node;
-               root->height++;
+               rcu_assign_pointer(root->rnode, node);
+               root->height = newheight;
        } while (height > root->height);
 out:
        return 0;
@@ -258,6 +273,8 @@ int radix_tree_insert(struct radix_tree_root *root,
        int offset;
        int error;
 
+       BUG_ON(radix_tree_is_direct_ptr(item));
+
        /* Make sure the tree is high enough.  */
        if (index > radix_tree_maxindex(root->height)) {
                error = radix_tree_extend(root, index);
@@ -275,11 +292,12 @@ int radix_tree_insert(struct radix_tree_root *root,
                        /* Have to add a child node.  */
                        if (!(slot = radix_tree_node_alloc(root)))
                                return -ENOMEM;
+                       slot->height = height;
                        if (node) {
-                               node->slots[offset] = slot;
+                               rcu_assign_pointer(node->slots[offset], slot);
                                node->count++;
                        } else
-                               root->rnode = slot;
+                               rcu_assign_pointer(root->rnode, slot);
                }
 
                /* Go a level down */
@@ -295,11 +313,11 @@ int radix_tree_insert(struct radix_tree_root *root,
 
        if (node) {
                node->count++;
-               node->slots[offset] = item;
+               rcu_assign_pointer(node->slots[offset], item);
                BUG_ON(tag_get(node, 0, offset));
                BUG_ON(tag_get(node, 1, offset));
        } else {
-               root->rnode = item;
+               rcu_assign_pointer(root->rnode, radix_tree_ptr_to_direct(item));
                BUG_ON(root_tag_get(root, 0));
                BUG_ON(root_tag_get(root, 1));
        }
@@ -308,49 +326,54 @@ int radix_tree_insert(struct radix_tree_root *root,
 }
 EXPORT_SYMBOL(radix_tree_insert);
 
-static inline void **__lookup_slot(struct radix_tree_root *root,
-                                  unsigned long index)
+/**
+ *     radix_tree_lookup_slot    -    lookup a slot in a radix tree
+ *     @root:          radix tree root
+ *     @index:         index key
+ *
+ *     Returns:  the slot corresponding to the position @index in the
+ *     radix tree @root. This is useful for update-if-exists operations.
+ *
+ *     This function cannot be called under rcu_read_lock; it must be
+ *     excluded from writers, as must the returned slot for subsequent
+ *     use by radix_tree_deref_slot() and radix_tree_replace_slot().
+ *     Caller must hold tree write locked across slot lookup and
+ *     replace.
+ */
+void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
 {
        unsigned int height, shift;
-       struct radix_tree_node **slot;
-
-       height = root->height;
+       struct radix_tree_node *node, **slot;
 
-       if (index > radix_tree_maxindex(height))
+       node = root->rnode;
+       if (node == NULL)
                return NULL;
 
-       if (height == 0 && root->rnode)
+       if (radix_tree_is_direct_ptr(node)) {
+               if (index > 0)
+                       return NULL;
                return (void **)&root->rnode;
+       }
+
+       height = node->height;
+       if (index > radix_tree_maxindex(height))
+               return NULL;
 
        shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-       slot = &root->rnode;
 
-       while (height > 0) {
-               if (*slot == NULL)
+       do {
+               slot = (struct radix_tree_node **)
+                       (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+               node = *slot;
+               if (node == NULL)
                        return NULL;
 
-               slot = (struct radix_tree_node **)
-                       ((*slot)->slots +
-                               ((index >> shift) & RADIX_TREE_MAP_MASK));
                shift -= RADIX_TREE_MAP_SHIFT;
                height--;
-       }
+       } while (height > 0);
 
        return (void **)slot;
 }
-
-/**
- *     radix_tree_lookup_slot    -    lookup a slot in a radix tree
- *     @root:          radix tree root
- *     @index:         index key
- *
- *     Lookup the slot corresponding to the position @index in the radix tree
- *     @root. This is useful for update-if-exists operations.
- */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
-{
-       return __lookup_slot(root, index);
-}
 EXPORT_SYMBOL(radix_tree_lookup_slot);
 
 /**
@@ -359,13 +382,45 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  *     @index:         index key
  *
  *     Lookup the item at the position @index in the radix tree @root.
+ *
+ *     This function can be called under rcu_read_lock, but the caller
+ *     must manage the lifetimes of leaf nodes (e.g. RCU may also be used
+ *     to free them safely).  No RCU barriers are required to access or
+ *     modify the returned item itself.
  */
 void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 {
-       void **slot;
+       unsigned int height, shift;
+       struct radix_tree_node *node, **slot;
+
+       node = rcu_dereference(root->rnode);
+       if (node == NULL)
+               return NULL;
+
+       if (radix_tree_is_direct_ptr(node)) {
+               if (index > 0)
+                       return NULL;
+               return radix_tree_direct_to_ptr(node);
+       }
+
+       height = node->height;
+       if (index > radix_tree_maxindex(height))
+               return NULL;
+
+       shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
-       slot = __lookup_slot(root, index);
-       return slot != NULL ? *slot : NULL;
+       do {
+               slot = (struct radix_tree_node **)
+                       (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
+               node = rcu_dereference(*slot);
+               if (node == NULL)
+                       return NULL;
+
+               shift -= RADIX_TREE_MAP_SHIFT;
+               height--;
+       } while (height > 0);
+
+       return node;
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
@@ -495,27 +550,30 @@ int radix_tree_tag_get(struct radix_tree_root *root,
                        unsigned long index, unsigned int tag)
 {
        unsigned int height, shift;
-       struct radix_tree_node *slot;
+       struct radix_tree_node *node;
        int saw_unset_tag = 0;
 
-       height = root->height;
-       if (index > radix_tree_maxindex(height))
-               return 0;
-
        /* check the root's tag bit */
        if (!root_tag_get(root, tag))
                return 0;
 
-       if (height == 0)
-               return 1;
+       node = rcu_dereference(root->rnode);
+       if (node == NULL)
+               return 0;
+
+       if (radix_tree_is_direct_ptr(node))
+               return (index == 0);
+
+       height = node->height;
+       if (index > radix_tree_maxindex(height))
+               return 0;
 
        shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-       slot = root->rnode;
 
        for ( ; ; ) {
                int offset;
 
-               if (slot == NULL)
+               if (node == NULL)
                        return 0;
 
                offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -524,15 +582,15 @@ int radix_tree_tag_get(struct radix_tree_root *root,
                 * This is just a debug check.  Later, we can bail as soon as
                 * we see an unset tag.
                 */
-               if (!tag_get(slot, tag, offset))
+               if (!tag_get(node, tag, offset))
                        saw_unset_tag = 1;
                if (height == 1) {
-                       int ret = tag_get(slot, tag, offset);
+                       int ret = tag_get(node, tag, offset);
 
                        BUG_ON(ret && saw_unset_tag);
                        return !!ret;
                }
-               slot = slot->slots[offset];
+               node = rcu_dereference(node->slots[offset]);
                shift -= RADIX_TREE_MAP_SHIFT;
                height--;
        }
@@ -541,47 +599,45 @@ EXPORT_SYMBOL(radix_tree_tag_get);
 #endif
 
 static unsigned int
-__lookup(struct radix_tree_root *root, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
        unsigned int max_items, unsigned long *next_index)
 {
        unsigned int nr_found = 0;
        unsigned int shift, height;
-       struct radix_tree_node *slot;
        unsigned long i;
 
-       height = root->height;
-       if (height == 0) {
-               if (root->rnode && index == 0)
-                       results[nr_found++] = root->rnode;
+       height = slot->height;
+       if (height == 0)
                goto out;
-       }
-
        shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-       slot = root->rnode;
 
        for ( ; height > 1; height--) {
-
-               for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
-                               i < RADIX_TREE_MAP_SIZE; i++) {
+               i = (index >> shift) & RADIX_TREE_MAP_MASK;
+               for (;;) {
                        if (slot->slots[i] != NULL)
                                break;
                        index &= ~((1UL << shift) - 1);
                        index += 1UL << shift;
                        if (index == 0)
                                goto out;       /* 32-bit wraparound */
+                       i++;
+                       if (i == RADIX_TREE_MAP_SIZE)
+                               goto out;
                }
-               if (i == RADIX_TREE_MAP_SIZE)
-                       goto out;
 
                shift -= RADIX_TREE_MAP_SHIFT;
-               slot = slot->slots[i];
+               slot = rcu_dereference(slot->slots[i]);
+               if (slot == NULL)
+                       goto out;
        }
 
        /* Bottom level: grab some items */
        for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
+               struct radix_tree_node *node;
                index++;
-               if (slot->slots[i]) {
-                       results[nr_found++] = slot->slots[i];
+               node = slot->slots[i];
+               if (node) {
+                       results[nr_found++] = rcu_dereference(node);
                        if (nr_found == max_items)
                                goto out;
                }
@@ -603,28 +659,51 @@ out:
  *     *@results.
  *
  *     The implementation is naive.
+ *
+ *     Like radix_tree_lookup, radix_tree_gang_lookup may be called under
+ *     rcu_read_lock. In this case, rather than the returned results being
+ *     an atomic snapshot of the tree at a single point in time, the semantics
+ *     of an RCU-protected gang lookup are as though multiple radix_tree_lookups
+ *     have been issued individually, and the results stored in 'results'.
  */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                        unsigned long first_index, unsigned int max_items)
 {
-       const unsigned long max_index = radix_tree_maxindex(root->height);
+       unsigned long max_index;
+       struct radix_tree_node *node;
        unsigned long cur_index = first_index;
-       unsigned int ret = 0;
+       unsigned int ret;
+
+       node = rcu_dereference(root->rnode);
+       if (!node)
+               return 0;
 
+       if (radix_tree_is_direct_ptr(node)) {
+               if (first_index > 0)
+                       return 0;
+               node = radix_tree_direct_to_ptr(node);
+               results[0] = rcu_dereference(node);
+               return 1;
+       }
+
+       max_index = radix_tree_maxindex(node->height);
+
+       ret = 0;
        while (ret < max_items) {
                unsigned int nr_found;
                unsigned long next_index;       /* Index of next search */
 
                if (cur_index > max_index)
                        break;
-               nr_found = __lookup(root, results + ret, cur_index,
+               nr_found = __lookup(node, results + ret, cur_index,
                                        max_items - ret, &next_index);
                ret += nr_found;
                if (next_index == 0)
                        break;
                cur_index = next_index;
        }
+
        return ret;
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup);
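
Gang lookup gets the same treatment, with the caveat spelled out in the comment above: under RCU the batch is not a snapshot, and each slot is only as consistent as an individual lookup would be. A sketch of a lockless scan, reusing the hypothetical my_tree from the earlier example:

    static void my_scan(void)
    {
            void *batch[16];
            unsigned int n, i;

            rcu_read_lock();
            n = radix_tree_gang_lookup(&my_tree, batch, 0, ARRAY_SIZE(batch));
            for (i = 0; i < n; i++)
                    process_item(batch[i]); /* process_item() is illustrative */
            rcu_read_unlock();
    }
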
@@ -634,55 +713,64 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  * open-coding the search.
  */
 static unsigned int
-__lookup_tag(struct radix_tree_root *root, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
        unsigned int max_items, unsigned long *next_index, unsigned int tag)
 {
        unsigned int nr_found = 0;
-       unsigned int shift;
-       unsigned int height = root->height;
-       struct radix_tree_node *slot;
+       unsigned int shift, height;
 
-       if (height == 0) {
-               if (root->rnode && index == 0)
-                       results[nr_found++] = root->rnode;
+       height = slot->height;
+       if (height == 0)
                goto out;
-       }
-
-       shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-       slot = root->rnode;
+       shift = (height-1) * RADIX_TREE_MAP_SHIFT;
 
-       do {
-               unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
+       while (height > 0) {
+               unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK ;
 
-               for ( ; i < RADIX_TREE_MAP_SIZE; i++) {
-                       if (tag_get(slot, tag, i)) {
-                               BUG_ON(slot->slots[i] == NULL);
+               for (;;) {
+                       if (tag_get(slot, tag, i))
                                break;
-                       }
                        index &= ~((1UL << shift) - 1);
                        index += 1UL << shift;
                        if (index == 0)
                                goto out;       /* 32-bit wraparound */
+                       i++;
+                       if (i == RADIX_TREE_MAP_SIZE)
+                               goto out;
                }
-               if (i == RADIX_TREE_MAP_SIZE)
-                       goto out;
                height--;
                if (height == 0) {      /* Bottom level: grab some items */
                        unsigned long j = index & RADIX_TREE_MAP_MASK;
 
                        for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
+                               struct radix_tree_node *node;
                                index++;
-                               if (tag_get(slot, tag, j)) {
-                                       BUG_ON(slot->slots[j] == NULL);
-                                       results[nr_found++] = slot->slots[j];
+                               if (!tag_get(slot, tag, j))
+                                       continue;
+                               node = slot->slots[j];
+                               /*
+                                * Even though the tag was found set, we need to
+                                * recheck that we have a non-NULL node, because
+                                * if this lookup is lockless, it may have been
+                                * subsequently deleted.
+                                *
+                                * Similar care must be taken in any place that
+                                * looks up ->slots[x] without a lock (i.e. can't
+                                * rely on its value remaining the same).
+                                */
+                               if (node) {
+                                       node = rcu_dereference(node);
+                                       results[nr_found++] = node;
                                        if (nr_found == max_items)
                                                goto out;
                                }
                        }
                }
                shift -= RADIX_TREE_MAP_SHIFT;
-               slot = slot->slots[i];
-       } while (height > 0);
+               slot = rcu_dereference(slot->slots[i]);
+               if (slot == NULL)
+                       break;
+       }
 out:
        *next_index = index;
        return nr_found;
@@ -706,27 +794,44 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
                unsigned long first_index, unsigned int max_items,
                unsigned int tag)
 {
-       const unsigned long max_index = radix_tree_maxindex(root->height);
+       struct radix_tree_node *node;
+       unsigned long max_index;
        unsigned long cur_index = first_index;
-       unsigned int ret = 0;
+       unsigned int ret;
 
        /* check the root's tag bit */
        if (!root_tag_get(root, tag))
                return 0;
 
+       node = rcu_dereference(root->rnode);
+       if (!node)
+               return 0;
+
+       if (radix_tree_is_direct_ptr(node)) {
+               if (first_index > 0)
+                       return 0;
+               node = radix_tree_direct_to_ptr(node);
+               results[0] = rcu_dereference(node);
+               return 1;
+       }
+
+       max_index = radix_tree_maxindex(node->height);
+
+       ret = 0;
        while (ret < max_items) {
                unsigned int nr_found;
                unsigned long next_index;       /* Index of next search */
 
                if (cur_index > max_index)
                        break;
-               nr_found = __lookup_tag(root, results + ret, cur_index,
+               nr_found = __lookup_tag(node, results + ret, cur_index,
                                        max_items - ret, &next_index, tag);
                ret += nr_found;
                if (next_index == 0)
                        break;
                cur_index = next_index;
        }
+
        return ret;
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
@@ -742,8 +847,19 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
                        root->rnode->count == 1 &&
                        root->rnode->slots[0]) {
                struct radix_tree_node *to_free = root->rnode;
+               void *newptr;
 
-               root->rnode = to_free->slots[0];
+               /*
+                * We don't need rcu_assign_pointer(), since we are simply
+                * moving the node from one part of the tree to another. If
+                * it was safe to dereference the old pointer to it
+                * (to_free->slots[0]), it will be safe to dereference the new
+                * one (root->rnode).
+                */
+               newptr = to_free->slots[0];
+               if (root->height == 1)
+                       newptr = radix_tree_ptr_to_direct(newptr);
+               root->rnode = newptr;
                root->height--;
                /* must only free zeroed nodes into the slab */
                tag_clear(to_free, 0, 0);
@@ -767,6 +883,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 {
        struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
        struct radix_tree_node *slot = NULL;
+       struct radix_tree_node *to_free;
        unsigned int height, shift;
        int tag;
        int offset;
@@ -777,6 +894,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 
        slot = root->rnode;
        if (height == 0 && root->rnode) {
+               slot = radix_tree_direct_to_ptr(slot);
                root_tag_clear_all(root);
                root->rnode = NULL;
                goto out;
@@ -809,10 +927,17 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
                        radix_tree_tag_clear(root, index, tag);
        }
 
+       to_free = NULL;
        /* Now free the nodes we do not need anymore */
        while (pathp->node) {
                pathp->node->slots[pathp->offset] = NULL;
                pathp->node->count--;
+               /*
+                * Queue the node for deferred freeing after the
+                * last reference to it disappears (set NULL, above).
+                */
+               if (to_free)
+                       radix_tree_node_free(to_free);
 
                if (pathp->node->count) {
                        if (pathp->node == root->rnode)
@@ -821,13 +946,15 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
                }
 
                /* Node with zero slots in use so free it */
-               radix_tree_node_free(pathp->node);
-
+               to_free = pathp->node;
                pathp--;
+
        }
        root_tag_clear_all(root);
        root->height = 0;
        root->rnode = NULL;
+       if (to_free)
+               radix_tree_node_free(to_free);
 
 out:
        return slot;
@@ -846,7 +973,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
 EXPORT_SYMBOL(radix_tree_tagged);
 
 static void
-radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags)
+radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags)
 {
        memset(node, 0, sizeof(struct radix_tree_node));
 }
@@ -869,7 +996,6 @@ static __init void radix_tree_init_maxindex(void)
                height_to_maxindex[i] = __maxindex(i);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int radix_tree_callback(struct notifier_block *nfb,
                             unsigned long action,
                             void *hcpu)
@@ -889,7 +1015,6 @@ static int radix_tree_callback(struct notifier_block *nfb,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init radix_tree_init(void)
 {
index b6c4f898197c52f0b175ad0ea9e26466c338ea30..479fd462eaa9c7630df676bdddd91d4dd1212a2c 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/nmi.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/delay.h>
@@ -117,6 +118,9 @@ static void __spin_lock_debug(spinlock_t *lock)
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
+#ifdef CONFIG_SMP
+                       trigger_all_cpu_backtrace();
+#endif
                }
        }
 }
index eaa9abeea5364f6578c53c607d17955e3aebaef1..b2486cf887a06c1ecbd3c3d60d10682e2e5fc7d0 100644 (file)
 void percpu_depopulate(void *__pdata, int cpu)
 {
        struct percpu_data *pdata = __percpu_disguise(__pdata);
-       if (pdata->ptrs[cpu]) {
-               kfree(pdata->ptrs[cpu]);
-               pdata->ptrs[cpu] = NULL;
-       }
+
+       kfree(pdata->ptrs[cpu]);
+       pdata->ptrs[cpu] = NULL;
 }
 EXPORT_SYMBOL_GPL(percpu_depopulate);
 
@@ -123,6 +122,8 @@ EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
  */
 void percpu_free(void *__pdata)
 {
+       if (unlikely(!__pdata))
+               return;
        __percpu_depopulate_mask(__pdata, &cpu_possible_map);
        kfree(__percpu_disguise(__pdata));
 }
index d53112fcb4040a3a2ecf930e013eb9933bc6d34f..00a96970b237efe9a62c8c6cd8859ef625868bc1 100644 (file)
@@ -27,8 +27,6 @@ unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
 
-EXPORT_UNUSED_SYMBOL(max_pfn);  /*  June 2006  */
-
 static LIST_HEAD(bdata_list);
 #ifdef CONFIG_CRASH_DUMP
 /*
@@ -196,6 +194,10 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
        if (limit && bdata->node_boot_start >= limit)
                return NULL;
 
+       /* on nodes without memory - bootmem_map is NULL */
+       if (!bdata->node_bootmem_map)
+               return NULL;
+
        end_pfn = bdata->node_low_pfn;
        limit = PFN_DOWN(limit);
        if (limit && end_pfn > limit)
index 7b84dc814347544d68ab662f11691b25ca54ac3a..af7e2f5caea94c12a21847fe7fe23047b58c344e 100644 (file)
@@ -1445,7 +1445,6 @@ no_cached_page:
         * effect.
         */
        error = page_cache_read(file, pgoff);
-       grab_swap_token();
 
        /*
         * The page we want has now been added to the page cache.
@@ -1893,6 +1892,7 @@ int should_remove_suid(struct dentry *dentry)
 
        return 0;
 }
+EXPORT_SYMBOL(should_remove_suid);
 
 int __remove_suid(struct dentry *dentry, int kill)
 {
index 7a9d0f5d246da922c132738f2476a688812fc1ac..b77a002c3352f7c91898b75b6636cc60e3ceadc2 100644 (file)
@@ -101,7 +101,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        int err = -ENOMEM;
        pte_t *pte;
-       pte_t pte_val;
        spinlock_t *ptl;
 
        pte = get_locked_pte(mm, addr, &ptl);
@@ -114,7 +113,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
-       pte_val = *pte;
        /*
         * We don't need to run update_mmu_cache() here because the "file pte"
         * being installed by install_file_pte() is not a real pte - it's a
index a088f593a807532aa9b37307a04b5427becfd449..0ccc7f2302529b0e3a237c556f895ab4ddfb3911 100644 (file)
@@ -109,7 +109,7 @@ static int alloc_fresh_huge_page(void)
        if (nid == MAX_NUMNODES)
                nid = first_node(node_online_map);
        if (page) {
-               page[1].lru.next = (void *)free_huge_page;      /* dtor */
+               set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
                nr_huge_pages_node[page_to_nid(page)]++;
@@ -344,7 +344,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        entry = *src_pte;
                        ptepage = pte_page(entry);
                        get_page(ptepage);
-                       add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
                }
                spin_unlock(&src->page_table_lock);
@@ -365,6 +364,11 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        pte_t pte;
        struct page *page;
        struct page *tmp;
+       /*
+        * A page gathering list, protected by the per-file i_mmap_lock. The
+        * lock is used to avoid list corruption from multiple unmappings
+        * of the same page, since we are using page->lru.
+        */
        LIST_HEAD(page_list);
 
        WARN_ON(!is_vm_hugetlb_page(vma));
@@ -372,24 +376,21 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(end & ~HPAGE_MASK);
 
        spin_lock(&mm->page_table_lock);
-
-       /* Update high watermark before we lower rss */
-       update_hiwater_rss(mm);
-
        for (address = start; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
 
+               if (huge_pmd_unshare(mm, &address, ptep))
+                       continue;
+
                pte = huge_ptep_get_and_clear(mm, address, ptep);
                if (pte_none(pte))
                        continue;
 
                page = pte_page(pte);
                list_add(&page->lru, &page_list);
-               add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
        }
-
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
@@ -515,7 +516,6 @@ retry:
        if (!pte_none(*ptep))
                goto backout;
 
-       add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
@@ -653,11 +653,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
+       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += HPAGE_SIZE) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
+               if (huge_pmd_unshare(mm, &address, ptep))
+                       continue;
                if (!pte_none(*ptep)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
@@ -666,6 +669,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
+       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 
        flush_tlb_range(vma, start, end);
 }
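
The hugetlb hunk above stops storing the compound-page destructor by hand ("page[1].lru.next = (void *)free_huge_page") and goes through a set_compound_page_dtor() helper instead, so the "tail page 1 carries the destructor" convention lives in one place. A minimal userspace sketch of that pattern follows; the struct and function names are invented for illustration and only the shape mirrors the kernel change.

#include <stdio.h>

struct fake_page;
typedef void (*page_dtor_t)(struct fake_page *);

struct fake_page {
	page_dtor_t dtor;               /* stands in for page[1].lru.next */
};

/* the "tail page 1 holds the destructor" convention is hidden here */
static void set_compound_dtor(struct fake_page page[], page_dtor_t dtor)
{
	page[1].dtor = dtor;
}

static page_dtor_t get_compound_dtor(struct fake_page page[])
{
	return page[1].dtor;
}

static void free_huge_page_stub(struct fake_page *head)
{
	printf("destructor ran for compound page at %p\n", (void *)head);
}

int main(void)
{
	struct fake_page compound[2] = { { 0 }, { 0 } };

	set_compound_dtor(compound, free_huge_page_stub);
	get_compound_dtor(compound)(&compound[0]);   /* invoke stored dtor */
	return 0;
}
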
index 156861fcac436e4716537c7e5dff565dded43224..4198df0dff1c0f026355c1e8538761e81ba7254e 100644 (file)
@@ -1902,7 +1902,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 
        return 0;
 }
-EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /*  June 2006  */
 
 /**
  * swapin_readahead - swap in pages in hope we need them soon
@@ -1991,6 +1990,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry);
        if (!page) {
+               grab_swap_token(); /* Contend for token _before_ read-in */
                swapin_readahead(entry, address, vma);
                page = read_swap_cache_async(entry, vma, address);
                if (!page) {
@@ -2008,7 +2008,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Had to read the page from swap area: Major fault */
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
-               grab_swap_token();
        }
 
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
index fd678a662eae1a015ef6393c2b6f23e586592105..0c055a090f4df19e042312e2a30cef3140fb8558 100644 (file)
@@ -72,7 +72,6 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
                        return ret;
        }
        memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
-       zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
        return 0;
 }
 
index 617fb31086eef17d45f5b04df65ff3a851a7a19c..b917d6fdc1bb45e01ab7d03ee5f227fa33080daf 100644 (file)
@@ -141,9 +141,11 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes)
        enum zone_type k;
 
        max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
+       max++;                  /* space for zlcache_ptr (see mmzone.h) */
        zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
        if (!zl)
                return NULL;
+       zl->zlcache_ptr = NULL;
        num = 0;
        /* First put in the highest zones from all nodes, then all the next 
           lower zones etc. Avoid empty zones because the memory allocator
@@ -219,7 +221,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                struct page *page;
-               unsigned int nid;
+               int nid;
 
                if (!pte_present(*pte))
                        continue;
@@ -1324,7 +1326,7 @@ struct mempolicy *__mpol_copy(struct mempolicy *old)
        atomic_set(&new->refcnt, 1);
        if (new->policy == MPOL_BIND) {
                int sz = ksize(old->v.zonelist);
-               new->v.zonelist = kmemdup(old->v.zonelist, sz, SLAB_KERNEL);
+               new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
                if (!new->v.zonelist) {
                        kmem_cache_free(policy_cache, new);
                        return ERR_PTR(-ENOMEM);
@@ -1705,8 +1707,8 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
  * Display pages allocated per node and memory policy via /proc.
  */
 
-static const char *policy_types[] = { "default", "prefer", "bind",
-                                     "interleave" };
+static const char * const policy_types[] =
+       { "default", "prefer", "bind", "interleave" };
 
 /*
  * Convert a mempolicy into a string.
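
In the mempolicy.c hunks above, bind_zonelist() reserves one extra slot for zlcache_ptr and __mpol_copy() keeps duplicating the old zonelist with kmemdup(), now passing GFP_KERNEL instead of the legacy SLAB_KERNEL alias. kmemdup() itself is just "allocate, then copy"; a userspace equivalent, with invented names, might look like this.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* allocate a buffer of len bytes and copy src into it, like kmemdup() */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	int zones[4] = { 3, 1, 4, 1 };
	int *copy = memdup(zones, sizeof(zones));

	if (!copy)
		return 1;
	printf("copy[2] = %d\n", copy[2]);
	free(copy);
	return 0;
}
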
index b4979d423d2be5b7885f4cd7e24f45bdf7aa27ca..e9b161bde95b4c382092a1c94a8fcfbd4bcf58f0 100644 (file)
@@ -294,7 +294,7 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
 {
-       struct page **radix_pointer;
+       void **pslot;
 
        if (!mapping) {
                /* Anonymous page */
@@ -305,12 +305,11 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
        write_lock_irq(&mapping->tree_lock);
 
-       radix_pointer = (struct page **)radix_tree_lookup_slot(
-                                               &mapping->page_tree,
-                                               page_index(page));
+       pslot = radix_tree_lookup_slot(&mapping->page_tree,
+                                       page_index(page));
 
        if (page_count(page) != 2 + !!PagePrivate(page) ||
-                       *radix_pointer != page) {
+                       (struct page *)radix_tree_deref_slot(pslot) != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -318,7 +317,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        /*
         * Now we know that no one else is looking at the page.
         */
-       get_page(newpage);
+       get_page(newpage);      /* add cache reference */
 #ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
@@ -326,8 +325,14 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        }
 #endif
 
-       *radix_pointer = newpage;
+       radix_tree_replace_slot(pslot, newpage);
+
+       /*
+        * Drop cache reference from old page.
+        * We know this isn't the last reference.
+        */
        __put_page(page);
+
        write_unlock_irq(&mapping->tree_lock);
 
        return 0;
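
The migrate.c hunk above stops casting the radix-tree slot to "struct page **" and instead uses the lookup-slot / deref-slot / replace-slot helpers: find the slot, check it still holds the page you expect, then store the replacement through the slot while holding the tree lock. The toy "tree" below is just an array and every name is invented; only the check-then-replace shape mirrors the kernel change.

#include <stdio.h>

#define NSLOTS 8

static void *slots[NSLOTS];             /* stand-in for the mapping's page tree */

static void **lookup_slot(unsigned long index)
{
	return index < NSLOTS ? &slots[index] : NULL;
}

static int replace_slot(unsigned long index, void *expected, void *newpage)
{
	void **pslot = lookup_slot(index);

	if (!pslot || *pslot != expected)
		return -1;              /* somebody else changed the mapping */
	*pslot = newpage;               /* analogue of radix_tree_replace_slot */
	return 0;
}

int main(void)
{
	int oldpage = 1, newpage = 2;

	slots[3] = &oldpage;
	if (replace_slot(3, &oldpage, &newpage) == 0)
		printf("slot 3 now holds newpage: %s\n",
		       slots[3] == &newpage ? "yes" : "no");
	return 0;
}
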
index b90c59573abf5da8bd00dcc7b224796ce66befc5..3446b7ef731e6c419cc0cf2656829eeca6fea5a7 100644 (file)
@@ -65,7 +65,7 @@ success:
                        ret = make_pages_present(start, end);
        }
 
-       vma->vm_mm->locked_vm -= pages;
+       mm->locked_vm -= pages;
 out:
        if (ret == -ENOMEM)
                ret = -EAGAIN;
index 7b40abd7cba26aeb49595af3f2ec15099e11e5ee..7be110e98d4c845e66ab29f8b804cf9a9298a5c9 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1736,7 +1736,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -2057,7 +2057,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                    vma_start < new_vma->vm_end)
                        *vmap = new_vma;
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
                        pol = mpol_copy(vma_policy(vma));
index febea1c981685af5f05122434aa6f0ce11778430..eb5838634f181a1d6c58c948c10aac09dd272f26 100644 (file)
@@ -14,8 +14,6 @@ struct pglist_data *first_online_pgdat(void)
        return NODE_DATA(first_online_node);
 }
 
-EXPORT_UNUSED_SYMBOL(first_online_pgdat);  /*  June 2006  */
-
 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
 {
        int nid = next_online_node(pgdat->node_id);
@@ -24,8 +22,6 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
                return NULL;
        return NODE_DATA(nid);
 }
-EXPORT_UNUSED_SYMBOL(next_online_pgdat);  /*  June 2006  */
-
 
 /*
  * next_zone - helper magic for for_each_zone()
@@ -45,5 +41,4 @@ struct zone *next_zone(struct zone *zone)
        }
        return zone;
 }
-EXPORT_UNUSED_SYMBOL(next_zone);  /*  June 2006  */
 
index 8bdde9508f3b80fc8ed5738e3716f6d2d0872c3c..af874569d0f1956b096a3a49c6b75edada4fc898 100644 (file)
@@ -497,15 +497,17 @@ static int validate_mmap_request(struct file *file,
            (flags & MAP_TYPE) != MAP_SHARED)
                return -EINVAL;
 
-       if (PAGE_ALIGN(len) == 0)
-               return addr;
-
-       if (len > TASK_SIZE)
+       if (!len)
                return -EINVAL;
 
+       /* Careful about overflows.. */
+       len = PAGE_ALIGN(len);
+       if (!len || len > TASK_SIZE)
+               return -ENOMEM;
+
        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-               return -EINVAL;
+               return -EOVERFLOW;
 
        if (file) {
                /* validate file mapping requests */
@@ -806,10 +808,9 @@ unsigned long do_mmap_pgoff(struct file *file,
        vm_flags = determine_vm_flags(file, prot, flags, capabilities);
 
        /* we're going to need to record the mapping if it works */
-       vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
+       vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
        if (!vml)
                goto error_getting_vml;
-       memset(vml, 0, sizeof(*vml));
 
        down_write(&nommu_vma_sem);
 
@@ -885,11 +886,10 @@ unsigned long do_mmap_pgoff(struct file *file,
        }
 
        /* we're going to need a VMA struct as well */
-       vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
+       vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!vma)
                goto error_getting_vma;
 
-       memset(vma, 0, sizeof(*vma));
        INIT_LIST_HEAD(&vma->anon_vma_node);
        atomic_set(&vma->vm_usage, 1);
        if (file)
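
The validate_mmap_request() change above rounds the length up to a page boundary first and then checks both for wrap-around (a length near ULONG_MAX rounds up to zero) and for exceeding the task size, returning distinct errors. A small userspace rendering of that ordering, with made-up page and task-size constants:

#include <stdio.h>

#define PAGE_SIZE_ 4096UL
#define TASK_SIZE_ (1UL << 47)          /* illustrative value only */

static unsigned long page_align(unsigned long len)
{
	return (len + PAGE_SIZE_ - 1) & ~(PAGE_SIZE_ - 1);
}

static int validate_len(unsigned long len)
{
	if (!len)
		return -1;              /* EINVAL: zero-length mapping */
	len = page_align(len);          /* careful: this can wrap to 0 */
	if (!len || len > TASK_SIZE_)
		return -2;              /* ENOMEM: wrapped or too large */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate_len(0), validate_len(123),
	       validate_len(~0UL - 100));   /* wraps to 0 after rounding */
	return 0;
}
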
index 2e3ce3a928b97dd8eeb54b42fb6f363d15a6d2ae..223d9ccb7d64b6b28f5642ba3ab12629a170419a 100644 (file)
@@ -264,7 +264,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
  * flag though it's unlikely that  we select a process with CAP_SYS_RAW_IO
  * set.
  */
-static void __oom_kill_task(struct task_struct *p, const char *message)
+static void __oom_kill_task(struct task_struct *p, int verbose)
 {
        if (is_init(p)) {
                WARN_ON(1);
@@ -278,10 +278,8 @@ static void __oom_kill_task(struct task_struct *p, const char *message)
                return;
        }
 
-       if (message) {
-               printk(KERN_ERR "%s: Killed process %d (%s).\n",
-                               message, p->pid, p->comm);
-       }
+       if (verbose)
+               printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);
 
        /*
         * We give our sacrificial lamb high priority and access to
@@ -294,7 +292,7 @@ static void __oom_kill_task(struct task_struct *p, const char *message)
        force_sig(SIGKILL, p);
 }
 
-static int oom_kill_task(struct task_struct *p, const char *message)
+static int oom_kill_task(struct task_struct *p)
 {
        struct mm_struct *mm;
        struct task_struct *g, *q;
@@ -313,15 +311,25 @@ static int oom_kill_task(struct task_struct *p, const char *message)
        if (mm == NULL)
                return 1;
 
-       __oom_kill_task(p, message);
+       /*
+        * Don't kill the process if any threads are set to OOM_DISABLE
+        */
+       do_each_thread(g, q) {
+               if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
+                       return 1;
+       } while_each_thread(g, q);
+
+       __oom_kill_task(p, 1);
+
        /*
         * kill all processes that share the ->mm (i.e. all threads),
-        * but are in a different thread group
+        * but are in a different thread group. Don't let them have access
+        * to memory reserves though, otherwise we might deplete all memory.
         */
-       do_each_thread(g, q)
+       do_each_thread(g, q) {
                if (q->mm == mm && q->tgid != p->tgid)
-                       __oom_kill_task(q, message);
-       while_each_thread(g, q);
+                       force_sig(SIGKILL, q);
+       } while_each_thread(g, q);
 
        return 0;
 }
@@ -337,21 +345,22 @@ static int oom_kill_process(struct task_struct *p, unsigned long points,
         * its children or threads, just set TIF_MEMDIE so it can die quickly
         */
        if (p->flags & PF_EXITING) {
-               __oom_kill_task(p, NULL);
+               __oom_kill_task(p, 0);
                return 0;
        }
 
-       printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li"
-                       " and children.\n", p->pid, p->comm, points);
+       printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
+                                       message, p->pid, p->comm, points);
+
        /* Try to kill a child first */
        list_for_each(tsk, &p->children) {
                c = list_entry(tsk, struct task_struct, sibling);
                if (c->mm == p->mm)
                        continue;
-               if (!oom_kill_task(c, message))
+               if (!oom_kill_task(c))
                        return 0;
        }
-       return oom_kill_task(p, message);
+       return oom_kill_task(p);
 }
 
 static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
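
The reworked oom_kill_task() above walks the threads sharing the victim's mm twice: first to veto the kill entirely if any of them is marked OOM_DISABLE, then to SIGKILL the ones in other thread groups without granting them memory reserves. The structures below are invented; only the two-pass veto-then-kill shape mirrors the kernel change.

#include <stdio.h>

#define OOM_DISABLE (-17)

struct fake_task {
	int mm;                         /* id of the address space it uses */
	int tgid;
	int oomkilladj;
	int killed;
};

static int oom_kill_task(struct fake_task *victim,
			 struct fake_task tasks[], int ntasks)
{
	int i;

	/* pass 1: refuse if any thread using this mm is exempt */
	for (i = 0; i < ntasks; i++)
		if (tasks[i].mm == victim->mm &&
		    tasks[i].oomkilladj == OOM_DISABLE)
			return 1;

	victim->killed = 1;
	/* pass 2: kill users of the same mm in other thread groups */
	for (i = 0; i < ntasks; i++)
		if (tasks[i].mm == victim->mm && tasks[i].tgid != victim->tgid)
			tasks[i].killed = 1;
	return 0;
}

int main(void)
{
	struct fake_task tasks[] = {
		{ .mm = 1, .tgid = 10, .oomkilladj = 0 },
		{ .mm = 1, .tgid = 20, .oomkilladj = 0 },
		{ .mm = 2, .tgid = 30, .oomkilladj = 0 },
	};

	printf("kill refused: %d\n", oom_kill_task(&tasks[0], tasks, 3));
	printf("tasks[1] killed: %d, tasks[2] killed: %d\n",
	       tasks[1].killed, tasks[2].killed);
	return 0;
}
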
index aa6fcc7ca66f1f8bc976b5798039c3a0672d09eb..cace22b3ac25dc0f4af6fe10a7cc0a74c9d32778 100644 (file)
@@ -83,14 +83,7 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 
 EXPORT_SYMBOL(totalram_pages);
 
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = {
+static char * const zone_names[MAX_NR_ZONES] = {
         "DMA",
 #ifdef CONFIG_ZONE_DMA32
         "DMA32",
@@ -237,7 +230,7 @@ static void prep_compound_page(struct page *page, unsigned long order)
        int i;
        int nr_pages = 1 << order;
 
-       page[1].lru.next = (void *)free_compound_page;  /* set dtor */
+       set_compound_page_dtor(page, free_compound_page);
        page[1].lru.prev = (void *)order;
        for (i = 0; i < nr_pages; i++) {
                struct page *p = page + i;
@@ -486,7 +479,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order)
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
-       __free_one_page(page, zone ,order);
+       __free_one_page(page, zone, order);
        spin_unlock(&zone->lock);
 }
 
@@ -605,6 +598,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
                        1 << PG_checked | 1 << PG_mappedtodisk);
        set_page_private(page, 0);
        set_page_refcounted(page);
+
+       arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);
 
        if (gfp_flags & __GFP_ZERO)
@@ -690,9 +685,15 @@ void drain_node_pages(int nodeid)
 
                        pcp = &pset->pcp[i];
                        if (pcp->count) {
+                               int to_drain;
+
                                local_irq_save(flags);
-                               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-                               pcp->count = 0;
+                               if (pcp->count >= pcp->batch)
+                                       to_drain = pcp->batch;
+                               else
+                                       to_drain = pcp->count;
+                               free_pages_bulk(zone, to_drain, &pcp->list, 0);
+                               pcp->count -= to_drain;
                                local_irq_restore(flags);
                        }
                }
@@ -700,7 +701,6 @@ void drain_node_pages(int nodeid)
 }
 #endif
 
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
        unsigned long flags;
@@ -722,7 +722,6 @@ static void __drain_pages(unsigned int cpu)
                }
        }
 }
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM
 
@@ -925,31 +924,160 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
        return 1;
 }
 
+#ifdef CONFIG_NUMA
+/*
+ * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
+ * skip over zones that are not allowed by the cpuset, or that have
+ * been recently (in last second) found to be nearly full.  See further
+ * comments in mmzone.h.  Reduces cache footprint of zonelist scans
+ * that have to skip over a lot of full or unallowed zones.
+ *
+ * If the zonelist cache is present in the passed in zonelist, then
+ * returns a pointer to the allowed node mask (either the current
+ * task's mems_allowed, or node_online_map.)
+ *
+ * If the zonelist cache is not available for this zonelist, does
+ * nothing and returns NULL.
+ *
+ * If the fullzones BITMAP in the zonelist cache is stale (more than
+ * a second since last zap'd) then we zap it out (clear its bits.)
+ *
+ * We hold off even calling zlc_setup, until after we've checked the
+ * first zone in the zonelist, on the theory that most allocations will
+ * be satisfied from that first zone, so best to examine that zone as
+ * quickly as we can.
+ */
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+       struct zonelist_cache *zlc;     /* cached zonelist speedup info */
+       nodemask_t *allowednodes;       /* zonelist_cache approximation */
+
+       zlc = zonelist->zlcache_ptr;
+       if (!zlc)
+               return NULL;
+
+       if (jiffies - zlc->last_full_zap > 1 * HZ) {
+               bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+               zlc->last_full_zap = jiffies;
+       }
+
+       allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
+                                       &cpuset_current_mems_allowed :
+                                       &node_online_map;
+       return allowednodes;
+}
+
+/*
+ * Given 'z' scanning a zonelist, run a couple of quick checks to see
+ * if it is worth looking at further for free memory:
+ *  1) Check that the zone isn't thought to be full (doesn't have its
+ *     bit set in the zonelist_cache fullzones BITMAP).
+ *  2) Check that the zones node (obtained from the zonelist_cache
+ *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
+ * Return true (non-zero) if zone is worth looking at further, or
+ * else return false (zero) if it is not.
+ *
+ * This check -ignores- the distinction between various watermarks,
+ * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
+ * found to be full for any variation of these watermarks, it will
+ * be considered full for up to one second by all requests, unless
+ * we are so low on memory on all allowed nodes that we are forced
+ * into the second scan of the zonelist.
+ *
+ * In the second scan we ignore this zonelist cache and exactly
+ * apply the watermarks to all zones, even if it is slower to do so.
+ * We are low on memory in the second scan, and should leave no stone
+ * unturned looking for a free page.
+ */
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+                                               nodemask_t *allowednodes)
+{
+       struct zonelist_cache *zlc;     /* cached zonelist speedup info */
+       int i;                          /* index of *z in zonelist zones */
+       int n;                          /* node that zone *z is on */
+
+       zlc = zonelist->zlcache_ptr;
+       if (!zlc)
+               return 1;
+
+       i = z - zonelist->zones;
+       n = zlc->z_to_n[i];
+
+       /* This zone is worth trying if it is allowed but not full */
+       return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
+}
+
 /*
- * get_page_from_freeliest goes through the zonelist trying to allocate
+ * Given 'z' scanning a zonelist, set the corresponding bit in
+ * zlc->fullzones, so that subsequent attempts to allocate a page
+ * from that zone don't waste time re-examining it.
+ */
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+       struct zonelist_cache *zlc;     /* cached zonelist speedup info */
+       int i;                          /* index of *z in zonelist zones */
+
+       zlc = zonelist->zlcache_ptr;
+       if (!zlc)
+               return;
+
+       i = z - zonelist->zones;
+
+       set_bit(i, zlc->fullzones);
+}
+
+#else  /* CONFIG_NUMA */
+
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+       return NULL;
+}
+
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+                               nodemask_t *allowednodes)
+{
+       return 1;
+}
+
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+}
+#endif /* CONFIG_NUMA */
+
+/*
+ * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist, int alloc_flags)
 {
-       struct zone **z = zonelist->zones;
+       struct zone **z;
        struct page *page = NULL;
-       int classzone_idx = zone_idx(*z);
+       int classzone_idx = zone_idx(zonelist->zones[0]);
        struct zone *zone;
+       nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+       int zlc_active = 0;             /* set if using zonelist_cache */
+       int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
+zonelist_scan:
        /*
-        * Go through the zonelist once, looking for a zone with enough free.
+        * Scan zonelist, looking for a zone with enough free.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
+       z = zonelist->zones;
+
        do {
+               if (NUMA_BUILD && zlc_active &&
+                       !zlc_zone_worth_trying(zonelist, z, allowednodes))
+                               continue;
                zone = *z;
                if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
                        zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
-                               !cpuset_zone_allowed(zone, gfp_mask))
-                       continue;
+                       !cpuset_zone_allowed(zone, gfp_mask))
+                               goto try_next_zone;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
@@ -959,18 +1087,34 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                                mark = zone->pages_low;
                        else
                                mark = zone->pages_high;
-                       if (!zone_watermark_ok(zone , order, mark,
-                                   classzone_idx, alloc_flags))
+                       if (!zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags)) {
                                if (!zone_reclaim_mode ||
                                    !zone_reclaim(zone, gfp_mask, order))
-                                       continue;
+                                       goto this_zone_full;
+                       }
                }
 
                page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
-               if (page) {
+               if (page)
                        break;
+this_zone_full:
+               if (NUMA_BUILD)
+                       zlc_mark_zone_full(zonelist, z);
+try_next_zone:
+               if (NUMA_BUILD && !did_zlc_setup) {
+                       /* we do zlc_setup after the first zone is tried */
+                       allowednodes = zlc_setup(zonelist, alloc_flags);
+                       zlc_active = 1;
+                       did_zlc_setup = 1;
                }
        } while (*(++z) != NULL);
+
+       if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+               /* Disable zlc cache for second zonelist scan */
+               zlc_active = 0;
+               goto zonelist_scan;
+       }
        return page;
 }
 
@@ -1005,9 +1149,19 @@ restart:
        if (page)
                goto got_pg;
 
-       do {
+       /*
+        * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
+        * __GFP_NOWARN set) should not cause reclaim since the subsystem
+        * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
+        * using a larger set of nodes after it has established that the
+        * allowed per node queues are empty and that nodes are
+        * over allocated.
+        */
+       if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+               goto nopage;
+
+       for (z = zonelist->zones; *z; z++)
                wakeup_kswapd(*z, order);
-       } while (*(++z));
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
@@ -1041,6 +1195,7 @@ restart:
 
        /* This allocation should allow future memory freeing. */
 
+rebalance:
        if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
                        && !in_interrupt()) {
                if (!(gfp_mask & __GFP_NOMEMALLOC)) {
@@ -1062,7 +1217,6 @@ nofail_alloc:
        if (!wait)
                goto nopage;
 
-rebalance:
        cond_resched();
 
        /* We now go into synchronous reclaim */
@@ -1262,7 +1416,7 @@ unsigned int nr_free_pagecache_pages(void)
 static inline void show_node(struct zone *zone)
 {
        if (NUMA_BUILD)
-               printk("Node %ld ", zone_to_nid(zone));
+               printk("Node %d ", zone_to_nid(zone));
 }
 
 void si_meminfo(struct sysinfo *val)
@@ -1542,6 +1696,24 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
        }
 }
 
+/* Construct the zonelist performance cache - see further mmzone.h */
+static void __meminit build_zonelist_cache(pg_data_t *pgdat)
+{
+       int i;
+
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               struct zonelist *zonelist;
+               struct zonelist_cache *zlc;
+               struct zone **z;
+
+               zonelist = pgdat->node_zonelists + i;
+               zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
+               bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+               for (z = zonelist->zones; *z; z++)
+                       zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
+       }
+}
+
 #else  /* CONFIG_NUMA */
 
 static void __meminit build_zonelists(pg_data_t *pgdat)
@@ -1579,14 +1751,26 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
        }
 }
 
+/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
+static void __meminit build_zonelist_cache(pg_data_t *pgdat)
+{
+       int i;
+
+       for (i = 0; i < MAX_NR_ZONES; i++)
+               pgdat->node_zonelists[i].zlcache_ptr = NULL;
+}
+
 #endif /* CONFIG_NUMA */
 
 /* return values int ....just for stop_machine_run() */
 static int __meminit __build_all_zonelists(void *dummy)
 {
        int nid;
-       for_each_online_node(nid)
+
+       for_each_online_node(nid) {
                build_zonelists(NODE_DATA(nid));
+               build_zonelist_cache(NODE_DATA(nid));
+       }
        return 0;
 }
 
@@ -1715,20 +1899,6 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
        }
 }
 
-#define ZONETABLE_INDEX(x, zone_nr)    ((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-               unsigned long pfn, unsigned long size)
-{
-       unsigned long snum = pfn_to_section_nr(pfn);
-       unsigned long end = pfn_to_section_nr(pfn + size);
-
-       if (FLAGS_HAS_NODE)
-               zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
-       else
-               for (; snum <= end; snum++)
-                       zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
-}
-
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
        memmap_init_zone((size), (nid), (zone), (start_pfn))
@@ -1881,16 +2051,16 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
        int ret = NOTIFY_OK;
 
        switch (action) {
-               case CPU_UP_PREPARE:
-                       if (process_zones(cpu))
-                               ret = NOTIFY_BAD;
-                       break;
-               case CPU_UP_CANCELED:
-               case CPU_DEAD:
-                       free_zone_pagesets(cpu);
-                       break;
-               default:
-                       break;
+       case CPU_UP_PREPARE:
+               if (process_zones(cpu))
+                       ret = NOTIFY_BAD;
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_DEAD:
+               free_zone_pagesets(cpu);
+               break;
+       default:
+               break;
        }
        return ret;
 }
@@ -2421,7 +2591,6 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               zonetable_add(zone, nid, j, zone_start_pfn, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn, size);
                BUG_ON(ret);
                zone_start_pfn += size;
@@ -2736,7 +2905,6 @@ void __init free_area_init(unsigned long *zones_size)
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int page_alloc_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
 {
@@ -2751,7 +2919,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init page_alloc_init(void)
 {
@@ -3055,7 +3222,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
-               numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+               numentries = nr_kernel_pages;
                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
                numentries >>= 20 - PAGE_SHIFT;
                numentries <<= 20 - PAGE_SHIFT;
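
Most of the page_alloc.c change above introduces the per-zonelist "zonelist cache": a bitmap of zones recently found full plus a zone-to-node table, zapped roughly once a second, so the fast path of get_page_from_freelist() can skip zones without re-checking their watermarks; if the cached scan finds nothing, the cache is disabled and the list is scanned again exactly. The miniature userspace model below keeps only that control flow, and every name in it is invented.

#include <stdio.h>
#include <time.h>

#define NZONES 4

struct zl_cache {
	unsigned int fullzones;         /* bit i set: zone i recently full */
	time_t last_zap;
};

static int zone_free_pages[NZONES] = { 0, 0, 5, 0 };

static int try_alloc_from(int zone)
{
	if (zone_free_pages[zone] > 0) {
		zone_free_pages[zone]--;
		return 1;
	}
	return 0;
}

static int alloc_page(struct zl_cache *zlc)
{
	int use_cache = 1, zone;

	if (time(NULL) - zlc->last_zap > 1) {   /* stale: forget "full" bits */
		zlc->fullzones = 0;
		zlc->last_zap = time(NULL);
	}
scan:
	for (zone = 0; zone < NZONES; zone++) {
		if (use_cache && (zlc->fullzones & (1u << zone)))
			continue;               /* skip zones marked full */
		if (try_alloc_from(zone))
			return zone;
		zlc->fullzones |= 1u << zone;   /* remember it was full */
	}
	if (use_cache) {                        /* second, exact pass */
		use_cache = 0;
		goto scan;
	}
	return -1;
}

int main(void)
{
	struct zl_cache zlc = { 0, 0 };
	int i;

	for (i = 0; i < 7; i++)
		printf("allocation %d served from zone %d\n", i, alloc_page(&zlc));
	return 0;
}
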
index d4840ecbf8f9358e1dfc5918802f3604e1f06b93..dbffec0d78c98407737ac544cb2ea5ad17a9c7c2 100644 (file)
@@ -147,48 +147,3 @@ int swap_readpage(struct file *file, struct page *page)
 out:
        return ret;
 }
-
-#ifdef CONFIG_SOFTWARE_SUSPEND
-/*
- * A scruffy utility function to read or write an arbitrary swap page
- * and wait on the I/O.  The caller must have a ref on the page.
- *
- * We use end_swap_bio_read() even for writes, because it happens to do what
- * we want.
- */
-int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
-                       struct bio **bio_chain)
-{
-       struct bio *bio;
-       int ret = 0;
-       int bio_rw;
-
-       lock_page(page);
-
-       bio = get_swap_bio(GFP_KERNEL, entry.val, page, end_swap_bio_read);
-       if (bio == NULL) {
-               unlock_page(page);
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       bio_rw = rw;
-       if (!bio_chain)
-               bio_rw |= (1 << BIO_RW_SYNC);
-       if (bio_chain)
-               bio_get(bio);
-       submit_bio(bio_rw, bio);
-       if (bio_chain == NULL) {
-               wait_on_page_locked(page);
-
-               if (!PageUptodate(page) || PageError(page))
-                       ret = -EIO;
-       }
-       if (bio_chain) {
-               bio->bi_private = *bio_chain;
-               *bio_chain = bio;
-       }
-out:
-       return ret;
-}
-#endif
index b02102feeb4be03d346583f2c81c8e7aba4e3ed8..8ce0900dc95ce13052ee6241f7cf6de91f828a8a 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/writeback.h>   // Prototypes pdflush_operation()
 #include <linux/kthread.h>
 #include <linux/cpuset.h>
+#include <linux/freezer.h>
 
 
 /*
index 23cb61a01c6e4123f313f5487a5942e7ee4c5fe4..a386f2b6b3354d9736ad76495f6dfc8e6224fb61 100644 (file)
@@ -148,13 +148,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
-                       while (!list_empty(pages)) {
-                               struct page *victim;
-
-                               victim = list_to_page(pages);
-                               list_del(&victim->lru);
-                               page_cache_release(victim);
-                       }
+                       put_pages_list(pages);
                        break;
                }
        }
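
The read_cache_pages() hunk above replaces its open-coded "pop, unlink, release" loop with one call to put_pages_list(), which drains the whole list. The toy list below shows the same factoring with invented names; the helper owns the "free every remaining element" policy so callers stop repeating it on their error paths.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* drop every element still on the list, like put_pages_list() */
static void put_list(struct node **head)
{
	while (*head) {
		struct node *victim = *head;

		*head = victim->next;
		printf("releasing page %d\n", victim->id);
		free(victim);
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->id = i;
		n->next = head;
		head = n;
	}
	put_list(&head);                /* error path: release what is left */
	return 0;
}
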
index 4959535fc14c4a7aa3308bb316c4a13a3ba2baa3..c820b4f77b8d259d98be9cd1da471b8560d2c216 100644 (file)
@@ -177,7 +177,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 
 static struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
-static struct file_operations shmem_file_operations;
+static const struct file_operations shmem_file_operations;
 static struct inode_operations shmem_inode_operations;
 static struct inode_operations shmem_dir_inode_operations;
 static struct inode_operations shmem_special_inode_operations;
@@ -1943,7 +1943,7 @@ static int shmem_xattr_security_set(struct inode *inode, const char *name,
        return security_inode_setsecurity(inode, name, value, size, flags);
 }
 
-struct xattr_handler shmem_xattr_security_handler = {
+static struct xattr_handler shmem_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .list   = shmem_xattr_security_list,
        .get    = shmem_xattr_security_get,
@@ -2263,7 +2263,7 @@ static struct kmem_cache *shmem_inode_cachep;
 static struct inode *shmem_alloc_inode(struct super_block *sb)
 {
        struct shmem_inode_info *p;
-       p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
+       p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!p)
                return NULL;
        return &p->vfs_inode;
@@ -2319,7 +2319,7 @@ static const struct address_space_operations shmem_aops = {
        .migratepage    = migrate_page,
 };
 
-static struct file_operations shmem_file_operations = {
+static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
 #ifdef CONFIG_TMPFS
        .llseek         = generic_file_llseek,
index 3c4a7e34eddc4de763feea96ce1b654f06bd9052..068cb4503c15fcb50d6421b10f3b7d7b8f6ab771 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/module.h>
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
+#include       <linux/uaccess.h>
 #include       <linux/nodemask.h>
 #include       <linux/mempolicy.h>
 #include       <linux/mutex.h>
 #include       <linux/rtmutex.h>
 
-#include       <asm/uaccess.h>
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
 #include       <asm/page.h>
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
                        int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -730,7 +730,10 @@ static inline void init_lock_keys(void)
 }
 #endif
 
-/* Guard access to the cache-chain. */
+/*
+ * 1. Guard access to the cache-chain.
+ * 2. Protect sanity of cpu_online_map against cpu hotplug events
+ */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
@@ -753,7 +756,7 @@ int slab_is_available(void)
        return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -866,6 +869,22 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
        dump_stack();
 }
 
+/*
+ * By default on NUMA we use alien caches to stage the freeing of
+ * objects allocated from other nodes. This causes massive memory
+ * inefficiencies when using fake NUMA setup to split memory into a
+ * large number of small nodes, so it can be disabled on the command
+ * line
+  */
+
+static int use_alien_caches __read_mostly = 1;
+static int __init noaliencache_setup(char *s)
+{
+       use_alien_caches = 0;
+       return 1;
+}
+__setup("noaliencache", noaliencache_setup);
+
 #ifdef CONFIG_NUMA
 /*
  * Special reaping functions for NUMA systems called from cache_reap().
@@ -916,16 +935,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-       struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+       struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
        /*
         * When this gets called from do_initcalls via cpucache_init(),
         * init_workqueues() has already run, so keventd will be setup
         * at that time.
         */
-       if (keventd_up() && reap_work->func == NULL) {
+       if (keventd_up() && reap_work->work.func == NULL) {
                init_reap_node(cpu);
-               INIT_WORK(reap_work, cache_reap, NULL);
+               INIT_DELAYED_WORK(reap_work, cache_reap);
                schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
        }
 }
@@ -996,7 +1015,7 @@ static inline void *alternate_node_alloc(struct kmem_cache *cachep,
        return NULL;
 }
 
-static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+static inline void *____cache_alloc_node(struct kmem_cache *cachep,
                 gfp_t flags, int nodeid)
 {
        return NULL;
@@ -1004,7 +1023,7 @@ static inline void *__cache_alloc_node(struct kmem_cache *cachep,
 
 #else  /* CONFIG_NUMA */
 
-static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
@@ -1114,7 +1133,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
         * Make sure we are not freeing a object from another node to the array
         * cache on this cpu.
         */
-       if (likely(slabp->nodeid == node))
+       if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
                return 0;
 
        l3 = cachep->nodelists[node];
@@ -1192,7 +1211,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
                        struct array_cache *shared;
-                       struct array_cache **alien;
+                       struct array_cache **alien = NULL;
 
                        nc = alloc_arraycache(node, cachep->limit,
                                                cachep->batchcount);
@@ -1204,9 +1223,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                        if (!shared)
                                goto bad;
 
-                       alien = alloc_alien_cache(node, cachep->limit);
-                       if (!alien)
-                               goto bad;
+                       if (use_alien_caches) {
+                                alien = alloc_alien_cache(node, cachep->limit);
+                                if (!alien)
+                                        goto bad;
+                        }
                        cachep->array[cpu] = nc;
                        l3 = cachep->nodelists[node];
                        BUG_ON(!l3);
@@ -1230,12 +1251,18 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                        kfree(shared);
                        free_alien_cache(alien);
                }
-               mutex_unlock(&cache_chain_mutex);
                break;
        case CPU_ONLINE:
+               mutex_unlock(&cache_chain_mutex);
                start_cpu_timer(cpu);
                break;
 #ifdef CONFIG_HOTPLUG_CPU
+       case CPU_DOWN_PREPARE:
+               mutex_lock(&cache_chain_mutex);
+               break;
+       case CPU_DOWN_FAILED:
+               mutex_unlock(&cache_chain_mutex);
+               break;
        case CPU_DEAD:
                /*
                 * Even if all the cpus of a node are down, we don't free the
@@ -1246,8 +1273,8 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
                 * gets destroyed at kmem_cache_destroy().
                 */
                /* fall thru */
+#endif
        case CPU_UP_CANCELED:
-               mutex_lock(&cache_chain_mutex);
                list_for_each_entry(cachep, &cache_chain, next) {
                        struct array_cache *nc;
                        struct array_cache *shared;
@@ -1308,11 +1335,9 @@ free_array_cache:
                }
                mutex_unlock(&cache_chain_mutex);
                break;
-#endif
        }
        return NOTIFY_OK;
 bad:
-       mutex_unlock(&cache_chain_mutex);
        return NOTIFY_BAD;
 }
 
@@ -1580,12 +1605,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
        flags |= __GFP_COMP;
 #endif
 
-       /*
-        * Under NUMA we want memory on the indicated node. We will handle
-        * the needed fallback ourselves since we want to serve from our
-        * per node object lists first for other nodes.
-        */
-       flags |= cachep->gfpflags | GFP_THISNODE;
+       flags |= cachep->gfpflags;
 
        page = alloc_pages_node(nodeid, flags, cachep->gfporder);
        if (!page)
@@ -2098,15 +2118,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        }
 
        /*
-        * Prevent CPUs from coming and going.
-        * lock_cpu_hotplug() nests outside cache_chain_mutex
+        * We use cache_chain_mutex to ensure a consistent view of
+        * cpu_online_map as well.  Please see cpuup_callback
         */
-       lock_cpu_hotplug();
-
        mutex_lock(&cache_chain_mutex);
 
        list_for_each_entry(pc, &cache_chain, next) {
-               mm_segment_t old_fs = get_fs();
                char tmp;
                int res;
 
@@ -2115,9 +2132,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 * destroy its slab cache and no-one else reuses the vmalloc
                 * area of the module.  Print a warning.
                 */
-               set_fs(KERNEL_DS);
-               res = __get_user(tmp, pc->name);
-               set_fs(old_fs);
+               res = probe_kernel_address(pc->name, tmp);
                if (res) {
                        printk("SLAB: cache with size %d has lost its name\n",
                               pc->buffer_size);
@@ -2197,25 +2212,24 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
                ralign = BYTES_PER_WORD;
 
-       /* 2) arch mandated alignment: disables debug if necessary */
+       /* 2) arch mandated alignment */
        if (ralign < ARCH_SLAB_MINALIGN) {
                ralign = ARCH_SLAB_MINALIGN;
-               if (ralign > BYTES_PER_WORD)
-                       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        }
-       /* 3) caller mandated alignment: disables debug if necessary */
+       /* 3) caller mandated alignment */
        if (ralign < align) {
                ralign = align;
-               if (ralign > BYTES_PER_WORD)
-                       flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        }
+       /* disable debug if necessary */
+       if (ralign > BYTES_PER_WORD)
+               flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
        /*
         * 4) Store it.
         */
        align = ralign;
 
        /* Get cache's description obj. */
-       cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
+       cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
        if (!cachep)
                goto oops;
 
@@ -2326,7 +2340,6 @@ oops:
                panic("kmem_cache_create(): failed to create slab `%s'\n",
                      name);
        mutex_unlock(&cache_chain_mutex);
-       unlock_cpu_hotplug();
        return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2444,6 +2457,7 @@ out:
        return nr_freed;
 }
 
+/* Called with cache_chain_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
        int ret = 0, i = 0;
@@ -2474,9 +2488,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
+       int ret;
        BUG_ON(!cachep || in_interrupt());
 
-       return __cache_shrink(cachep);
+       mutex_lock(&cache_chain_mutex);
+       ret = __cache_shrink(cachep);
+       mutex_unlock(&cache_chain_mutex);
+       return ret;
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
@@ -2500,23 +2518,16 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 {
        BUG_ON(!cachep || in_interrupt());
 
-       /* Don't let CPUs to come and go */
-       lock_cpu_hotplug();
-
        /* Find the cache in the chain of caches. */
        mutex_lock(&cache_chain_mutex);
        /*
         * the chain is never empty, cache_cache is never destroyed
         */
        list_del(&cachep->next);
-       mutex_unlock(&cache_chain_mutex);
-
        if (__cache_shrink(cachep)) {
                slab_error(cachep, "Can't free all objects");
-               mutex_lock(&cache_chain_mutex);
                list_add(&cachep->next, &cache_chain);
                mutex_unlock(&cache_chain_mutex);
-               unlock_cpu_hotplug();
                return;
        }
 
@@ -2524,7 +2535,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
                synchronize_rcu();
 
        __kmem_cache_destroy(cachep);
-       unlock_cpu_hotplug();
+       mutex_unlock(&cache_chain_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2548,7 +2559,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
        if (OFF_SLAB(cachep)) {
                /* Slab management obj is off-slab. */
                slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-                                             local_flags, nodeid);
+                                             local_flags & ~GFP_THISNODE, nodeid);
                if (!slabp)
                        return NULL;
        } else {
@@ -2618,7 +2629,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-       if (flags & SLAB_DMA)
+       if (flags & GFP_DMA)
                BUG_ON(!(cachep->gfpflags & GFP_DMA));
        else
                BUG_ON(cachep->gfpflags & GFP_DMA);
@@ -2689,10 +2700,10 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep,
+               gfp_t flags, int nodeid, void *objp)
 {
        struct slab *slabp;
-       void *objp;
        size_t offset;
        gfp_t local_flags;
        unsigned long ctor_flags;
@@ -2702,12 +2713,12 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         * Be lazy and only check for valid flags here,  keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
-       if (flags & SLAB_NO_GROW)
+       BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
+       if (flags & __GFP_NO_GROW)
                return 0;
 
        ctor_flags = SLAB_CTOR_CONSTRUCTOR;
-       local_flags = (flags & SLAB_LEVEL_MASK);
+       local_flags = (flags & GFP_LEVEL_MASK);
        if (!(local_flags & __GFP_WAIT))
                /*
                 * Not allowed to sleep.  Need to tell a constructor about
@@ -2744,12 +2755,14 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         * Get mem for the objs.  Attempt to allocate a physical page from
         * 'nodeid'.
         */
-       objp = kmem_getpages(cachep, flags, nodeid);
+       if (!objp)
+               objp = kmem_getpages(cachep, flags, nodeid);
        if (!objp)
                goto failed;
 
        /* Get slab management. */
-       slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
+       slabp = alloc_slabmgmt(cachep, objp, offset,
+                       local_flags & ~GFP_THISNODE, nodeid);
        if (!slabp)
                goto opps1;
 
@@ -2987,7 +3000,7 @@ alloc_done:
 
        if (unlikely(!ac->avail)) {
                int x;
-               x = cache_grow(cachep, flags, node);
+               x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
 
                /* cache_grow can reenable interrupts, then ac could change. */
                ac = cpu_cache_get(cachep);
@@ -3063,6 +3076,12 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
                cachep->ctor(objp, cachep, ctor_flags);
        }
+#if ARCH_SLAB_MINALIGN
+       if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
+               printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+                      objp, ARCH_SLAB_MINALIGN);
+       }
+#endif
        return objp;
 }
 #else
@@ -3105,10 +3124,10 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
                objp = ____cache_alloc(cachep, flags);
        /*
         * We may just have run out of memory on the local node.
-        * __cache_alloc_node() knows how to locate memory on other nodes
+        * ____cache_alloc_node() knows how to locate memory on other nodes
         */
        if (NUMA_BUILD && !objp)
-               objp = __cache_alloc_node(cachep, flags, numa_node_id());
+               objp = ____cache_alloc_node(cachep, flags, numa_node_id());
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp,
                                            caller);
@@ -3135,15 +3154,17 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
        else if (current->mempolicy)
                nid_alloc = slab_node(current->mempolicy);
        if (nid_alloc != nid_here)
-               return __cache_alloc_node(cachep, flags, nid_alloc);
+               return ____cache_alloc_node(cachep, flags, nid_alloc);
        return NULL;
 }
 
 /*
  * Fallback function if there was no memory available and no objects on a
- * certain node and we are allowed to fall back. We mimick the behavior of
- * the page allocator. We fall back according to a zonelist determined by
- * the policy layer while obeying cpuset constraints.
+ * certain node and falling back is permitted. First we scan all the
+ * available nodelists for available objects. If that fails then we
+ * perform an allocation without specifying a node. This allows the page
+ * allocator to do its reclaim / fallback magic. We then insert the
+ * slab into the proper nodelist and then allocate from it.
  */
 void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
@@ -3151,15 +3172,51 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
                                        ->node_zonelists[gfp_zone(flags)];
        struct zone **z;
        void *obj = NULL;
+       int nid;
 
+retry:
+       /*
+        * Look through allowed nodes for objects available
+        * from existing per node queues.
+        */
        for (z = zonelist->zones; *z && !obj; z++) {
-               int nid = zone_to_nid(*z);
+               nid = zone_to_nid(*z);
+
+               if (cpuset_zone_allowed(*z, flags) &&
+                       cache->nodelists[nid] &&
+                       cache->nodelists[nid]->free_objects)
+                               obj = ____cache_alloc_node(cache,
+                                       flags | GFP_THISNODE, nid);
+       }
 
-               if (zone_idx(*z) <= ZONE_NORMAL &&
-                               cpuset_zone_allowed(*z, flags) &&
-                               cache->nodelists[nid])
-                       obj = __cache_alloc_node(cache,
-                                       flags | __GFP_THISNODE, nid);
+       if (!obj) {
+               /*
+                * This allocation will be performed within the constraints
+                * of the current cpuset / memory policy requirements.
+                * We may trigger various forms of reclaim on the allowed
+                * set and go into memory reserves if necessary.
+                */
+               obj = kmem_getpages(cache, flags, -1);
+               if (obj) {
+                       /*
+                        * Insert into the appropriate per node queues
+                        */
+                       nid = page_to_nid(virt_to_page(obj));
+                       if (cache_grow(cache, flags, nid, obj)) {
+                               obj = ____cache_alloc_node(cache,
+                                       flags | GFP_THISNODE, nid);
+                               if (!obj)
+                                       /*
+                                        * Another processor may allocate the
+                                        * objects in the slab since we are
+                                        * not holding any locks.
+                                        */
+                                       goto retry;
+                       } else {
+                               kmem_freepages(cache, obj);
+                               obj = NULL;
+                       }
+               }
        }
        return obj;
 }
@@ -3167,7 +3224,7 @@ void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 /*
  * A interface to enable slab creation on nodeid
  */
-static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
                                int nodeid)
 {
        struct list_head *entry;
@@ -3216,7 +3273,7 @@ retry:
 
 must_grow:
        spin_unlock(&l3->list_lock);
-       x = cache_grow(cachep, flags, nodeid);
+       x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
        if (x)
                goto retry;
 
@@ -3434,35 +3491,59 @@ out:
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
  *
- * Identical to kmem_cache_alloc, except that this function is slow
- * and can sleep. And it will allocate memory on the given node, which
- * can improve the performance for cpu bound structures.
- * New and improved: it will now make sure that the object gets
- * put on the correct node list so that there is no false sharing.
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
-void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+               int nodeid, void *caller)
 {
        unsigned long save_flags;
-       void *ptr;
+       void *ptr = NULL;
 
        cache_alloc_debugcheck_before(cachep, flags);
        local_irq_save(save_flags);
 
-       if (nodeid == -1 || nodeid == numa_node_id() ||
-                       !cachep->nodelists[nodeid])
-               ptr = ____cache_alloc(cachep, flags);
-       else
-               ptr = __cache_alloc_node(cachep, flags, nodeid);
-       local_irq_restore(save_flags);
+       if (unlikely(nodeid == -1))
+               nodeid = numa_node_id();
+
+       if (likely(cachep->nodelists[nodeid])) {
+               if (nodeid == numa_node_id()) {
+                       /*
+                        * Use the locally cached objects if possible.
+                        * However ____cache_alloc does not allow fallback
+                        * to other nodes. It may fail while we still have
+                        * objects on other nodes available.
+                        */
+                       ptr = ____cache_alloc(cachep, flags);
+               }
+               if (!ptr) {
+                       /* ___cache_alloc_node can fall back to other nodes */
+                       ptr = ____cache_alloc_node(cachep, flags, nodeid);
+               }
+       } else {
+               /* Node not bootstrapped yet */
+               if (!(flags & __GFP_THISNODE))
+                       ptr = fallback_alloc(cachep, flags);
+       }
 
-       ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
-                                          __builtin_return_address(0));
+       local_irq_restore(save_flags);
+       ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 
        return ptr;
 }
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+{
+       return __cache_alloc_node(cachep, flags, nodeid,
+                       __builtin_return_address(0));
+}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
        struct kmem_cache *cachep;
 
@@ -3471,8 +3552,29 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
                return NULL;
        return kmem_cache_alloc_node(cachep, flags, node);
 }
+
+#ifdef CONFIG_DEBUG_SLAB
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+       return __do_kmalloc_node(size, flags, node,
+                       __builtin_return_address(0));
+}
 EXPORT_SYMBOL(__kmalloc_node);
-#endif
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+               int node, void *caller)
+{
+       return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+#else
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+       return __do_kmalloc_node(size, flags, node, NULL);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_NUMA */
 
 /**
  * __do_kmalloc - allocate memory
@@ -3583,13 +3685,15 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
        int node;
        struct kmem_list3 *l3;
        struct array_cache *new_shared;
-       struct array_cache **new_alien;
+       struct array_cache **new_alien = NULL;
 
        for_each_online_node(node) {
 
-               new_alien = alloc_alien_cache(node, cachep->limit);
-               if (!new_alien)
-                       goto fail;
+                if (use_alien_caches) {
+                        new_alien = alloc_alien_cache(node, cachep->limit);
+                        if (!new_alien)
+                                goto fail;
+                }
 
                new_shared = alloc_arraycache(node,
                                cachep->shared*cachep->batchcount,
@@ -3815,7 +3919,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
        struct kmem_cache *searchp;
        struct kmem_list3 *l3;
@@ -4038,7 +4142,7 @@ static int s_show(struct seq_file *m, void *p)
  * + further values on SMP and with statistics enabled
  */
 
-struct seq_operations slabinfo_op = {
+const struct seq_operations slabinfo_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
@@ -4236,7 +4340,7 @@ static int leaks_show(struct seq_file *m, void *p)
        return 0;
 }
 
-struct seq_operations slabstats_op = {
+const struct seq_operations slabstats_op = {
        .start = leaks_start,
        .next = s_next,
        .stop = s_stop,
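
The rewritten slab allocation path above prefers the requested node and spills to other
nodes through fallback_alloc() only when __GFP_THISNODE is not set. A minimal userspace
sketch of that decision order follows; the toy allocator, the per-node counters and the
flag value are assumptions for illustration, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 4
#define GFP_THISNODE 0x1u       /* stand-in for __GFP_THISNODE */

/* Toy per-node free counts standing in for the per-node slab lists. */
static int node_objects[MAX_NODES] = { 0, 3, 0, 2 };

static void *alloc_from_node(int node)
{
        if (node_objects[node] == 0)
                return NULL;
        node_objects[node]--;
        return malloc(32);      /* the object itself does not matter here */
}

/* Mirrors the order used above: requested node first, then any other
 * node, unless the caller demanded GFP_THISNODE. */
static void *cache_alloc_node(int nodeid, unsigned int flags)
{
        void *obj = alloc_from_node(nodeid);

        if (obj || (flags & GFP_THISNODE))
                return obj;

        for (int node = 0; node < MAX_NODES; node++) {
                if (node == nodeid)
                        continue;
                obj = alloc_from_node(node);
                if (obj)
                        return obj;
        }
        return NULL;
}

int main(void)
{
        void *a = cache_alloc_node(0, 0);               /* node 0 empty: falls back */
        void *b = cache_alloc_node(0, GFP_THISNODE);    /* node 0 empty: must fail  */

        printf("fallback alloc: %s\n", a ? "ok" : "failed");
        printf("THISNODE alloc: %s\n", b ? "ok" : "failed");
        free(a);
        free(b);
        return 0;
}
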
index b3c82ba300124ea28d1cb952f337e387b1c4b9eb..ac26eb0d73cddeaf53b20fb7a7d31f954b8bc117 100644 (file)
@@ -24,6 +24,25 @@ struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
 #endif
 EXPORT_SYMBOL(mem_section);
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+/*
+ * If we did not store the node number in the page then we have to
+ * do a lookup in the section_to_node_table in order to find which
+ * node the page belongs to.
+ */
+#if MAX_NUMNODES <= 256
+static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#else
+static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
+#endif
+
+int page_to_nid(struct page *page)
+{
+       return section_to_node_table[page_to_section(page)];
+}
+EXPORT_SYMBOL(page_to_nid);
+#endif
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
 static struct mem_section *sparse_index_alloc(int nid)
 {
@@ -49,6 +68,10 @@ static int sparse_index_init(unsigned long section_nr, int nid)
        struct mem_section *section;
        int ret = 0;
 
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+       section_to_node_table[section_nr] = nid;
+#endif
+
        if (mem_section[root])
                return -EEXIST;
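
When the node number cannot be kept in page->flags (NODE_NOT_IN_PAGE_FLAGS), the hunks
above record it per memory section at init time and page_to_nid() becomes a table lookup.
A self-contained sketch of that idea, with made-up section sizes; the real
NR_MEM_SECTIONS, page_to_section() and table width differ.

#include <stdio.h>

#define SECTION_SHIFT   10              /* assumed: 1024 "pages" per section */
#define NR_SECTIONS     64

static unsigned char section_to_node_table[NR_SECTIONS];

/* Record the owning node once, when the section is initialised. */
static void section_init(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = (unsigned char)nid;
}

/* Later lookups cost one table read instead of bits in page->flags. */
static int pfn_to_nid(unsigned long pfn)
{
        return section_to_node_table[pfn >> SECTION_SHIFT];
}

int main(void)
{
        section_init(0, 0);
        section_init(1, 1);

        printf("pfn 100  -> node %d\n", pfn_to_nid(100));
        printf("pfn 1500 -> node %d\n", pfn_to_nid(1500));
        return 0;
}
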
 
index 2e0e871f542f45da3bddacbbe5fe4647339e1729..2ed7be39795e3034986988502b75982698a1febb 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -57,9 +57,9 @@ static void put_compound_page(struct page *page)
 {
        page = (struct page *)page_private(page);
        if (put_page_testzero(page)) {
-               void (*dtor)(struct page *page);
+               compound_page_dtor *dtor;
 
-               dtor = (void (*)(struct page *))page[1].lru.next;
+               dtor = get_compound_page_dtor(page);
                (*dtor)(page);
        }
 }
@@ -216,7 +216,7 @@ void lru_add_drain(void)
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
        lru_add_drain();
 }
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
  */
 int lru_add_drain_all(void)
 {
-       return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+       return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
@@ -514,5 +514,7 @@ void __init swap_setup(void)
         * Right now other parts of the system means that we
         * _really_ don't want to cluster much more
         */
+#ifdef CONFIG_HOTPLUG_CPU
        hotcpu_notifier(cpu_swap_callback, 0);
+#endif
 }
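
put_compound_page() above now goes through a compound_page_dtor typedef and a
get_compound_page_dtor() accessor instead of open-coding the cast of page[1].lru.next.
A tiny illustration of why the typed accessor reads better; the struct and helpers
below are invented for the example.

#include <stdio.h>

struct toy_page;
typedef void toy_dtor(struct toy_page *page);

struct toy_page {
        void *private_slot;     /* plays the role of page[1].lru.next */
};

static void set_dtor(struct toy_page *p, toy_dtor *dtor)
{
        /* object/function pointer punning mirrors the kernel's lru.next trick */
        p->private_slot = (void *)dtor;
}

static toy_dtor *get_dtor(struct toy_page *p)
{
        return (toy_dtor *)p->private_slot;
}

static void free_huge(struct toy_page *page)
{
        printf("destructor called for %p\n", (void *)page);
}

int main(void)
{
        struct toy_page page;

        set_dtor(&page, free_huge);
        get_dtor(&page)(&page);         /* the one remaining cast hides in the accessor */
        return 0;
}
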
index a15def63f28f19e8e9d0e9f550177b80b033526e..c5431072f42244181d0aeb7eee6eae837005ad03 100644 (file)
@@ -427,34 +427,48 @@ void free_swap_and_cache(swp_entry_t entry)
 
 #ifdef CONFIG_SOFTWARE_SUSPEND
 /*
- * Find the swap type that corresponds to given device (if any)
+ * Find the swap type that corresponds to given device (if any).
  *
- * This is needed for software suspend and is done in such a way that inode
- * aliasing is allowed.
+ * @offset - number of the PAGE_SIZE-sized block of the device, starting
+ * from 0, in which the swap header is expected to be located.
+ *
+ * This is needed for the suspend to disk (aka swsusp).
  */
-int swap_type_of(dev_t device)
+int swap_type_of(dev_t device, sector_t offset)
 {
+       struct block_device *bdev = NULL;
        int i;
 
+       if (device)
+               bdev = bdget(device);
+
        spin_lock(&swap_lock);
        for (i = 0; i < nr_swapfiles; i++) {
-               struct inode *inode;
+               struct swap_info_struct *sis = swap_info + i;
 
-               if (!(swap_info[i].flags & SWP_WRITEOK))
+               if (!(sis->flags & SWP_WRITEOK))
                        continue;
 
-               if (!device) {
+               if (!bdev) {
                        spin_unlock(&swap_lock);
                        return i;
                }
-               inode = swap_info[i].swap_file->f_dentry->d_inode;
-               if (S_ISBLK(inode->i_mode) &&
-                   device == MKDEV(imajor(inode), iminor(inode))) {
-                       spin_unlock(&swap_lock);
-                       return i;
+               if (bdev == sis->bdev) {
+                       struct swap_extent *se;
+
+                       se = list_entry(sis->extent_list.next,
+                                       struct swap_extent, list);
+                       if (se->start_block == offset) {
+                               spin_unlock(&swap_lock);
+                               bdput(bdev);
+                               return i;
+                       }
                }
        }
        spin_unlock(&swap_lock);
+       if (bdev)
+               bdput(bdev);
+
        return -ENODEV;
 }
 
@@ -931,6 +945,23 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
        }
 }
 
+#ifdef CONFIG_SOFTWARE_SUSPEND
+/*
+ * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
+ * corresponding to given index in swap_info (swap type).
+ */
+sector_t swapdev_block(int swap_type, pgoff_t offset)
+{
+       struct swap_info_struct *sis;
+
+       if (swap_type >= nr_swapfiles)
+               return 0;
+
+       sis = swap_info + swap_type;
+       return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+}
+#endif /* CONFIG_SOFTWARE_SUSPEND */
+
 /*
  * Free all of a swapdev's extent information
  */
@@ -1274,10 +1305,13 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
 
        mutex_lock(&swapon_mutex);
 
+       if (!l)
+               return SEQ_START_TOKEN;
+
        for (i = 0; i < nr_swapfiles; i++, ptr++) {
                if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
                        continue;
-               if (!l--)
+               if (!--l)
                        return ptr;
        }
 
@@ -1286,10 +1320,17 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
 
 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 {
-       struct swap_info_struct *ptr = v;
+       struct swap_info_struct *ptr;
        struct swap_info_struct *endptr = swap_info + nr_swapfiles;
 
-       for (++ptr; ptr < endptr; ptr++) {
+       if (v == SEQ_START_TOKEN)
+               ptr = swap_info;
+       else {
+               ptr = v;
+               ptr++;
+       }
+
+       for (; ptr < endptr; ptr++) {
                if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
                        continue;
                ++*pos;
@@ -1310,8 +1351,10 @@ static int swap_show(struct seq_file *swap, void *v)
        struct file *file;
        int len;
 
-       if (v == swap_info)
-               seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+       if (ptr == SEQ_START_TOKEN) {
+               seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
+               return 0;
+       }
 
        file = ptr->swap_file;
        len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
@@ -1325,7 +1368,7 @@ static int swap_show(struct seq_file *swap, void *v)
        return 0;
 }
 
-static struct seq_operations swaps_op = {
+static const struct seq_operations swaps_op = {
        .start =        swap_start,
        .next =         swap_next,
        .stop =         swap_stop,
@@ -1337,7 +1380,7 @@ static int swaps_open(struct inode *inode, struct file *file)
        return seq_open(file, &swaps_op);
 }
 
-static struct file_operations proc_swaps_operations = {
+static const struct file_operations proc_swaps_operations = {
        .open           = swaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -1540,6 +1583,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                error = -EINVAL;
                if (!maxpages)
                        goto bad_swap;
+               if (swapfilesize && maxpages > swapfilesize) {
+                       printk(KERN_WARNING
+                              "Swap area shorter than signature indicates\n");
+                       goto bad_swap;
+               }
                if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
                        goto bad_swap;
                if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
@@ -1567,12 +1615,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                        goto bad_swap;
        }
 
-       if (swapfilesize && maxpages > swapfilesize) {
-               printk(KERN_WARNING
-                      "Swap area shorter than signature indicates\n");
-               error = -EINVAL;
-               goto bad_swap;
-       }
        if (nr_good_pages) {
                p->swap_map[0] = SWAP_MAP_BAD;
                p->max = maxpages;
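
The /proc/swaps changes above switch swap_start()/swap_next() to the SEQ_START_TOKEN
convention: position 0 yields a special token that prints only the header line, and the
real entries follow from position 1. A plain C model of that iterator shape; the names
are illustrative and the kernel's seq_file machinery is more involved.

#include <stdio.h>

#define START_TOKEN ((void *)1)         /* stands in for SEQ_START_TOKEN */

static const char *entries[] = { "/dev/sda2", "/swapfile" };
#define NR_ENTRIES (sizeof(entries) / sizeof(entries[0]))

/* pos == 0 -> header token, pos == n -> entry n-1, like swap_start() above */
static void *swaps_start(long pos)
{
        if (pos == 0)
                return START_TOKEN;
        if ((unsigned long)pos <= NR_ENTRIES)
                return (void *)&entries[pos - 1];
        return NULL;
}

static void swaps_show(void *v)
{
        if (v == START_TOKEN) {
                printf("Filename\t\tType\n");
                return;
        }
        printf("%s\t\tpartition\n", *(const char **)v);
}

int main(void)
{
        for (long pos = 0; ; pos++) {
                void *v = swaps_start(pos);

                if (!v)
                        break;
                swaps_show(v);
        }
        return 0;
}
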
index f4c560b4a2b79862340fe7457af001f8e6141715..9ef9071f99bcd7322b8e1f6808a477403b891211 100644 (file)
  *
  * Simple token based thrashing protection, using the algorithm
  * described in:  http://www.cs.wm.edu/~sjiang/token.pdf
+ *
+ * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
+ * Improved algorithm to pass token:
+ * Each task has a priority which is incremented if it contended
+ * for the token in an interval less than its previous attempt.
+ * If the token is acquired, that task's priority is boosted to prevent
+ * the token from bouncing around too often and to let the task make
+ * some progress in its execution.
  */
+
 #include <linux/jiffies.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/swap.h>
 
 static DEFINE_SPINLOCK(swap_token_lock);
-static unsigned long swap_token_timeout;
-static unsigned long swap_token_check;
-struct mm_struct * swap_token_mm = &init_mm;
-
-#define SWAP_TOKEN_CHECK_INTERVAL (HZ * 2)
-#define SWAP_TOKEN_TIMEOUT     (300 * HZ)
-/*
- * Currently disabled; Needs further code to work at HZ * 300.
- */
-unsigned long swap_token_default_timeout = SWAP_TOKEN_TIMEOUT;
-
-/*
- * Take the token away if the process had no page faults
- * in the last interval, or if it has held the token for
- * too long.
- */
-#define SWAP_TOKEN_ENOUGH_RSS 1
-#define SWAP_TOKEN_TIMED_OUT 2
-static int should_release_swap_token(struct mm_struct *mm)
-{
-       int ret = 0;
-       if (!mm->recent_pagein)
-               ret = SWAP_TOKEN_ENOUGH_RSS;
-       else if (time_after(jiffies, swap_token_timeout))
-               ret = SWAP_TOKEN_TIMED_OUT;
-       mm->recent_pagein = 0;
-       return ret;
-}
+struct mm_struct *swap_token_mm;
+static unsigned int global_faults;
 
-/*
- * Try to grab the swapout protection token.  We only try to
- * grab it once every TOKEN_CHECK_INTERVAL, both to prevent
- * SMP lock contention and to check that the process that held
- * the token before is no longer thrashing.
- */
 void grab_swap_token(void)
 {
-       struct mm_struct *mm;
-       int reason;
+       int current_interval;
 
-       /* We have the token. Let others know we still need it. */
-       if (has_swap_token(current->mm)) {
-               current->mm->recent_pagein = 1;
-               if (unlikely(!swap_token_default_timeout))
-                       disable_swap_token();
-               return;
-       }
-
-       if (time_after(jiffies, swap_token_check)) {
+       global_faults++;
 
-               if (!swap_token_default_timeout) {
-                       swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-                       return;
-               }
-
-               /* ... or if we recently held the token. */
-               if (time_before(jiffies, current->mm->swap_token_time))
-                       return;
+       current_interval = global_faults - current->mm->faultstamp;
 
-               if (!spin_trylock(&swap_token_lock))
-                       return;
+       if (!spin_trylock(&swap_token_lock))
+               return;
 
-               swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
+       /* First come first served */
+       if (swap_token_mm == NULL) {
+               current->mm->token_priority = current->mm->token_priority + 2;
+               swap_token_mm = current->mm;
+               goto out;
+       }
 
-               mm = swap_token_mm;
-               if ((reason = should_release_swap_token(mm))) {
-                       unsigned long eligible = jiffies;
-                       if (reason == SWAP_TOKEN_TIMED_OUT) {
-                               eligible += swap_token_default_timeout;
-                       }
-                       mm->swap_token_time = eligible;
-                       swap_token_timeout = jiffies + swap_token_default_timeout;
+       if (current->mm != swap_token_mm) {
+               if (current_interval < current->mm->last_interval)
+                       current->mm->token_priority++;
+               else {
+                       current->mm->token_priority--;
+                       if (unlikely(current->mm->token_priority < 0))
+                               current->mm->token_priority = 0;
+               }
+               /* Check if we deserve the token */
+               if (current->mm->token_priority >
+                               swap_token_mm->token_priority) {
+                       current->mm->token_priority += 2;
                        swap_token_mm = current->mm;
                }
-               spin_unlock(&swap_token_lock);
+       } else {
+               /* Token holder came in again! */
+               current->mm->token_priority += 2;
        }
-       return;
+
+out:
+       current->mm->faultstamp = global_faults;
+       current->mm->last_interval = current_interval;
+       spin_unlock(&swap_token_lock);
+       return;
 }
 
 /* Called on process exit. */
 void __put_swap_token(struct mm_struct *mm)
 {
        spin_lock(&swap_token_lock);
-       if (likely(mm == swap_token_mm)) {
-               mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL;
-               swap_token_mm = &init_mm;
-               swap_token_check = jiffies;
-       }
+       if (likely(mm == swap_token_mm))
+               swap_token_mm = NULL;
        spin_unlock(&swap_token_lock);
 }
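
The rewritten grab_swap_token() above replaces the timeout-based token with a priority
scheme: a global fault counter timestamps each attempt, a task that retries within a
shorter interval than its previous attempt gains priority, and the token moves only when
a contender's priority exceeds the holder's. A compact userspace model of that policy;
field and function names are assumptions, the kernel keeps this state in mm_struct.

#include <stdio.h>

struct task {
        const char *name;
        int token_priority;
        unsigned int faultstamp;
        unsigned int last_interval;
};

static struct task *token_holder;
static unsigned int global_faults;

static void grab_token(struct task *t)
{
        unsigned int interval;

        global_faults++;
        interval = global_faults - t->faultstamp;

        if (!token_holder) {
                t->token_priority += 2;         /* first come, first served */
                token_holder = t;
        } else if (t != token_holder) {
                /* Faulting harder than last time earns priority. */
                if (interval < t->last_interval)
                        t->token_priority++;
                else if (t->token_priority > 0)
                        t->token_priority--;
                if (t->token_priority > token_holder->token_priority) {
                        t->token_priority += 2; /* boost the new holder */
                        token_holder = t;
                }
        } else {
                t->token_priority += 2;         /* holder faulted again */
        }

        t->faultstamp = global_faults;
        t->last_interval = interval;
}

int main(void)
{
        struct task a = { "a" }, b = { "b" };

        grab_token(&a);         /* a becomes the first holder */
        grab_token(&b);         /* b contends; its second, shorter interval ... */
        grab_token(&b);         /* ... raises its priority, but not past a's yet */
        printf("holder=%s a.prio=%d b.prio=%d\n",
               token_holder->name, a.token_priority, b.token_priority);
        return 0;
}
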
index 518540a4a2a66a1ade20312d1d655ba6923e05b5..093f5fe6dd7795cc5d0ca15f5918eedc822b87f4 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/rwsem.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1172,11 +1173,12 @@ loop_again:
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               0, 0)) {
                                end_zone = i;
-                               goto scan;
+                               break;
                        }
                }
-               goto out;
-scan:
+               if (i < 0)
+                       goto out;
+
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
@@ -1259,6 +1261,9 @@ out:
        }
        if (!all_zones_ok) {
                cond_resched();
+
+               try_to_freeze();
+
                goto loop_again;
        }
 
@@ -1508,7 +1513,6 @@ out:
 }
 #endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1529,7 +1533,6 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * This kswapd start function will be called by init and node-hot-add.
index 8614e8f6743b599c3761d6d2ca30f0012048676e..dc005a0c96ae5a0162b9da79e43d663c61c9422a 100644 (file)
@@ -430,7 +430,7 @@ static int frag_show(struct seq_file *m, void *arg)
        return 0;
 }
 
-struct seq_operations fragmentation_op = {
+const struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
@@ -452,7 +452,7 @@ struct seq_operations fragmentation_op = {
 #define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx)
 
-static char *vmstat_text[] = {
+static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_anon_pages",
        "nr_mapped",
@@ -597,7 +597,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
        return 0;
 }
 
-struct seq_operations zoneinfo_op = {
+const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
@@ -660,7 +660,7 @@ static void vmstat_stop(struct seq_file *m, void *arg)
        m->private = NULL;
 }
 
-struct seq_operations vmstat_op = {
+const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
@@ -679,13 +679,13 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                void *hcpu)
 {
        switch (action) {
-               case CPU_UP_PREPARE:
-               case CPU_UP_CANCELED:
-               case CPU_DEAD:
-                       refresh_zone_stat_thresholds();
-                       break;
-               default:
-                       break;
+       case CPU_UP_PREPARE:
+       case CPU_UP_CANCELED:
+       case CPU_DEAD:
+               refresh_zone_stat_thresholds();
+               break;
+       default:
+               break;
        }
        return NOTIFY_OK;
 }
index 5946ec63724f3634abc4b9cd819b3e6d6a9ac6f4..3fc0abeeaf344fac1d514b4f84b58339b6707703 100644 (file)
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
 
 #define LEC_ARP_REFRESH_INTERVAL (3*HZ)
 
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
 static void lec_arp_expire_arp(unsigned long data);
 
 /* 
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv)
         INIT_HLIST_HEAD(&priv->lec_no_forward);
         INIT_HLIST_HEAD(&priv->mcast_fwds);
        spin_lock_init(&priv->lec_arp_lock);
-       INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+       INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
        schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
 }
 
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data)
  *       to ESI_FORWARD_DIRECT. This causes the flush period to end
  *       regardless of the progress of the flush protocol.
  */
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
 {
        unsigned long flags;
-       struct lec_priv *priv = data;
+       struct lec_priv *priv =
+               container_of(work, struct lec_priv, lec_arp_work.work);
        struct hlist_node *node, *next;
        struct lec_arp_table *entry;
        unsigned long now;
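
The lec_arp hunks above, and many of the files that follow, repeat one mechanical
conversion: work handlers no longer receive a void *data pointer set at INIT_WORK()
time, they receive the struct work_struct itself and recover their private state with
container_of(); periodic work additionally moves to struct delayed_work, which embeds a
work_struct. A standalone sketch of the container_of recovery step, using toy types
rather than the kernel's.

#include <stdio.h>
#include <stddef.h>

/* Same definition the kernel uses, reduced to standard C. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work {                           /* stand-in for struct work_struct */
        void (*func)(struct work *w);
};

struct lec_priv_toy {                   /* the handler's real context */
        int refresh_interval;
        struct work arp_work;           /* embedded, like lec_arp_work */
};

static void arp_check_expire(struct work *w)
{
        /* Recover the enclosing object from the embedded member. */
        struct lec_priv_toy *priv =
                container_of(w, struct lec_priv_toy, arp_work);

        printf("refresh interval: %d\n", priv->refresh_interval);
}

int main(void)
{
        struct lec_priv_toy priv = { .refresh_interval = 3 };

        priv.arp_work.func = arp_check_expire;
        priv.arp_work.func(&priv.arp_work);     /* what the workqueue would do */
        return 0;
}
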
index 24cc95f86741bdb0728de9222e2297dedecb0879..99136babd5357a273a8c758b5d47116fc59d6dcf 100644 (file)
@@ -92,7 +92,7 @@ struct lec_priv {
        spinlock_t lec_arp_lock;
        struct atm_vcc *mcast_vcc;              /* Default Multicast Send VCC */
        struct atm_vcc *lecd;
-       struct work_struct lec_arp_work;        /* C10 */
+       struct delayed_work lec_arp_work;       /* C10 */
        unsigned int maximum_unknown_frame_count;
                                                /*
                                                 * Within the period of time defined by this variable, the client will send
index 3eeeb7a86e753cc638993e1ef9214de51adbdfbe..d4c935692ccfe377d8fde81e297ab8cb458a7655 100644 (file)
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
        kfree(data);
 }
 
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
 {
-       struct hci_conn *conn = data;
+       struct hci_conn *conn = container_of(work, struct hci_conn, work);
        int i;
 
        if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
 
        dev_set_drvdata(&conn->dev, conn);
 
-       INIT_WORK(&conn->work, add_conn, (void *) conn);
+       INIT_WORK(&conn->work, add_conn);
 
        schedule_work(&conn->work);
 }
 
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
 {
-       struct hci_conn *conn = data;
+       struct hci_conn *conn = container_of(work, struct hci_conn, work);
        device_del(&conn->dev);
 }
 
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
 {
        BT_DBG("conn %p", conn);
 
-       INIT_WORK(&conn->work, del_conn, (void *) conn);
+       INIT_WORK(&conn->work, del_conn);
 
        schedule_work(&conn->work);
 }
index d9f04864d15d859d0ada8bbb57564999e55eb676..8ca448db7a0d779199fde2ed1c81f71eafbbca10 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/atomic.h>
 #include "br_private.h"
 
-static kmem_cache_t *br_fdb_cache __read_mostly;
+static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                      const unsigned char *addr);
 
index f753c40c11d25743d6d13982443b7ebaa1020e38..55bb2634c088cfccdb9a737449a008f406167302 100644 (file)
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
  * Called from work queue to allow for calling functions that
  * might sleep (such as speed check), and to debounce.
  */
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
 {
-       struct net_device *dev = arg;
        struct net_bridge_port *p;
+       struct net_device *dev;
        struct net_bridge *br;
 
+       dev = container_of(work, struct net_bridge_port,
+                          carrier_check.work)->dev;
+       work_release(work);
+
        rtnl_lock();
        p = dev->br_port;
        if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
        p->port_no = index;
        br_init_port(p);
        p->state = BR_STATE_DISABLED;
-       INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+       INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
        br_stp_port_timer_init(p);
 
        kobject_init(&p->kobj);
index ac47ba2ba0284c9f22150305a54ac7b0ab9a6504..bd221ad52eaf76e46aa8522b4db1a75176e53ecc 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_arp.h>
 #include <linux/in_route.h>
+#include <linux/inetdevice.h>
 
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -221,10 +222,14 @@ static void __br_dnat_complain(void)
  *
  * Otherwise, the packet is considered to be routed and we just
  * change the destination MAC address so that the packet will
- * later be passed up to the IP stack to be routed.
+ * later be passed up to the IP stack to be routed. For a redirected
+ * packet, ip_route_input() will give back the localhost as output device,
+ * which differs from the bridge device.
  *
  * Let us now consider the case that ip_route_input() fails:
  *
+ * This can be because the destination address is martian, in which case
+ * the packet will be dropped.
  * After a "echo '0' > /proc/sys/net/ipv4/ip_forward" ip_route_input()
  * will fail, while __ip_route_output_key() will return success. The source
  * address for __ip_route_output_key() is set to zero, so __ip_route_output_key
@@ -237,7 +242,8 @@ static void __br_dnat_complain(void)
  *
  * --Lennert, 20020411
  * --Bart, 20020416 (updated)
- * --Bart, 20021007 (updated) */
+ * --Bart, 20021007 (updated)
+ * --Bart, 20062711 (updated) */
 static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 {
        if (skb->pkt_type == PACKET_OTHERHOST) {
@@ -264,15 +270,15 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
        struct net_device *dev = skb->dev;
        struct iphdr *iph = skb->nh.iph;
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+       int err;
 
        if (nf_bridge->mask & BRNF_PKT_TYPE) {
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->mask ^= BRNF_PKT_TYPE;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-
        if (dnat_took_place(skb)) {
-               if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
+               if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct rtable *rt;
                        struct flowi fl = {
                                .nl_u = {
@@ -283,19 +289,33 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
                                },
                                .proto = 0,
                        };
+                       struct in_device *in_dev = in_dev_get(dev);
+
+                       /* If err equals -EHOSTUNREACH the error is due to a
+                        * martian destination or due to the fact that
+                        * forwarding is disabled. For most martian packets,
+                        * ip_route_output_key() will fail. It won't fail for 2 types of
+                        * martian destinations: loopback destinations and destination
+                        * 0.0.0.0. In both cases the packet will be dropped because the
+                        * destination is the loopback device and not the bridge. */
+                       if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
+                               goto free_skb;
 
                        if (!ip_route_output_key(&rt, &fl)) {
                                /* - Bridged-and-DNAT'ed traffic doesn't
-                                *   require ip_forwarding.
-                                * - Deal with redirected traffic. */
-                               if (((struct dst_entry *)rt)->dev == dev ||
-                                   rt->rt_type == RTN_LOCAL) {
+                                *   require ip_forwarding. */
+                               if (((struct dst_entry *)rt)->dev == dev) {
                                        skb->dst = (struct dst_entry *)rt;
                                        goto bridged_dnat;
                                }
+                               /* we are sure that forwarding is disabled, so printing
+                                * this message is no problem. Note that the packet could
+                                * still have a martian destination address, in which case
+                                * the packet could be dropped even if forwarding were enabled */
                                __br_dnat_complain();
                                dst_release((struct dst_entry *)rt);
                        }
+free_skb:
                        kfree_skb(skb);
                        return 0;
                } else {
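
The bridge-netfilter hunk above narrows when a failed ip_route_input() is retried: only
an -EHOSTUNREACH failure on an input device with forwarding disabled goes on to
ip_route_output_key(); every other failure frees the skb. A simplified restatement of
that branch as a pure decision function, not the kernel code; the enum and parameters
are invented for the example.

#include <stdio.h>
#include <errno.h>

enum verdict { DROP_PACKET, TRY_OUTPUT_ROUTE };

/* Mirrors: if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) drop */
static enum verdict route_input_failed(int err, int have_in_dev,
                                       int forwarding_enabled)
{
        if (err != -EHOSTUNREACH || !have_in_dev || forwarding_enabled)
                return DROP_PACKET;
        return TRY_OUTPUT_ROUTE;
}

int main(void)
{
        printf("%d\n", route_input_failed(-EHOSTUNREACH, 1, 0)); /* 1: retry */
        printf("%d\n", route_input_failed(-ENETUNREACH, 1, 0));  /* 0: drop  */
        return 0;
}
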
index 74258d86f256daf06b1717331026798640820b87..3a534e94c7f3db084e7b501dcf48dfe72038ec28 100644 (file)
@@ -82,7 +82,7 @@ struct net_bridge_port
        struct timer_list               hold_timer;
        struct timer_list               message_age_timer;
        struct kobject                  kobj;
-       struct work_struct              carrier_check;
+       struct delayed_work             carrier_check;
        struct rcu_head                 rcu;
 };
 
index 59d058a3b50487ea6c7c9dfcec8b541422ae3c45..e660cb57e42a0d1221d6c8ee5ba452f8ce9593c9 100644 (file)
@@ -3340,7 +3340,6 @@ void unregister_netdev(struct net_device *dev)
 
 EXPORT_SYMBOL(unregister_netdev);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int dev_cpu_callback(struct notifier_block *nfb,
                            unsigned long action,
                            void *ocpu)
@@ -3384,7 +3383,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_NET_DMA
 /**
index 1a5e49da0e77bdaaaaebf5369a97153ff75f4918..836ec66069254752c7ca2b9db0a597419dd758a2 100644 (file)
@@ -125,7 +125,7 @@ void * dst_alloc(struct dst_ops * ops)
                if (ops->gc())
                        return NULL;
        }
-       dst = kmem_cache_alloc(ops->kmem_cachep, SLAB_ATOMIC);
+       dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
        memset(dst, 0, ops->entry_size);
index b16d31ae5e54db47078a2f6039e1a776e6a02578..d137f971f97db39c702533d54550b68783550364 100644 (file)
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
 
@@ -211,7 +211,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                if (flow_count(cpu) > flow_hwm)
                        flow_cache_shrink(cpu);
 
-               fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+               fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->next = *head;
                        *head = fle;
@@ -340,7 +340,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
@@ -349,7 +348,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
                __flow_cache_shrink((unsigned long)hcpu, 0);
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
index 4b36114744c57f983c8af674ee45610f6e830ac1..549a2ce951b04b01ccd8a316b10a51be33da148a 100644 (file)
@@ -34,8 +34,8 @@ enum lw_bits {
 static unsigned long linkwatch_flags;
 static unsigned long linkwatch_nextevent;
 
-static void linkwatch_event(void *dummy);
-static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);
+static void linkwatch_event(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
 static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void)
 }       
 
 
-static void linkwatch_event(void *dummy)
+static void linkwatch_event(struct work_struct *dummy)
 {
        /* Limit the number of linkwatch events to one
         * per second so that a runaway driver does not
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev)
                        unsigned long delay = linkwatch_nextevent - jiffies;
 
                        /* If we wrap around we'll delay it by at most HZ. */
-                       if (!delay || delay > HZ)
-                               schedule_work(&linkwatch_work);
-                       else
-                               schedule_delayed_work(&linkwatch_work, delay);
+                       if (delay > HZ)
+                               delay = 0;
+                       schedule_delayed_work(&linkwatch_work, delay);
                }
        }
 }
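
linkwatch_fire_event() above no longer chooses between schedule_work() and
schedule_delayed_work(): the computed delay is clamped to zero when it exceeds HZ (the
jiffies-wrapped case) and the delayed variant is always used. A minimal model of that
clamping; the HZ value and types are assumptions for the example.

#include <stdio.h>

#define HZ 1000UL

/* Mirrors the hunk above: a wrapped or oversized delay degrades to "run now". */
static unsigned long clamp_linkwatch_delay(unsigned long nextevent,
                                           unsigned long now)
{
        unsigned long delay = nextevent - now;  /* unsigned: wraps if now > nextevent */

        if (delay > HZ)
                delay = 0;
        return delay;
}

int main(void)
{
        printf("%lu\n", clamp_linkwatch_delay(500, 200));   /* 300: wait        */
        printf("%lu\n", clamp_linkwatch_delay(200, 500));   /* wrapped, run now */
        return 0;
}
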
index ba509a4a8e92939d9dbbbbed4ee432822979d217..0ab1987b9348505b1ca0e6e6c155b3960a68b75c 100644 (file)
@@ -251,7 +251,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
                        goto out_entries;
        }
 
-       n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
+       n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC);
        if (!n)
                goto out_entries;
 
index 3c58846fcaa5694a8b6c1396e221a02fc1c7d1c1..b3c559b9ac35cb9495edb39e4e77c8fedd5f26cf 100644 (file)
@@ -50,9 +50,10 @@ static atomic_t trapped;
 static void zap_completion_queue(void);
 static void arp_reply(struct sk_buff *skb);
 
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
 {
-       struct netpoll_info *npinfo = p;
+       struct netpoll_info *npinfo =
+               container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(&npinfo->txq))) {
@@ -72,8 +73,6 @@ static void queue_process(void *p)
                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
-
-               netif_tx_unlock_bh(dev);
        }
 }
 
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
-               schedule_work(&npinfo->tx_work);
+               schedule_delayed_work(&npinfo->tx_work,0);
        }
 }
 
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np)
                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
-               INIT_WORK(&npinfo->tx_work, queue_process, npinfo);
+               INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
                atomic_set(&npinfo->refcnt, 1);
        } else {
index 8e1c385e5ba914e2f50bee987ed0c96ff516ba9c..de7801d589e74605ec0914d357abce7886a599f4 100644 (file)
@@ -68,8 +68,8 @@
 
 #include "kmap_skb.h"
 
-static kmem_cache_t *skbuff_head_cache __read_mostly;
-static kmem_cache_t *skbuff_fclone_cache __read_mostly;
+static struct kmem_cache *skbuff_head_cache __read_mostly;
+static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 
 /*
  *     Keep out-of-line to prevent kernel bloat.
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *     @gfp_mask: allocation mask
  *     @fclone: allocate from fclone cache instead of head cache
  *             and allocate a cloned (child) skb
+ *     @node: numa node to allocate memory on
  *
  *     Allocate a new &sk_buff. The returned buffer has no headroom and a
  *     tail room of size bytes. The object has a reference count of one.
@@ -141,9 +142,9 @@ EXPORT_SYMBOL(skb_truesize_bug);
  *     %GFP_ATOMIC.
  */
 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-                           int fclone)
+                           int fclone, int node)
 {
-       kmem_cache_t *cache;
+       struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
@@ -151,14 +152,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
        /* Get the HEAD */
-       skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
+       skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
 
        /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
-       data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
-                       gfp_mask);
+       data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
+                       gfp_mask, node);
        if (!data)
                goto nodata;
 
@@ -210,7 +211,7 @@ nodata:
  *     Buffers may only be allocated from interrupts using a @gfp_mask of
  *     %GFP_ATOMIC.
  */
-struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
                                     unsigned int size,
                                     gfp_t gfp_mask)
 {
@@ -267,9 +268,10 @@ nodata:
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask)
 {
+       int node = dev->class_dev.dev ? dev_to_node(dev->class_dev.dev) : -1;
        struct sk_buff *skb;
 
-       skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+       skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
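
__netdev_alloc_skb() above now asks for memory on the NIC's NUMA node: it reads the node
from the underlying device, or uses -1 ("no preference") when there is none, and threads
it through __alloc_skb() and kmalloc_node_track_caller(). A small model of that
"preferred node or don't care" convention, with a toy device and allocator rather than
the kernel API.

#include <stdio.h>
#include <stdlib.h>

#define NO_NODE (-1)                    /* the -1 in the hunk: no preference */

struct toy_device {
        int numa_node;                  /* filled in by the bus code, or NO_NODE */
};

/* Stand-in for a node-aware allocator; the node only steers a log line here. */
static void *toy_alloc_on_node(size_t size, int node)
{
        printf("allocating %zu bytes, preferred node %d\n", size, node);
        return malloc(size);
}

static void *toy_netdev_alloc_buf(struct toy_device *dev, size_t len)
{
        int node = dev ? dev->numa_node : NO_NODE;

        return toy_alloc_on_node(len, node);
}

int main(void)
{
        struct toy_device nic = { .numa_node = 1 };

        free(toy_netdev_alloc_buf(&nic, 1500));
        free(toy_netdev_alloc_buf(NULL, 1500));
        return 0;
}
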
index 419c7d3289c7a9689bca3e0164ed7ac01098c2d8..0ed5b4f0bc407aef5b376149dcc2d49aa3c49937 100644 (file)
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-       spin_lock_init(&sk->sk_lock.slock);
-       sk->sk_lock.owner = NULL;
-       init_waitqueue_head(&sk->sk_lock.wq);
-       /*
-        * Make sure we are not reinitializing a held lock:
-        */
-       debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-       /*
-        * Mark both the sk_lock and the sk_lock.slock as a
-        * per-address-family lock class:
-        */
-       lockdep_set_class_and_name(&sk->sk_lock.slock,
-                                  af_family_slock_keys + sk->sk_family,
-                                  af_family_slock_key_strings[sk->sk_family]);
-       lockdep_init_map(&sk->sk_lock.dep_map,
-                        af_family_key_strings[sk->sk_family],
-                        af_family_keys + sk->sk_family, 0);
+       sock_lock_init_class_and_name(sk,
+                       af_family_slock_key_strings[sk->sk_family],
+                       af_family_slock_keys + sk->sk_family,
+                       af_family_key_strings[sk->sk_family],
+                       af_family_keys + sk->sk_family);
 }
 
 /**
@@ -841,7 +828,7 @@ struct sock *sk_alloc(int family, gfp_t priority,
                      struct proto *prot, int zero_it)
 {
        struct sock *sk = NULL;
-       kmem_cache_t *slab = prot->slab;
+       struct kmem_cache *slab = prot->slab;
 
        if (slab != NULL)
                sk = kmem_cache_alloc(slab, priority);
index cb1b8728d7eec0c55c6151da9fba7d00f42747e9..f69ab7b4408ecca979e5e6f43aefd2a051577c06 100644 (file)
@@ -2130,7 +2130,7 @@ int iw_handler_set_spy(struct net_device *        dev,
         * The rtnl_lock() make sure we don't race with the other iw_handlers.
         * This make sure wireless_spy_update() "see" that the spy list
         * is temporarily disabled. */
-       wmb();
+       smp_wmb();
 
        /* Are there are addresses to copy? */
        if(wrqu->data.length > 0) {
@@ -2159,7 +2159,7 @@ int iw_handler_set_spy(struct net_device *        dev,
        }
 
        /* Make sure above is updated before re-enabling */
-       wmb();
+       smp_wmb();
 
        /* Enable addresses */
        spydata->spy_number = wrqu->data.length;
index bdf1bb7a82c0758725522b69e7d73fe2663332e6..1f4727ddbdbfbf1e8d1214ed2604bb4a120306ac 100644 (file)
@@ -21,8 +21,8 @@
 
 #include <net/sock.h>
 
-static kmem_cache_t *dccp_ackvec_slab;
-static kmem_cache_t *dccp_ackvec_record_slab;
+static struct kmem_cache *dccp_ackvec_slab;
+static struct kmem_cache *dccp_ackvec_record_slab;
 
 static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
 {
index ff05e59043cdb33769f810d16a8a667320d9e075..d8cf92f09e68f1896519f2583eff815cbb6f4cfb 100644 (file)
@@ -55,9 +55,9 @@ static inline void ccids_read_unlock(void)
 #define ccids_read_unlock() do { } while(0)
 #endif
 
-static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
 {
-       kmem_cache_t *slab;
+       struct kmem_cache *slab;
        char slab_name_fmt[32], *slab_name;
        va_list args;
 
@@ -75,7 +75,7 @@ static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
        return slab;
 }
 
-static void ccid_kmem_cache_destroy(kmem_cache_t *slab)
+static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
        if (slab != NULL) {
                const char *name = kmem_cache_name(slab);
index c7c29514dce8a16e5b29827a7ab3cb93136169bb..bcc2d12ae81ccb13d00e428c85840b37a0ac81ec 100644 (file)
@@ -27,9 +27,9 @@ struct ccid_operations {
        unsigned char   ccid_id;
        const char      *ccid_name;
        struct module   *ccid_owner;
-       kmem_cache_t    *ccid_hc_rx_slab;
+       struct kmem_cache       *ccid_hc_rx_slab;
        __u32           ccid_hc_rx_obj_size;
-       kmem_cache_t    *ccid_hc_tx_slab;
+       struct kmem_cache       *ccid_hc_tx_slab;
        __u32           ccid_hc_tx_obj_size;
        int             (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
        int             (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
index cf8c07b2704fcc03e589e1c9d4826365c2c6ca84..66a27b9688ca9240579ce897df70637b8698eb18 100644 (file)
@@ -295,7 +295,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
        new_packet = dccp_tx_hist_head(&hctx->ccid3hctx_hist);
        if (new_packet == NULL || new_packet->dccphtx_sent) {
                new_packet = dccp_tx_hist_entry_new(ccid3_tx_hist,
-                                                   SLAB_ATOMIC);
+                                                   GFP_ATOMIC);
 
                if (unlikely(new_packet == NULL)) {
                        DCCP_WARN("%s, sk=%p, not enough mem to add to history,"
@@ -889,7 +889,7 @@ static void ccid3_hc_rx_update_li(struct sock *sk, u64 seq_loss, u8 win_loss)
                /* new loss event detected */
                /* calculate last interval length */
                seq_temp = dccp_delta_seqno(head->dccplih_seqno, seq_loss);
-               entry = dccp_li_hist_entry_new(ccid3_li_hist, SLAB_ATOMIC);
+               entry = dccp_li_hist_entry_new(ccid3_li_hist, GFP_ATOMIC);
 
                if (entry == NULL) {
                        DCCP_BUG("out of memory - can not allocate entry");
@@ -1011,7 +1011,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
        }
 
        packet = dccp_rx_hist_entry_new(ccid3_rx_hist, sk, opt_recv->dccpor_ndp,
-                                       skb, SLAB_ATOMIC);
+                                       skb, GFP_ATOMIC);
        if (unlikely(packet == NULL)) {
                DCCP_WARN("%s, sk=%p, Not enough mem to add rx packet "
                          "to history, consider it lost!\n", dccp_role(sk), sk);
index 48b9b93f8acb999aa4b92a89e0285e5e2798e80c..0a0baef16b3e02f22fc79f0849bc742e59a408d5 100644 (file)
@@ -125,7 +125,7 @@ int dccp_li_hist_interval_new(struct dccp_li_hist *hist,
        int i;
 
        for (i = 0; i < DCCP_LI_HIST_IVAL_F_LENGTH; i++) {
-               entry = dccp_li_hist_entry_new(hist, SLAB_ATOMIC);
+               entry = dccp_li_hist_entry_new(hist, GFP_ATOMIC);
                if (entry == NULL) {
                        dccp_li_hist_purge(hist, list);
                        DCCP_BUG("loss interval list entry is NULL");
index 0ae85f0340b28f58e3a0b3cfb7ef777d608ce298..eb257014dd74c25339f46d96be60641f3fc337e9 100644 (file)
@@ -20,7 +20,7 @@
 #define DCCP_LI_HIST_IVAL_F_LENGTH  8
 
 struct dccp_li_hist {
-       kmem_cache_t *dccplih_slab;
+       struct kmem_cache *dccplih_slab;
 };
 
 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
index 067cf1c85a375daed16e3ffeeb621a46d2a5f192..9a8bcf224aa73151a3775d2426524c38e1d169dd 100644 (file)
@@ -68,14 +68,14 @@ struct dccp_rx_hist_entry {
 };
 
 struct dccp_tx_hist {
-       kmem_cache_t *dccptxh_slab;
+       struct kmem_cache *dccptxh_slab;
 };
 
 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name);
 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist);
 
 struct dccp_rx_hist {
-       kmem_cache_t *dccprxh_slab;
+       struct kmem_cache *dccprxh_slab;
 };
 
 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
index 7b52f2a03eefd291deffba6b45f0c9efc2fcad63..4c9e26775f72ddad7a1c0f9f53c7c9656247e91f 100644 (file)
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = {
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&dccp_death_row),
        .twkill_work    = __WORK_INITIALIZER(dccp_death_row.twkill_work,
-                                            inet_twdr_twkill_work,
-                                            &dccp_death_row),
+                                            inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
        .twcal_hand     = -1,
index bdbc3f431668b3ff5765e06c0e92a16f804682e9..13b2421991bac8618b5fb27f139a9a2a42bdb4f3 100644 (file)
@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
 static DEFINE_RWLOCK(dn_fib_tables_lock);
 
-static kmem_cache_t *dn_hash_kmem __read_mostly;
+static struct kmem_cache *dn_hash_kmem __read_mostly;
 static int dn_fib_hash_zombies;
 
 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
@@ -590,7 +590,7 @@ create:
 
 replace:
        err = -ENOBUFS;
-       new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL);
+       new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL);
        if (new_f == NULL)
                goto out;
 
index cf51c87a971d786f0035b7e5d34cc6076889cc23..eec1a1dd91da255714c88f8081ac2d28cb94956f 100644 (file)
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
 }
 
 void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
 {
-       struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+       struct ieee80211softmac_device *mac =
+               container_of(work, struct ieee80211softmac_device,
+                            associnfo.timeout.work);
        struct ieee80211softmac_network *n;
 
        mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
 
 /* This function is called to handle userspace requests (asynchronously) */
 void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
 {
-       struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+       struct ieee80211softmac_device *mac =
+               container_of(work, struct ieee80211softmac_device,
+                            associnfo.work.work);
        struct ieee80211softmac_network *found = NULL;
        struct ieee80211_network *net = NULL, *best = NULL;
        int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
                                network->authenticated = 0;
                                /* we don't want to do this more than once ... */
                                network->auth_desynced_once = 1;
-                               schedule_work(&mac->associnfo.work);
+                               schedule_delayed_work(&mac->associnfo.work, 0);
                                break;
                        }
                default:
@@ -427,6 +431,17 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
        return 0;
 }
 
+void
+ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&mac->lock, flags);
+       mac->associnfo.associating = 1;
+       schedule_work(&mac->associnfo.work);
+       spin_unlock_irqrestore(&mac->lock, flags);
+}
+
 int
 ieee80211softmac_handle_disassoc(struct net_device * dev,
                                 struct ieee80211_disassoc *disassoc)
@@ -445,8 +460,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
        dprintk(KERN_INFO PFX "got disassoc frame\n");
        ieee80211softmac_disassoc(mac);
 
-       /* try to reassociate */
-       schedule_work(&mac->associnfo.work);
+       ieee80211softmac_try_reassoc(mac);
 
        return 0;
 }
@@ -466,7 +480,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
                dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
                return 0;
        }
-       schedule_work(&mac->associnfo.work);
+       schedule_delayed_work(&mac->associnfo.work, 0);
 
        return 0;
 }
index 0612015f1c782688602e4965e23532a121e5eed6..8ed3e59b8024fd3f5dea77fd5e5df01162b3d039 100644 (file)
@@ -26,7 +26,7 @@
 
 #include "ieee80211softmac_priv.h"
 
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
 
 /* Queues an auth request to the desired AP */
 int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
        auth->mac = mac;
        auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
        auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
-       INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+       INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
        
        /* Lock (for list) */
        spin_lock_irqsave(&mac->lock, flags);
 
        /* add to list */
        list_add_tail(&auth->list, &mac->auth_queue);
-       schedule_work(&auth->work);
+       schedule_delayed_work(&auth->work, 0);
        spin_unlock_irqrestore(&mac->lock, flags);
        
        return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
 
 /* Sends an auth request to the desired AP and handles timeouts */
 static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
 {
        struct ieee80211softmac_device *mac;
        struct ieee80211softmac_auth_queue_item *auth;
        struct ieee80211softmac_network *net;
        unsigned long flags;
 
-       auth = (struct ieee80211softmac_auth_queue_item *)data;
+       auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+                           work.work);
        net = auth->net;
        mac = auth->mac;
 
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
 
 /* Sends a response to an auth challenge (for shared key auth). */
 static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
 {
-       struct ieee80211softmac_auth_queue_item *aq = _aq;
+       struct ieee80211softmac_auth_queue_item *aq =
+               container_of(work, struct ieee80211softmac_auth_queue_item,
+                            work.work);
 
        /* Send our response */
        ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
                         * we have obviously already sent the initial auth
                         * request. */
                        cancel_delayed_work(&aq->work);
-                       INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
-                       schedule_work(&aq->work);
+                       INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+                       schedule_delayed_work(&aq->work, 0);
                        spin_unlock_irqrestore(&mac->lock, flags);
                        return 0;
                case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -334,6 +337,8 @@ ieee80211softmac_deauth_from_net(struct ieee80211softmac_device *mac,
        /* can't transmit data right now... */
        netif_carrier_off(mac->dev);
        spin_unlock_irqrestore(&mac->lock, flags);
+
+       ieee80211softmac_try_reassoc(mac);
 }
 
 /* 
@@ -398,6 +403,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
        ieee80211softmac_deauth_from_net(mac, net);
 
        /* let's try to re-associate */
-       schedule_work(&mac->associnfo.work);
+       schedule_delayed_work(&mac->associnfo.work, 0);
        return 0;
 }
index f34fa2ef666b5fa56247f060bae06a2d94f7f06e..b9015656cfb3835e5e2966c457d0d9269ebb17f7 100644 (file)
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
 
 
 static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
 {
-       struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
-       kfree(d);
+       struct ieee80211softmac_event *pevent =
+               container_of(work, struct ieee80211softmac_event, work.work);
+       struct ieee80211softmac_event event = *pevent;
+       kfree(pevent);
        
        event.fun(event.mac->dev, event.event_type, event.context);
 }
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
                return -ENOMEM;
        
        eventptr->event_type = event;
-       INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+       INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
        eventptr->fun = fun;
        eventptr->context = context;
        eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
                                /* User may have subscribed to ANY event, so
                                 * we tell them which event triggered it. */
                                eventptr->event_type = event;
-                               schedule_work(&eventptr->work);
+                               schedule_delayed_work(&eventptr->work, 0);
                        }
                }
 }
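
ieee80211softmac_notify_callback() above recovers its event with container_of(), copies
it to the stack and frees the heap copy before invoking the user function, so the
one-shot event object is gone by the time the callback runs. A reduced model of that
copy-then-free step; the structures are toys and only the ordering is the point.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work {                           /* stand-in for the embedded work item */
        int pending;
};

struct sm_event {
        int event_type;
        void (*fun)(int event_type);
        struct work work;
};

static void print_event(int event_type)
{
        printf("event %d delivered\n", event_type);
}

static void notify_callback(struct work *w)
{
        struct sm_event *pevent = container_of(w, struct sm_event, work);
        struct sm_event event = *pevent;        /* copy to the stack ...       */

        free(pevent);                           /* ... so this free is safe    */
        event.fun(event.event_type);            /* heap object is already gone */
}

int main(void)
{
        struct sm_event *e = malloc(sizeof(*e));

        if (!e)
                return 1;
        e->event_type = 7;
        e->fun = print_event;
        notify_callback(&e->work);              /* what the workqueue would do */
        return 0;
}
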
index 33aff4f4a471fde8fe6538c46dbe8527c48536a6..256207b71dc94ce135aca1d3adc36f5c49875305 100644 (file)
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
        INIT_LIST_HEAD(&softmac->events);
 
        mutex_init(&softmac->associnfo.mutex);
-       INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
-       INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+       INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+       INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
        softmac->start_scan = ieee80211softmac_start_scan_implementation;
        softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
        softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
index 0642e090b8a7a177ce0bf63a56a3692e2e57c0a8..4c2bba34d328c811ad2261c83dc39b371b00d7c5 100644 (file)
@@ -78,7 +78,7 @@
 /* private definitions and prototypes */
 
 /*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
 /* for internal use if scanning is needed */
 int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
 void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
 int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
 
 /*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
 int ieee80211softmac_handle_assoc_response(struct net_device * dev,
                                           struct ieee80211_assoc_response * resp,
                                           struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
                                     struct ieee80211_disassoc * disassoc);
 int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
                                        struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
 void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
 void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
 
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
        struct ieee80211softmac_device  *mac;   /* SoftMAC device */
        u8 retry;                               /* Retry limit */
        u8 state;                               /* Auth State */
-       struct work_struct              work;   /* Work queue */
+       struct delayed_work             work;   /* Work queue */
 };
 
 /* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
           stop:1;
        u8 skip_flags;
        struct completion finished;
-       struct work_struct softmac_scan;
+       struct delayed_work softmac_scan;
+       struct ieee80211softmac_device *mac;
 };
 
 /* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
        struct list_head list;
        int event_type;
        void *event_context;
-       struct work_struct work;
+       struct delayed_work work;
        notify_function_ptr fun;
        void *context;
        struct ieee80211softmac_device *mac;
@@ -238,4 +239,6 @@ void ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, in
 int ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
        int event, void *event_context, notify_function_ptr fun, void *context, gfp_t gfp_mask);
 
+void ieee80211softmac_try_reassoc(struct ieee80211softmac_device *mac);
+
 #endif /* IEEE80211SOFTMAC_PRIV_H_ */
index 5507feab32de420aef36e4f0796530cf869d34a2..0c85d6c24cdbca332a5e865fdbb2d51f0ab05621 100644 (file)
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
 
 
 /* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
 {
        int invalid_channel;
        u8 current_channel_idx;
-       struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
-       struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+       struct ieee80211softmac_scaninfo *si =
+               container_of(work, struct ieee80211softmac_scaninfo,
+                            softmac_scan.work);
+       struct ieee80211softmac_device *sm = si->mac;
        unsigned long flags;
 
        while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
        struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
        if (unlikely(!info))
                return NULL;
-       INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+       INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+       info->mac = mac;
        init_completion(&info->finished);
        return info;
 }
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
        sm->scaninfo->started = 1;
        sm->scaninfo->stop = 0;
        INIT_COMPLETION(sm->scaninfo->finished);
-       schedule_work(&sm->scaninfo->softmac_scan);
+       schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
        spin_unlock_irqrestore(&sm->lock, flags);
        return 0;
 }
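
Since the handler can no longer be handed an arbitrary context pointer at INIT time, any state that does not live in the structure embedding the work item must be reachable from it; that is why struct ieee80211softmac_scaninfo gains a mac back-pointer that allocate_scaninfo() now fills in. A hedged sketch of the allocation-plus-back-pointer pattern (my_scan and my_ctx are illustrative names):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_ctx;                          /* owning object, defined elsewhere */

    struct my_scan {
            struct delayed_work work;
            struct my_ctx *ctx;             /* back-pointer filled at allocation */
    };

    static void my_scan_fn(struct work_struct *work)
    {
            struct my_scan *scan = container_of(work, struct my_scan, work.work);
            struct my_ctx *ctx = scan->ctx; /* context recovered via back-pointer */

            (void)ctx;                      /* real code would use it here */
    }

    static struct my_scan *my_scan_alloc(struct my_ctx *ctx)
    {
            struct my_scan *scan = kmalloc(sizeof(*scan), GFP_ATOMIC);

            if (!scan)
                    return NULL;
            INIT_DELAYED_WORK(&scan->work, my_scan_fn);
            scan->ctx = ctx;
            return scan;
    }
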
index 23068a830f7dba7916c0380ef1767758d1f95815..480d72c7a42ca861bb9524a631ea23be8a03feaf 100644 (file)
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
 
        sm->associnfo.associating = 1;
        /* queue lower level code to do work (if necessary) */
-       schedule_work(&sm->associnfo.work);
+       schedule_delayed_work(&sm->associnfo.work, 0);
 out:
        mutex_unlock(&sm->associnfo.mutex);
 
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
                /* force reassociation */
                mac->associnfo.bssvalid = 0;
                if (mac->associnfo.associated)
-                       schedule_work(&mac->associnfo.work);
+                       schedule_delayed_work(&mac->associnfo.work, 0);
        } else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
                /* the bssid we have is no longer fixed */
                mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
                /* tell the other code that this bssid should be used no matter what */
                mac->associnfo.bssfixed = 1;
                /* queue associate if new bssid or (old one again and not associated) */
-               schedule_work(&mac->associnfo.work);
+               schedule_delayed_work(&mac->associnfo.work, 0);
         }
 
  out:
@@ -495,7 +495,8 @@ ieee80211softmac_wx_set_mlme(struct net_device *dev,
                        printk(KERN_DEBUG PFX "wx_set_mlme: we should know the net here...\n");
                        goto out;
                }
-               return ieee80211softmac_deauth_req(mac, net, reason);
+               err =  ieee80211softmac_deauth_req(mac, net, reason);
+               goto out;
        case IW_MLME_DISASSOC:
                ieee80211softmac_send_disassoc_req(mac, reason);
                mac->associnfo.associated = 0;
index 107bb6cbb0b370be0f8e10b0d434a432fbc3ebdd..648f47c1c399e262c86cfc22588ff257a5babcd5 100644 (file)
@@ -45,8 +45,8 @@
 
 #include "fib_lookup.h"
 
-static kmem_cache_t *fn_hash_kmem __read_mostly;
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_hash_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 
 struct fib_node {
        struct hlist_node       fn_hash;
@@ -485,13 +485,13 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg)
                goto out;
 
        err = -ENOBUFS;
-       new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+       new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
        if (new_fa == NULL)
                goto out;
 
        new_f = NULL;
        if (!f) {
-               new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL);
+               new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL);
                if (new_f == NULL)
                        goto out_free_new_fa;
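
A second theme running through this merge is the slab API cleanup: the kmem_cache_t typedef becomes struct kmem_cache, and the legacy SLAB_KERNEL / SLAB_ATOMIC allocation flags are replaced by GFP_KERNEL / GFP_ATOMIC. A hedged sketch of post-conversion slab usage (cache and object names are illustrative; the six-argument kmem_cache_create() shown is the 2.6.19-era signature, later kernels dropped the destructor argument):

    #include <linux/cache.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_obj {
            int id;
    };

    static struct kmem_cache *my_cachep __read_mostly;

    static int my_cache_init(void)
    {
            my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
                                          0, 0, NULL, NULL);
            return my_cachep ? 0 : -ENOMEM;
    }

    static struct my_obj *my_obj_alloc(gfp_t gfp)
    {
            return kmem_cache_alloc(my_cachep, gfp); /* GFP_KERNEL or GFP_ATOMIC */
    }
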
 
index d17990ec724f68d30fa6987e8c61fd7dc21fdd5d..cfb249cc0a5859c2fb06cf3877a978c9e066b14c 100644 (file)
@@ -172,7 +172,7 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 static void tnode_free(struct tnode *tn);
 
-static kmem_cache_t *fn_alias_kmem __read_mostly;
+static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct trie *trie_local = NULL, *trie_main = NULL;
 
 
@@ -1187,7 +1187,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
                        u8 state;
 
                        err = -ENOBUFS;
-                       new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+                       new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
                        if (new_fa == NULL)
                                goto out;
 
@@ -1232,7 +1232,7 @@ static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
                goto out;
 
        err = -ENOBUFS;
-       new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL);
+       new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
        if (new_fa == NULL)
                goto out;
 
index 244c4f445c7d55779a99fe301152c2241bbcacd7..8c79c8a4ea5c2e2daee5f9184d2401ddd60b3192 100644 (file)
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
  */
-struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
+struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                 struct inet_bind_hashbucket *head,
                                                 const unsigned short snum)
 {
-       struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, SLAB_ATOMIC);
+       struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
        if (tb != NULL) {
                tb->port      = snum;
@@ -45,7 +45,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep,
 /*
  * Caller must hold hashbucket lock for this tb with local BH disabled
  */
-void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
+void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
 {
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
index cdd805344c6183730f09be330a52ee4865a760b6..9f414e35c488fe2c1464b1aae47363b17968fb5f 100644 (file)
@@ -91,7 +91,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
 {
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
-                                SLAB_ATOMIC);
+                                GFP_ATOMIC);
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);
 
@@ -178,7 +178,6 @@ void inet_twdr_hangman(unsigned long data)
        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
-               mb();
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
@@ -197,9 +196,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman);
 
 extern void twkill_slots_invalid(void);
 
-void inet_twdr_twkill_work(void *data)
+void inet_twdr_twkill_work(struct work_struct *work)
 {
-       struct inet_timewait_death_row *twdr = data;
+       struct inet_timewait_death_row *twdr =
+               container_of(work, struct inet_timewait_death_row, twkill_work);
        int i;
 
        if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
index f072f3875af8dfd5c6505787230cc578d8085c1c..711eb6d0285a8a4709a8e89a9c1b88697dcce7ee 100644 (file)
@@ -73,7 +73,7 @@
 /* Exported for inet_getid inline function.  */
 DEFINE_SPINLOCK(inet_peer_idlock);
 
-static kmem_cache_t *peer_cachep __read_mostly;
+static struct kmem_cache *peer_cachep __read_mostly;
 
 #define node_height(x) x->avl_height
 static struct inet_peer peer_fake_node = {
index efcf45ecc8188f6aff5b6d875172487bdb514d65..ecb5422ea237d0ad811a46fa924ecd01d66720d9 100644 (file)
@@ -105,7 +105,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
    In this case data path is free of exclusive locks at all.
  */
 
-static kmem_cache_t *mrt_cachep __read_mostly;
+static struct kmem_cache *mrt_cachep __read_mostly;
 
 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
index 8832eb517d5200355356eb0281b4f4cc6be8cb61..8086787a2c51e5c0df7f4043ac8778af0a819edf 100644 (file)
@@ -44,7 +44,7 @@
 static struct list_head *ip_vs_conn_tab;
 
 /*  SLAB cache for IPVS connections */
-static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
+static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
 /*  counter for current IPVS connections */
 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
index f261616e460218b5379340ba77ba6f4b6ce48506..9b933381ebbe7fc711b8f17d8ce6616d8dff0bdc 100644 (file)
@@ -221,10 +221,10 @@ static void update_defense_level(void)
  *     Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD   1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
 {
        update_defense_level();
        if (atomic_read(&ip_vs_dropentry))
index 413c2d0a1f3dee117db48289865279113f70e348..71b76ade00e13fa9dee0bd00ec05578e910825b5 100644 (file)
@@ -375,6 +375,13 @@ static int mark_source_chains(struct xt_table_info *newinfo,
                            && unconditional(&e->arp)) {
                                unsigned int oldpos, size;
 
+                               if (t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+                                       return 0;
+                               }
+
                                /* Return: backtrack through the last
                                 * big jump.
                                 */
@@ -404,6 +411,14 @@ static int mark_source_chains(struct xt_table_info *newinfo,
                                if (strcmp(t->target.u.user.name,
                                           ARPT_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
+                                       if (newpos > newinfo->size -
+                                               sizeof(struct arpt_entry)) {
+                                               duprintf("mark_source_chains: "
+                                                       "bad verdict (%i)\n",
+                                                               newpos);
+                                               return 0;
+                                       }
+
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
@@ -426,8 +441,6 @@ static int mark_source_chains(struct xt_table_info *newinfo,
 static inline int standard_check(const struct arpt_entry_target *t,
                                 unsigned int max_offset)
 {
-       struct arpt_standard_target *targ = (void *)t;
-
        /* Check standard info. */
        if (t->u.target_size
            != ARPT_ALIGN(sizeof(struct arpt_standard_target))) {
@@ -437,18 +450,6 @@ static inline int standard_check(const struct arpt_entry_target *t,
                return 0;
        }
 
-       if (targ->verdict >= 0
-           && targ->verdict > max_offset - sizeof(struct arpt_entry)) {
-               duprintf("arpt_standard_check: bad verdict (%i)\n",
-                        targ->verdict);
-               return 0;
-       }
-
-       if (targ->verdict < -NF_MAX_VERDICT - 1) {
-               duprintf("arpt_standard_check: bad negative verdict (%i)\n",
-                        targ->verdict);
-               return 0;
-       }
        return 1;
 }
 
@@ -627,18 +628,20 @@ static int translate_table(const char *name,
                }
        }
 
+       if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
+               duprintf("Looping hook\n");
+               return -ELOOP;
+       }
+
        /* Finally, each sanity check must pass */
        i = 0;
        ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
                                 check_entry, name, size, &i);
 
-       if (ret != 0)
-               goto cleanup;
-
-       ret = -ELOOP;
-       if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
-               duprintf("Looping hook\n");
-               goto cleanup;
+       if (ret != 0) {
+               ARPT_ENTRY_ITERATE(entry0, newinfo->size,
+                               cleanup_entry, &i);
+               return ret;
        }
 
        /* And one copy for every other CPU */
@@ -647,9 +650,6 @@ static int translate_table(const char *name,
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
 
-       return 0;
-cleanup:
-       ARPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
        return ret;
 }
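
The arptables change above (and the matching ip_tables and ip6_tables hunks further down) moves verdict validation out of standard_check() into mark_source_chains(): a jump target is rejected if it points past the end of the table, and a negative verdict is rejected if it is below the smallest special verdict. A hedged sketch of the check being enforced (helper name and parameters are illustrative, not from the patch):

    #include <linux/netfilter.h>

    /* Return non-zero if a standard-target verdict is acceptable. */
    static int verdict_ok(int verdict, unsigned int table_size,
                          unsigned int entry_size)
    {
            if (verdict < 0)        /* special verdicts / builtin targets */
                    return verdict >= -NF_MAX_VERDICT - 1;
            /* forward jump: must leave room for at least one entry header */
            return (unsigned int)verdict <= table_size - entry_size;
    }
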
 
index f4b0e68a16d20b1b6013a61ea10f5b66ab9cd78c..8556a4f4f60abde4adf99135e4c28967f4d2d96b 100644 (file)
@@ -65,8 +65,8 @@ static LIST_HEAD(helpers);
 unsigned int ip_conntrack_htable_size __read_mostly = 0;
 int ip_conntrack_max __read_mostly;
 struct list_head *ip_conntrack_hash __read_mostly;
-static kmem_cache_t *ip_conntrack_cachep __read_mostly;
-static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_cachep __read_mostly;
+static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly;
 struct ip_conntrack ip_conntrack_untracked;
 unsigned int ip_ct_log_invalid __read_mostly;
 static LIST_HEAD(unconfirmed);
index 8a455439b128258de3e42926640834975579d5af..0ff2956d35e5ab876d531efd2fe274c276ad5208 100644 (file)
@@ -401,6 +401,13 @@ mark_source_chains(struct xt_table_info *newinfo,
                            && unconditional(&e->ip)) {
                                unsigned int oldpos, size;
 
+                               if (t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+                                       return 0;
+                               }
+
                                /* Return: backtrack through the last
                                   big jump. */
                                do {
@@ -438,6 +445,13 @@ mark_source_chains(struct xt_table_info *newinfo,
                                if (strcmp(t->target.u.user.name,
                                           IPT_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
+                                       if (newpos > newinfo->size -
+                                               sizeof(struct ipt_entry)) {
+                                               duprintf("mark_source_chains: "
+                                                       "bad verdict (%i)\n",
+                                                               newpos);
+                                               return 0;
+                                       }
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
@@ -469,27 +483,6 @@ cleanup_match(struct ipt_entry_match *m, unsigned int *i)
        return 0;
 }
 
-static inline int
-standard_check(const struct ipt_entry_target *t,
-              unsigned int max_offset)
-{
-       struct ipt_standard_target *targ = (void *)t;
-
-       /* Check standard info. */
-       if (targ->verdict >= 0
-           && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
-               duprintf("ipt_standard_check: bad verdict (%i)\n",
-                        targ->verdict);
-               return 0;
-       }
-       if (targ->verdict < -NF_MAX_VERDICT - 1) {
-               duprintf("ipt_standard_check: bad negative verdict (%i)\n",
-                        targ->verdict);
-               return 0;
-       }
-       return 1;
-}
-
 static inline int
 check_match(struct ipt_entry_match *m,
            const char *name,
@@ -576,12 +569,7 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
        if (ret)
                goto err;
 
-       if (t->u.kernel.target == &ipt_standard_target) {
-               if (!standard_check(t, size)) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-       } else if (t->u.kernel.target->checkentry
+       if (t->u.kernel.target->checkentry
                   && !t->u.kernel.target->checkentry(name, e, target, t->data,
                                                      e->comefrom)) {
                duprintf("ip_tables: check failed for `%s'.\n",
@@ -718,17 +706,19 @@ translate_table(const char *name,
                }
        }
 
+       if (!mark_source_chains(newinfo, valid_hooks, entry0))
+               return -ELOOP;
+
        /* Finally, each sanity check must pass */
        i = 0;
        ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry, name, size, &i);
 
-       if (ret != 0)
-               goto cleanup;
-
-       ret = -ELOOP;
-       if (!mark_source_chains(newinfo, valid_hooks, entry0))
-               goto cleanup;
+       if (ret != 0) {
+               IPT_ENTRY_ITERATE(entry0, newinfo->size,
+                               cleanup_entry, &i);
+               return ret;
+       }
 
        /* And one copy for every other CPU */
        for_each_possible_cpu(i) {
@@ -736,9 +726,6 @@ translate_table(const char *name,
                        memcpy(newinfo->entries[i], entry0, newinfo->size);
        }
 
-       return 0;
-cleanup:
-       IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
        return ret;
 }
 
@@ -1529,25 +1516,8 @@ static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
        void **dstptr, compat_uint_t *size, const char *name,
        const struct ipt_ip *ip, unsigned int hookmask)
 {
-       struct ipt_entry_match *dm;
-       struct ipt_match *match;
-       int ret;
-
-       dm = (struct ipt_entry_match *)*dstptr;
-       match = m->u.kernel.match;
        xt_compat_match_from_user(m, dstptr, size);
-
-       ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
-                            name, hookmask, ip->proto,
-                            ip->invflags & IPT_INV_PROTO);
-       if (!ret && m->u.kernel.match->checkentry
-           && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
-                                             hookmask)) {
-               duprintf("ip_tables: check failed for `%s'.\n",
-                        m->u.kernel.match->name);
-               ret = -EINVAL;
-       }
-       return ret;
+       return 0;
 }
 
 static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
@@ -1569,7 +1539,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
        ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
                        name, &de->ip, de->comefrom);
        if (ret)
-               goto err;
+               return ret;
        de->target_offset = e->target_offset - (origsize - *size);
        t = ipt_get_target(e);
        target = t->u.kernel.target;
@@ -1582,31 +1552,62 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
                if ((unsigned char *)de - base < newinfo->underflow[h])
                        newinfo->underflow[h] -= origsize - *size;
        }
+       return ret;
+}
+
+static inline int compat_check_match(struct ipt_entry_match *m, const char *name,
+                               const struct ipt_ip *ip, unsigned int hookmask)
+{
+       struct ipt_match *match;
+       int ret;
+
+       match = m->u.kernel.match;
+       ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
+                            name, hookmask, ip->proto,
+                            ip->invflags & IPT_INV_PROTO);
+       if (!ret && m->u.kernel.match->checkentry
+           && !m->u.kernel.match->checkentry(name, ip, match, m->data,
+                                             hookmask)) {
+               duprintf("ip_tables: compat: check failed for `%s'.\n",
+                        m->u.kernel.match->name);
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
+static inline int compat_check_target(struct ipt_entry *e, const char *name)
+{
+       struct ipt_entry_target *t;
+       struct ipt_target *target;
+       int ret;
 
-       t = ipt_get_target(de);
+       t = ipt_get_target(e);
        target = t->u.kernel.target;
        ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
                              name, e->comefrom, e->ip.proto,
                              e->ip.invflags & IPT_INV_PROTO);
-       if (ret)
-               goto err;
-
-       ret = -EINVAL;
-       if (t->u.kernel.target == &ipt_standard_target) {
-               if (!standard_check(t, *size))
-                       goto err;
-       } else if (t->u.kernel.target->checkentry
-                  && !t->u.kernel.target->checkentry(name, de, target,
-                                                     t->data, de->comefrom)) {
+       if (!ret && t->u.kernel.target->checkentry
+                  && !t->u.kernel.target->checkentry(name, e, target,
+                                                     t->data, e->comefrom)) {
                duprintf("ip_tables: compat: check failed for `%s'.\n",
                         t->u.kernel.target->name);
-               goto err;
+               ret = -EINVAL;
        }
-       ret = 0;
-err:
        return ret;
 }
 
+static inline int compat_check_entry(struct ipt_entry *e, const char *name)
+{
+       int ret;
+
+       ret = IPT_MATCH_ITERATE(e, compat_check_match, name, &e->ip,
+                                                               e->comefrom);
+       if (ret)
+               return ret;
+
+       return compat_check_target(e, name);
+}
+
 static int
 translate_compat_table(const char *name,
                unsigned int valid_hooks,
@@ -1695,6 +1696,11 @@ translate_compat_table(const char *name,
        if (!mark_source_chains(newinfo, valid_hooks, entry1))
                goto free_newinfo;
 
+       ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
+                                                                       name);
+       if (ret)
+               goto free_newinfo;
+
        /* And one copy for every other CPU */
        for_each_possible_cpu(i)
                if (newinfo->entries[i] && newinfo->entries[i] != entry1)
index 9f3924c4905e213fcd2c9dcf23f011eadbb00e92..11c167118e87fa87ecfd7898a6190c06fd24d7a0 100644 (file)
@@ -1780,7 +1780,7 @@ static inline int __mkroute_input(struct sk_buff *skb,
 #endif
        if (in_dev->cnf.no_policy)
                rth->u.dst.flags |= DST_NOPOLICY;
-       if (in_dev->cnf.no_xfrm)
+       if (out_dev->cnf.no_xfrm)
                rth->u.dst.flags |= DST_NOXFRM;
        rth->fl.fl4_dst = daddr;
        rth->rt_dst     = daddr;
index 9304034c0c471cf376fb13e5fe41754ea707c2be..c701f6abbfc14a3ab05ba52c7469dc8cb5875df8 100644 (file)
@@ -4235,7 +4235,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
-               mb();
+               smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
 
                security_inet_conn_established(sk, skb);
@@ -4483,7 +4483,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
-                               mb();
+                               smp_mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->sk_state_change(sk);
 
index 6dddf59c1fb934ce9af25f2060ff8ce9b7a6556b..4a3889dd194352a0e26f1f1c1b486ccad4c36a5c 100644 (file)
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = {
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&tcp_death_row),
        .twkill_work    = __WORK_INITIALIZER(tcp_death_row.twkill_work,
-                                            inet_twdr_twkill_work,
-                                            &tcp_death_row),
+                                            inet_twdr_twkill_work),
 /* Short-time timewait calendar */
 
        .twcal_hand     = -1,
index d4107bb701b58ba4492c68b51d902fb6e286b05a..fb9f69c616f5c3dbf43af6db14a8178aba46419b 100644 (file)
@@ -274,6 +274,8 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
 
        if (likely(xdst->u.rt.idev))
                in_dev_put(xdst->u.rt.idev);
+       if (likely(xdst->u.rt.peer))
+               inet_putpeer(xdst->u.rt.peer);
        xfrm_dst_destroy(xdst);
 }
 
index 87c8f54872b77fc0d3f187ca32cb1989569d6513..e5cd83b2205d41e9a2689224b12d4d8dbfaec2e9 100644 (file)
@@ -720,10 +720,8 @@ snmp6_mib_free(void *ptr[2])
 {
        if (ptr == NULL)
                return;
-       if (ptr[0])
-               free_percpu(ptr[0]);
-       if (ptr[1])
-               free_percpu(ptr[1]);
+       free_percpu(ptr[0]);
+       free_percpu(ptr[1]);
        ptr[0] = ptr[1] = NULL;
 }
 
index bf526115e5186ec00c88c81c517bf78dac35a831..96d8310ae9c822ed99f39899b49838e3318f56e0 100644 (file)
@@ -50,7 +50,7 @@
 
 struct rt6_statistics  rt6_stats;
 
-static kmem_cache_t * fib6_node_kmem __read_mostly;
+static struct kmem_cache * fib6_node_kmem __read_mostly;
 
 enum fib_walk_state_t
 {
@@ -150,7 +150,7 @@ static __inline__ struct fib6_node * node_alloc(void)
 {
        struct fib6_node *fn;
 
-       if ((fn = kmem_cache_alloc(fib6_node_kmem, SLAB_ATOMIC)) != NULL)
+       if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL)
                memset(fn, 0, sizeof(struct fib6_node));
 
        return fn;
index e05ecbb1412ddaa91777333cf358217eb3e08be7..e9212c7ff5cf9cd38d0599b8a8bb479ab626ce27 100644 (file)
@@ -624,13 +624,13 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                skb_shinfo(skb)->frag_list = NULL;
                /* BUILD HEADER */
 
+               *prevhdr = NEXTHDR_FRAGMENT;
                tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC);
                if (!tmp_hdr) {
                        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
                        return -ENOMEM;
                }
 
-               *prevhdr = NEXTHDR_FRAGMENT;
                __skb_pull(skb, hlen);
                fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
                skb->nh.raw = __skb_push(skb, hlen);
index f63fb86d7c7b56732a108bcf1cfeae6b9f81472b..4eec4b3988b824af9b763cb615d4e2ee148d9127 100644 (file)
@@ -440,6 +440,13 @@ mark_source_chains(struct xt_table_info *newinfo,
                            && unconditional(&e->ipv6)) {
                                unsigned int oldpos, size;
 
+                               if (t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+                                       return 0;
+                               }
+
                                /* Return: backtrack through the last
                                   big jump. */
                                do {
@@ -477,6 +484,13 @@ mark_source_chains(struct xt_table_info *newinfo,
                                if (strcmp(t->target.u.user.name,
                                           IP6T_STANDARD_TARGET) == 0
                                    && newpos >= 0) {
+                                       if (newpos > newinfo->size -
+                                               sizeof(struct ip6t_entry)) {
+                                               duprintf("mark_source_chains: "
+                                                       "bad verdict (%i)\n",
+                                                               newpos);
+                                               return 0;
+                                       }
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
@@ -508,27 +522,6 @@ cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
        return 0;
 }
 
-static inline int
-standard_check(const struct ip6t_entry_target *t,
-              unsigned int max_offset)
-{
-       struct ip6t_standard_target *targ = (void *)t;
-
-       /* Check standard info. */
-       if (targ->verdict >= 0
-           && targ->verdict > max_offset - sizeof(struct ip6t_entry)) {
-               duprintf("ip6t_standard_check: bad verdict (%i)\n",
-                        targ->verdict);
-               return 0;
-       }
-       if (targ->verdict < -NF_MAX_VERDICT - 1) {
-               duprintf("ip6t_standard_check: bad negative verdict (%i)\n",
-                        targ->verdict);
-               return 0;
-       }
-       return 1;
-}
-
 static inline int
 check_match(struct ip6t_entry_match *m,
            const char *name,
@@ -616,12 +609,7 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
        if (ret)
                goto err;
 
-       if (t->u.kernel.target == &ip6t_standard_target) {
-               if (!standard_check(t, size)) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-       } else if (t->u.kernel.target->checkentry
+       if (t->u.kernel.target->checkentry
                   && !t->u.kernel.target->checkentry(name, e, target, t->data,
                                                      e->comefrom)) {
                duprintf("ip_tables: check failed for `%s'.\n",
@@ -758,17 +746,19 @@ translate_table(const char *name,
                }
        }
 
+       if (!mark_source_chains(newinfo, valid_hooks, entry0))
+               return -ELOOP;
+
        /* Finally, each sanity check must pass */
        i = 0;
        ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
                                check_entry, name, size, &i);
 
-       if (ret != 0)
-               goto cleanup;
-
-       ret = -ELOOP;
-       if (!mark_source_chains(newinfo, valid_hooks, entry0))
-               goto cleanup;
+       if (ret != 0) {
+               IP6T_ENTRY_ITERATE(entry0, newinfo->size,
+                                  cleanup_entry, &i);
+               return ret;
+       }
 
        /* And one copy for every other CPU */
        for_each_possible_cpu(i) {
@@ -777,9 +767,6 @@ translate_table(const char *name,
        }
 
        return 0;
-cleanup:
-       IP6T_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
-       return ret;
 }
 
 /* Gets counters. */
index c2e629d6aea40e5519c3b865d62e1a092e6c9b60..4ae1b19ada5d3eb9e3c3c7f0b1b7a8030002eacb 100644 (file)
@@ -854,7 +854,8 @@ back_from_confirm:
        }
 done:
        dst_release(dst);
-       release_sock(sk);
+       if (!inet->hdrincl)
+               release_sock(sk);
 out:   
        fl6_sock_release(flowlabel);
        return err<0?err:len;
index 01a5c52a2be324748d0e037e78d5bb7e226f3a90..12e426b9aacd19d2b03e3d35bfc230dcd3c42897 100644 (file)
@@ -50,7 +50,7 @@ static u32 xfrm6_tunnel_spi;
 #define XFRM6_TUNNEL_SPI_MIN   1
 #define XFRM6_TUNNEL_SPI_MAX   0xffffffff
 
-static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
+static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
@@ -180,7 +180,7 @@ try_next_2:;
        spi = 0;
        goto out;
 alloc_spi:
-       x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
+       x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
        if (!x6spi)
                goto out;
 
index d50a02030ad72dbf1598a87d24812203241a36a4..262bda808d96762b2f96c9dc149f5460be4c1e0c 100644 (file)
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
 static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
 static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
 static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
 static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
 static void ircomm_tty_stop(struct tty_struct *tty);
 
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
                self->flow = FLOW_STOP;
 
                self->line = line;
-               INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+               INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
                self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
                self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
                self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
 }
 
 /*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
  *
  *    We use this routine to give the write wakeup to the user at at a
  *    safe time (as fast as possible after write have completed). This 
  *    can be compared to the Tx interrupt.
  */
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
 {
-       struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+       struct ircomm_tty_cb *self =
+               container_of(work, struct ircomm_tty_cb, tqueue);
        struct tty_struct *tty;
        unsigned long flags;
        struct sk_buff *skb, *ctrl_skb;
index 252f11012566ca1758851ef9489785ca8fe95ce7..03504f3e4990cb82bb8592845d79c7525d172b09 100644 (file)
@@ -1100,7 +1100,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
                        return -ENOMEM;
 
                /* Reserve space for MUX_CONTROL and LAP header */
-               skb_reserve(tx_skb, TTP_MAX_HEADER);
+               skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
        } else {
                tx_skb = userdata;
                /*
@@ -1349,7 +1349,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
                        return -ENOMEM;
 
                /* Reserve space for MUX_CONTROL and LAP header */
-               skb_reserve(tx_skb, TTP_MAX_HEADER);
+               skb_reserve(tx_skb, TTP_MAX_HEADER + TTP_SAR_HEADER);
        } else {
                tx_skb = userdata;
                /*
index 0e1dbfbb9b109ae6e2be92bbac0a592ec4a2ecb0..5dd5094659a186655d88a50a44ba2849a44bc776 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/proc_fs.h>
 #include <linux/init.h>
 #include <net/xfrm.h>
+#include <linux/audit.h>
 
 #include <net/sock.h>
 
@@ -1420,6 +1421,9 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
        else
                err = xfrm_state_update(x);
 
+       xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+                      AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
+
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
                __xfrm_state_put(x);
@@ -1460,8 +1464,12 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
                err = -EPERM;
                goto out;
        }
-       
+
        err = xfrm_state_delete(x);
+
+       xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+                      AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
+
        if (err < 0)
                goto out;
 
@@ -1637,12 +1645,15 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
 {
        unsigned proto;
        struct km_event c;
+       struct xfrm_audit audit_info;
 
        proto = pfkey_satype2proto(hdr->sadb_msg_satype);
        if (proto == 0)
                return -EINVAL;
 
-       xfrm_state_flush(proto);
+       audit_info.loginuid = audit_get_loginuid(current->audit_context);
+       audit_info.secid = 0;
+       xfrm_state_flush(proto, &audit_info);
        c.data.proto = proto;
        c.seq = hdr->sadb_msg_seq;
        c.pid = hdr->sadb_msg_pid;
@@ -2205,6 +2216,9 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
        err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
                                 hdr->sadb_msg_type != SADB_X_SPDUPDATE);
 
+       xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+                      AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL);
+
        if (err)
                goto out;
 
@@ -2282,6 +2296,10 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
        xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir-1,
                                   &sel, tmp.security, 1);
        security_xfrm_policy_free(&tmp);
+
+       xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+                      AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL);
+
        if (xp == NULL)
                return -ENOENT;
 
@@ -2416,8 +2434,11 @@ static int key_notify_policy_flush(struct km_event *c)
 static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
 {
        struct km_event c;
+       struct xfrm_audit audit_info;
 
-       xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN);
+       audit_info.loginuid = audit_get_loginuid(current->audit_context);
+       audit_info.secid = 0;
+       xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN, &audit_info);
        c.data.type = XFRM_POLICY_TYPE_MAIN;
        c.event = XFRM_MSG_FLUSHPOLICY;
        c.pid = hdr->sadb_msg_pid;
index eaa0f8a1adb61422629284ea0d156fe32597b364..9b02ec4012fb3f4ab515d089cd4f3b1b14a20808 100644 (file)
@@ -108,7 +108,7 @@ static struct {
        size_t size;
 
        /* slab cache pointer */
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
 
        /* allocated slab cache + modules which uses this slab cache */
        int use;
@@ -147,7 +147,7 @@ int nf_conntrack_register_cache(u_int32_t features, const char *name,
 {
        int ret = 0;
        char *cache_name;
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
 
        DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
               features, name, size);
@@ -226,7 +226,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
 /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
 void nf_conntrack_unregister_cache(u_int32_t features)
 {
-       kmem_cache_t *cachep;
+       struct kmem_cache *cachep;
        char *name;
 
        /*
@@ -1093,7 +1093,7 @@ static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
                           get_order(sizeof(struct list_head) * size));
 }
 
-void nf_conntrack_flush()
+void nf_conntrack_flush(void)
 {
        nf_ct_iterate_cleanup(kill_all, NULL);
 }
index 588d379370466f0f71cffb23acf8363c2f6795f1..9cbf926cdd14fb0d6af92917339e1cd1d3f23b23 100644 (file)
@@ -29,7 +29,7 @@
 LIST_HEAD(nf_conntrack_expect_list);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_list);
 
-kmem_cache_t *nf_conntrack_expect_cachep __read_mostly;
+struct kmem_cache *nf_conntrack_expect_cachep __read_mostly;
 static unsigned int nf_conntrack_expect_next_id;
 
 /* nf_conntrack_expect helper functions */
@@ -91,25 +91,28 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_find_get);
 struct nf_conntrack_expect *
 find_expectation(const struct nf_conntrack_tuple *tuple)
 {
-       struct nf_conntrack_expect *i;
+       struct nf_conntrack_expect *exp;
+
+       exp = __nf_conntrack_expect_find(tuple);
+       if (!exp)
+               return NULL;
 
-       list_for_each_entry(i, &nf_conntrack_expect_list, list) {
        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
-               if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
-                   && nf_ct_is_confirmed(i->master)) {
-                       if (i->flags & NF_CT_EXPECT_PERMANENT) {
-                               atomic_inc(&i->use);
-                               return i;
-                       } else if (del_timer(&i->timeout)) {
-                               nf_ct_unlink_expect(i);
-                               return i;
-                       }
-               }
+       if (!nf_ct_is_confirmed(exp->master))
+               return NULL;
+
+       if (exp->flags & NF_CT_EXPECT_PERMANENT) {
+               atomic_inc(&exp->use);
+               return exp;
+       } else if (del_timer(&exp->timeout)) {
+               nf_ct_unlink_expect(exp);
+               return exp;
        }
+
        return NULL;
 }
 
index a98de0b54d6514accd49dd7b23c13a027fb17aa4..a5a6e192ac2d07ae5fe188b81777b75e645f01cc 100644 (file)
@@ -92,7 +92,7 @@ struct xt_hashlimit_htable {
 static DEFINE_SPINLOCK(hashlimit_lock);        /* protects htables list */
 static DEFINE_MUTEX(hlimit_mutex);     /* additional checkentry protection */
 static HLIST_HEAD(hashlimit_htables);
-static kmem_cache_t *hashlimit_cachep __read_mostly;
+static struct kmem_cache *hashlimit_cachep __read_mostly;
 
 static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
 {
index b9b03747c1f39e5882c702e0cdde95c6a61dcfbd..548e4e6e698f041f3802a28339b4ed69ba971b7c 100644 (file)
@@ -143,6 +143,13 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
                goto errout;
        }
 
+       if (ops->dumpit)
+               ops->flags |= GENL_CMD_CAP_DUMP;
+       if (ops->doit)
+               ops->flags |= GENL_CMD_CAP_DO;
+       if (ops->policy)
+               ops->flags |= GENL_CMD_CAP_HASPOL;
+
        genl_lock();
        list_add_tail(&ops->ops_list, &family->ops_list);
        genl_unlock();
@@ -387,7 +394,7 @@ static void genl_rcv(struct sock *sk, int len)
 static struct genl_family genl_ctrl = {
        .id = GENL_ID_CTRL,
        .name = "nlctrl",
-       .version = 0x1,
+       .version = 0x2,
        .maxattr = CTRL_ATTR_MAX,
 };
 
@@ -425,15 +432,6 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
                        NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
                        NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
 
-                       if (ops->policy)
-                               NLA_PUT_FLAG(skb, CTRL_ATTR_OP_POLICY);
-
-                       if (ops->doit)
-                               NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DOIT);
-
-                       if (ops->dumpit)
-                               NLA_PUT_FLAG(skb, CTRL_ATTR_OP_DUMPIT);
-
                        nla_nest_end(skb, nest);
                }
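
With the genetlink change above, whether an operation supports doit/dumpit and carries a policy is advertised as capability bits in the per-op CTRL_ATTR_OP_FLAGS attribute (and the controller family version is bumped to 0x2), instead of as separate per-op flag attributes. A hedged userspace-side sketch of consuming that bitmask, assuming installed kernel headers that already export the new GENL_CMD_CAP_* constants:

    #include <linux/genetlink.h>
    #include <linux/types.h>
    #include <stdio.h>

    static void print_op_caps(__u32 op_id, __u32 op_flags)
    {
            printf("op %u:%s%s%s\n", op_id,
                   (op_flags & GENL_CMD_CAP_DO)     ? " do"     : "",
                   (op_flags & GENL_CMD_CAP_DUMP)   ? " dump"   : "",
                   (op_flags & GENL_CMD_CAP_HASPOL) ? " policy" : "");
    }

    int main(void)
    {
            print_op_caps(1, GENL_CMD_CAP_DO | GENL_CMD_CAP_HASPOL);
            return 0;
    }
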
 
index 08e68b67bbf642cedac2b537b79028c7f157bfaa..da73e8a8c18de58377376237907b32cf3baee35e 100644 (file)
@@ -660,7 +660,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
        sll->sll_ifindex = dev->ifindex;
 
        h->tp_status = status;
-       mb();
+       smp_mb();
 
        {
                struct page *p_start, *p_end;
index dada34a77b2194ab9750d2cdc518032f0fa413df..49effd92144e2908ea95ce6ecdff1fb4be7a6940 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/freezer.h>
 #include <rxrpc/krxiod.h>
 #include <rxrpc/transport.h>
 #include <rxrpc/peer.h>
index cea4eb5e2497a7c6e89234ef0a7e60c8bc863130..3ab0f77409f49c79de6c65dc361962bf14be1c94 100644 (file)
@@ -27,6 +27,7 @@
 #include <rxrpc/call.h>
 #include <linux/udp.h>
 #include <linux/ip.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include "internal.h"
 
index 3e7466900bd4e1260728ed63759d60794a88c69e..9a9b6132dba4840755e84a17e286a0b9da161996 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/freezer.h>
 #include <rxrpc/rxrpc.h>
 #include <rxrpc/krxtimod.h>
 #include <asm/errno.h>
index f59a2c4aa039b463e6c4c7d4fc867ce6da8a7dcb..c797d6ada7de06948ef1fab1e225a224ffdc057d 100644 (file)
@@ -101,9 +101,10 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
        struct fw_head *head = (struct fw_head*)tp->root;
        struct fw_filter *f;
        int r;
-       u32 id = skb->mark & head->mask;
+       u32 id = skb->mark;
 
        if (head != NULL) {
+               id &= head->mask;
                for (f=head->ht[fw_hash(id)]; f; f=f->next) {
                        if (f->id == id) {
                                *res = f->res;
index 39471d3b31b974b93e7710de9a5156dc047f554f..ad0057db0f91884b39a393b06759bdc383c9776b 100644 (file)
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
 
 
 /* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        /* Create an input queue.  */
        sctp_inq_init(&asoc->base.inqueue);
-       sctp_inq_set_th_handler(&asoc->base.inqueue,
-                                   (void (*)(void *))sctp_assoc_bh_rcv,
-                                   asoc);
+       sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 
        /* Create an output queue.  */
        sctp_outq_init(asoc, &asoc->outqueue);
@@ -946,8 +944,11 @@ out:
 }
 
 /* Do delayed input processing.  This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
 {
+       struct sctp_association *asoc =
+               container_of(work, struct sctp_association,
+                            base.inqueue.immediate);
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sock *sk;
index 33a42e90c32f509f7dfb06827ff1a7296d60ddf7..129756908da49992b5a42e07b84f7521a778e891 100644 (file)
@@ -61,7 +61,7 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
 
 /*
  * Initialize the base fields of the endpoint structure.
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        sctp_inq_init(&ep->base.inqueue);
 
        /* Set its top-half handler */
-       sctp_inq_set_th_handler(&ep->base.inqueue,
-                               (void (*)(void *))sctp_endpoint_bh_rcv, ep);
+       sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
 
        /* Initialize the bind addr area */
        sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
 /* Do delayed input processing.  This is scheduled by sctp_rcv().
  * This may be called on BH or task time.
  */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
 {
+       struct sctp_endpoint *ep =
+               container_of(work, struct sctp_endpoint,
+                            base.inqueue.immediate);
        struct sctp_association *asoc;
        struct sock *sk;
        struct sctp_transport *transport;
index cf6deed7e8497241cec4109c41c17130bc78906e..71b07466e880415ce812ca0b55e4f90e37f67ab9 100644 (file)
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
        queue->in_progress = NULL;
 
        /* Create a task for delivering data.  */
-       INIT_WORK(&queue->immediate, NULL, NULL);
+       INIT_WORK(&queue->immediate, NULL);
 
        queue->malloced = 0;
 }
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
         * on the BH related data structures.
         */
        list_add_tail(&chunk->list, &q->in_chunk_list);
-       q->immediate.func(q->immediate.data);
+       q->immediate.func(&q->immediate);
 }
 
 /* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
  * The intent is that this routine will pull stuff out of the
  * inqueue and process it.
  */
-void sctp_inq_set_th_handler(struct sctp_inq *q,
-                                void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
 {
-       INIT_WORK(&q->immediate, callback, arg);
+       INIT_WORK(&q->immediate, callback);
 }
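
SCTP's inqueue keeps a plain (non-delayed) work item, so after this conversion the handler type is the new work_func_t and the callback receives the embedded work_struct itself; recovering the container therefore needs no .work indirection. A hedged sketch of the non-delayed variant (names are illustrative):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_queue {
            int depth;
            struct work_struct immediate;   /* plain work item, not delayed */
    };

    static void my_queue_fn(struct work_struct *work)
    {
            struct my_queue *q = container_of(work, struct my_queue, immediate);

            q->depth = 0;
    }

    static void my_queue_init(struct my_queue *q, work_func_t fn)
    {
            q->depth = 0;
            INIT_WORK(&q->immediate, fn);   /* e.g. my_queue_fn */
    }
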
 
index 11f3b549f4a4ef893f4cf2c05a93f782b5f5446b..f2ba8615895b4551ced90fab4f101774492bf351 100644 (file)
@@ -79,8 +79,8 @@ static struct sctp_pf *sctp_pf_inet_specific;
 static struct sctp_af *sctp_af_v4_specific;
 static struct sctp_af *sctp_af_v6_specific;
 
-kmem_cache_t *sctp_chunk_cachep __read_mostly;
-kmem_cache_t *sctp_bucket_cachep __read_mostly;
+struct kmem_cache *sctp_chunk_cachep __read_mostly;
+struct kmem_cache *sctp_bucket_cachep __read_mostly;
 
 /* Return the address of the control sock. */
 struct sock *sctp_get_ctl_sock(void)
index 04954e5f6846c2d16763ce1c0bcf2e6900957df7..30927d3a597f891e6c4a7dc852fa1b7a9802248a 100644 (file)
@@ -65,7 +65,7 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-extern kmem_cache_t *sctp_chunk_cachep;
+extern struct kmem_cache *sctp_chunk_cachep;
 
 SCTP_STATIC
 struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
@@ -979,7 +979,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
 {
        struct sctp_chunk *retval;
 
-       retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC);
+       retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
 
        if (!retval)
                goto nodata;
index 02b27145b279420891ed29393ea92b62f56aa3b9..1e8132b8c4d98e71f3d4c7a1c48bea3b8f6ae7d4 100644 (file)
@@ -107,7 +107,7 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
                              struct sctp_association *, sctp_socket_type_t);
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
-extern kmem_cache_t *sctp_bucket_cachep;
+extern struct kmem_cache *sctp_bucket_cachep;
 
 /* Get the sndbuf space available at the time on the association.  */
 static inline int sctp_wspace(struct sctp_association *asoc)
@@ -4989,7 +4989,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
 {
        struct sctp_bind_bucket *pp;
 
-       pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC);
+       pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
        SCTP_DBG_OBJCNT_INC(bind_bucket);
        if (pp) {
                pp->port = snum;
index e8db54702a6913c98f5354bcf11c75b5d1dbb466..29ea1de43ecb3ec8bfe6c7ef13369c1e557e6530 100644 (file)
@@ -230,13 +230,13 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr,
 
 #define SOCKFS_MAGIC 0x534F434B
 
-static kmem_cache_t *sock_inode_cachep __read_mostly;
+static struct kmem_cache *sock_inode_cachep __read_mostly;
 
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
        struct socket_alloc *ei;
 
-       ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL);
+       ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        init_waitqueue_head(&ei->socket.wait);
@@ -257,7 +257,7 @@ static void sock_destroy_inode(struct inode *inode)
                        container_of(inode, struct socket_alloc, vfs_inode));
 }
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
 {
        struct socket_alloc *ei = (struct socket_alloc *)foo;
 
@@ -305,7 +305,14 @@ static struct file_system_type sock_fs_type = {
 
 static int sockfs_delete_dentry(struct dentry *dentry)
 {
-       return 1;
+       /*
+        * At creation time, we pretended this dentry was hashed
+        * (by clearing DCACHE_UNHASHED bit in d_flags)
+        * At delete time, we restore the truth : not hashed.
+        * (so that dput() can proceed correctly)
+        */
+       dentry->d_flags |= DCACHE_UNHASHED;
+       return 0;
 }
 static struct dentry_operations sockfs_dentry_operations = {
        .d_delete = sockfs_delete_dentry,
@@ -353,14 +360,20 @@ static int sock_attach_fd(struct socket *sock, struct file *file)
 
        this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
        this.name = name;
-       this.hash = SOCK_INODE(sock)->i_ino;
+       this.hash = 0;
 
        file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
        if (unlikely(!file->f_dentry))
                return -ENOMEM;
 
        file->f_dentry->d_op = &sockfs_dentry_operations;
-       d_add(file->f_dentry, SOCK_INODE(sock));
+       /*
+        * We dont want to push this dentry into global dentry hash table.
+        * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
+        * This permits a working /proc/$pid/fd/XXX on sockets
+        */
+       file->f_dentry->d_flags &= ~DCACHE_UNHASHED;
+       d_instantiate(file->f_dentry, SOCK_INODE(sock));
        file->f_vfsmnt = mntget(sock_mnt);
        file->f_mapping = file->f_dentry->d_inode->i_mapping;
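As the comments in the hunk above explain, sockfs dentries are kept out of the global dentry hash while still being resolvable through /proc/$pid/fd: the dentry is allocated normally, DCACHE_UNHASHED is cleared so the dcache treats it as hashed, and the inode is attached with d_instantiate() rather than d_add(); ->d_delete() then restores DCACHE_UNHASHED so dput() frees it immediately. A condensed restatement of that flow (not a drop-in function):

        dentry = d_alloc(parent, &name);            /* name.hash = 0: never really hashed */
        dentry->d_op = &sockfs_dentry_operations;
        dentry->d_flags &= ~DCACHE_UNHASHED;        /* pretend it is hashed */
        d_instantiate(dentry, inode);               /* attach inode without d_add()/hashing */

        /* and in ->d_delete(): restore the truth so dput() can proceed */
        dentry->d_flags |= DCACHE_UNHASHED;
        return 0;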
 
index e5a84a482e57f7a3c644880084cb9afd817d1640..a02ecc1f230dddb839ff85350f094c39b0ad2f28 100644 (file)
@@ -68,7 +68,7 @@ static struct rpc_credops gss_credops;
 #define GSS_CRED_SLACK         1024            /* XXX: unused */
 /* length of a krb5 verifier (48), plus data added before arguments when
  * using integrity (two 4-byte integers): */
-#define GSS_VERF_SLACK         56
+#define GSS_VERF_SLACK         100
 
 /* XXX this define must match the gssd define
 * as it is passed to gssd to signal the use of
@@ -94,46 +94,6 @@ struct gss_auth {
 static void gss_destroy_ctx(struct gss_cl_ctx *);
 static struct rpc_pipe_ops gss_upcall_ops;
 
-void
-print_hexl(u32 *p, u_int length, u_int offset)
-{
-       u_int i, j, jm;
-       u8 c, *cp;
-       
-       dprintk("RPC: print_hexl: length %d\n",length);
-       dprintk("\n");
-       cp = (u8 *) p;
-       
-       for (i = 0; i < length; i += 0x10) {
-               dprintk("  %04x: ", (u_int)(i + offset));
-               jm = length - i;
-               jm = jm > 16 ? 16 : jm;
-               
-               for (j = 0; j < jm; j++) {
-                       if ((j % 2) == 1)
-                               dprintk("%02x ", (u_int)cp[i+j]);
-                       else
-                               dprintk("%02x", (u_int)cp[i+j]);
-               }
-               for (; j < 16; j++) {
-                       if ((j % 2) == 1)
-                               dprintk("   ");
-                       else
-                               dprintk("  ");
-               }
-               dprintk(" ");
-               
-               for (j = 0; j < jm; j++) {
-                       c = cp[i+j];
-                       c = isprint(c) ? c : '.';
-                       dprintk("%c", c);
-               }
-               dprintk("\n");
-       }
-}
-
-EXPORT_SYMBOL(print_hexl);
-
 static inline struct gss_cl_ctx *
 gss_get_ctx(struct gss_cl_ctx *ctx)
 {
index e11a40b25cce68d40a2649cb999793d235fd455a..d926cda8862399de9d73d94c30063004a7c6b3f5 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/sunrpc/gss_krb5.h>
+#include <linux/sunrpc/xdr.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -61,9 +62,6 @@ krb5_encrypt(
        u8 local_iv[16] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
-       dprintk("RPC:      krb5_encrypt: input data:\n");
-       print_hexl((u32 *)in, length, 0);
-
        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;
 
@@ -80,12 +78,9 @@ krb5_encrypt(
        sg_set_buf(sg, out, length);
 
        ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
-
-       dprintk("RPC:      krb5_encrypt: output data:\n");
-       print_hexl((u32 *)out, length, 0);
 out:
        dprintk("RPC:      krb5_encrypt returns %d\n",ret);
-       return(ret);
+       return ret;
 }
 
 EXPORT_SYMBOL(krb5_encrypt);
@@ -103,9 +98,6 @@ krb5_decrypt(
        u8 local_iv[16] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
-       dprintk("RPC:      krb5_decrypt: input data:\n");
-       print_hexl((u32 *)in, length, 0);
-
        if (length % crypto_blkcipher_blocksize(tfm) != 0)
                goto out;
 
@@ -121,82 +113,13 @@ krb5_decrypt(
        sg_set_buf(sg, out, length);
 
        ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
-
-       dprintk("RPC:      krb5_decrypt: output_data:\n");
-       print_hexl((u32 *)out, length, 0);
 out:
        dprintk("RPC:      gss_k5decrypt returns %d\n",ret);
-       return(ret);
+       return ret;
 }
 
 EXPORT_SYMBOL(krb5_decrypt);
 
-static int
-process_xdr_buf(struct xdr_buf *buf, int offset, int len,
-               int (*actor)(struct scatterlist *, void *), void *data)
-{
-       int i, page_len, thislen, page_offset, ret = 0;
-       struct scatterlist      sg[1];
-
-       if (offset >= buf->head[0].iov_len) {
-               offset -= buf->head[0].iov_len;
-       } else {
-               thislen = buf->head[0].iov_len - offset;
-               if (thislen > len)
-                       thislen = len;
-               sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
-               ret = actor(sg, data);
-               if (ret)
-                       goto out;
-               offset = 0;
-               len -= thislen;
-       }
-       if (len == 0)
-               goto out;
-
-       if (offset >= buf->page_len) {
-               offset -= buf->page_len;
-       } else {
-               page_len = buf->page_len - offset;
-               if (page_len > len)
-                       page_len = len;
-               len -= page_len;
-               page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-               i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-               thislen = PAGE_CACHE_SIZE - page_offset;
-               do {
-                       if (thislen > page_len)
-                               thislen = page_len;
-                       sg->page = buf->pages[i];
-                       sg->offset = page_offset;
-                       sg->length = thislen;
-                       ret = actor(sg, data);
-                       if (ret)
-                               goto out;
-                       page_len -= thislen;
-                       i++;
-                       page_offset = 0;
-                       thislen = PAGE_CACHE_SIZE;
-               } while (page_len != 0);
-               offset = 0;
-       }
-       if (len == 0)
-               goto out;
-
-       if (offset < buf->tail[0].iov_len) {
-               thislen = buf->tail[0].iov_len - offset;
-               if (thislen > len)
-                       thislen = len;
-               sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
-               ret = actor(sg, data);
-               len -= thislen;
-       }
-       if (len != 0)
-               ret = -EINVAL;
-out:
-       return ret;
-}
-
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
@@ -207,23 +130,13 @@ checksummer(struct scatterlist *sg, void *data)
 
 /* checksum the plaintext data and hdrlen bytes of the token header */
 s32
-make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
+make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
                   int body_offset, struct xdr_netobj *cksum)
 {
-       char                            *cksumname;
        struct hash_desc                desc; /* XXX add to ctx? */
        struct scatterlist              sg[1];
        int err;
 
-       switch (cksumtype) {
-               case CKSUMTYPE_RSA_MD5:
-                       cksumname = "md5";
-                       break;
-               default:
-                       dprintk("RPC:      krb5_make_checksum:"
-                               " unsupported checksum %d", cksumtype);
-                       return GSS_S_FAILURE;
-       }
        desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(desc.tfm))
                return GSS_S_FAILURE;
@@ -237,7 +150,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
        err = crypto_hash_update(&desc, sg, hdrlen);
        if (err)
                goto out;
-       err = process_xdr_buf(body, body_offset, body->len - body_offset,
+       err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, &desc);
        if (err)
                goto out;
@@ -335,7 +248,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
        desc.fragno = 0;
        desc.fraglen = 0;
 
-       ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+       ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
        return ret;
 }
 
@@ -401,7 +314,7 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
        desc.desc.flags = 0;
        desc.fragno = 0;
        desc.fraglen = 0;
-       return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+       return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
 
 EXPORT_SYMBOL(gss_decrypt_xdr_buf);
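With the private process_xdr_buf() removed, make_checksum() takes a hash algorithm name directly and walks the xdr_buf through the generic xdr_process_buf() helper, feeding each scatterlist segment to an actor callback (checksummer() above). A condensed restatement of that flow, following the make_checksum() hunk:

        struct hash_desc desc;
        struct scatterlist sg[1];
        int err;

        desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        cksum->len = crypto_hash_digestsize(desc.tfm);

        crypto_hash_init(&desc);

        /* hash the token header first ... */
        sg_set_buf(sg, header, hdrlen);
        crypto_hash_update(&desc, sg, hdrlen);

        /* ... then every segment of the xdr_buf via the actor callback */
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, &desc);
        if (!err)
                crypto_hash_final(&desc, cksum->data);

        crypto_free_hash(desc.tfm);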
index 754b8cd6439f20e4157069ff9bbc5197b3e3010f..05d4bee86fc06d32c0ab559a936bfff92b8623ae 100644 (file)
@@ -129,6 +129,7 @@ gss_import_sec_context_kerberos(const void *p,
 {
        const void *end = (const void *)((const char *)p + len);
        struct  krb5_ctx *ctx;
+       int tmp;
 
        if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
                goto out_err;
@@ -136,18 +137,23 @@ gss_import_sec_context_kerberos(const void *p,
        p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
        if (IS_ERR(p))
                goto out_err_free_ctx;
-       p = simple_get_bytes(p, end, &ctx->seed_init, sizeof(ctx->seed_init));
-       if (IS_ERR(p))
+       /* The downcall format was designed before we completely understood
+        * the uses of the context fields; so it includes some stuff we
+        * just give some minimal sanity-checking, and some we ignore
+        * completely (like the next twenty bytes): */
+       if (unlikely(p + 20 > end || p + 20 < p))
                goto out_err_free_ctx;
-       p = simple_get_bytes(p, end, ctx->seed, sizeof(ctx->seed));
+       p += 20;
+       p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
        if (IS_ERR(p))
                goto out_err_free_ctx;
-       p = simple_get_bytes(p, end, &ctx->signalg, sizeof(ctx->signalg));
-       if (IS_ERR(p))
+       if (tmp != SGN_ALG_DES_MAC_MD5)
                goto out_err_free_ctx;
-       p = simple_get_bytes(p, end, &ctx->sealalg, sizeof(ctx->sealalg));
+       p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
        if (IS_ERR(p))
                goto out_err_free_ctx;
+       if (tmp != SEAL_ALG_DES)
+               goto out_err_free_ctx;
        p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
        if (IS_ERR(p))
                goto out_err_free_ctx;
index 08601ee4cd7311d45c4a1146386ce22a0d96be76..d0bb5064f8c5ae1b0578b8f635cea5d5df0695a4 100644 (file)
@@ -77,7 +77,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
                struct xdr_netobj *token)
 {
        struct krb5_ctx         *ctx = gss_ctx->internal_ctx_id;
-       s32                     checksum_type;
        char                    cksumdata[16];
        struct xdr_netobj       md5cksum = {.len = 0, .data = cksumdata};
        unsigned char           *ptr, *krb5_hdr, *msg_start;
@@ -88,21 +87,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 
        now = get_seconds();
 
-       switch (ctx->signalg) {
-               case SGN_ALG_DES_MAC_MD5:
-                       checksum_type = CKSUMTYPE_RSA_MD5;
-                       break;
-               default:
-                       dprintk("RPC:      gss_krb5_seal: ctx->signalg %d not"
-                               " supported\n", ctx->signalg);
-                       goto out_err;
-       }
-       if (ctx->sealalg != SEAL_ALG_NONE && ctx->sealalg != SEAL_ALG_DES) {
-               dprintk("RPC:      gss_krb5_seal: ctx->sealalg %d not supported\n",
-                       ctx->sealalg);
-               goto out_err;
-       }
-
        token->len = g_token_size(&ctx->mech_used, 22);
 
        ptr = token->data;
@@ -115,37 +99,26 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
        krb5_hdr = ptr - 2;
        msg_start = krb5_hdr + 24;
 
-       *(__be16 *)(krb5_hdr + 2) = htons(ctx->signalg);
+       *(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
        memset(krb5_hdr + 4, 0xff, 4);
 
-       if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
-                       goto out_err;
-
-       switch (ctx->signalg) {
-       case SGN_ALG_DES_MAC_MD5:
-               if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
-                                 md5cksum.data, md5cksum.len))
-                       goto out_err;
-               memcpy(krb5_hdr + 16,
-                      md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-                      KRB5_CKSUM_LENGTH);
-
-               dprintk("RPC:      make_seal_token: cksum data: \n");
-               print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
-               break;
-       default:
-               BUG();
-       }
+       if (make_checksum("md5", krb5_hdr, 8, text, 0, &md5cksum))
+               return GSS_S_FAILURE;
+
+       if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
+                         md5cksum.data, md5cksum.len))
+               return GSS_S_FAILURE;
+
+       memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+              KRB5_CKSUM_LENGTH);
 
        spin_lock(&krb5_seq_lock);
        seq_send = ctx->seq_send++;
        spin_unlock(&krb5_seq_lock);
 
-       if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
-                              seq_send, krb5_hdr + 16, krb5_hdr + 8)))
-               goto out_err;
+       if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
+                              ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))
+               return GSS_S_FAILURE;
 
-       return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
-out_err:
-       return GSS_S_FAILURE;
+       return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
index 0828cf64100f977d241dac71d393a0148aa8f09a..87f8977ccece78861cd8d65c1e83007930b82cc0 100644 (file)
@@ -78,7 +78,6 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
        struct krb5_ctx         *ctx = gss_ctx->internal_ctx_id;
        int                     signalg;
        int                     sealalg;
-       s32                     checksum_type;
        char                    cksumdata[16];
        struct xdr_netobj       md5cksum = {.len = 0, .data = cksumdata};
        s32                     now;
@@ -86,96 +85,54 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
        s32                     seqnum;
        unsigned char           *ptr = (unsigned char *)read_token->data;
        int                     bodysize;
-       u32                     ret = GSS_S_DEFECTIVE_TOKEN;
 
        dprintk("RPC:      krb5_read_token\n");
 
        if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
                                        read_token->len))
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
        if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
            (*ptr++ != ( KG_TOK_MIC_MSG    &0xff))   )
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
        /* XXX sanity-check bodysize?? */
 
-       /* get the sign and seal algorithms */
-
        signalg = ptr[0] + (ptr[1] << 8);
-       sealalg = ptr[2] + (ptr[3] << 8);
+       if (signalg != SGN_ALG_DES_MAC_MD5)
+               return GSS_S_DEFECTIVE_TOKEN;
 
-       /* Sanity checks */
+       sealalg = ptr[2] + (ptr[3] << 8);
+       if (sealalg != SEAL_ALG_NONE)
+               return GSS_S_DEFECTIVE_TOKEN;
 
        if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
-               goto out;
-
-       if (sealalg != 0xffff)
-               goto out;
-
-       /* there are several mappings of seal algorithms to sign algorithms,
-          but few enough that we can try them all. */
-
-       if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
-           (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
-           (ctx->sealalg == SEAL_ALG_DES3KD &&
-            signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
-               goto out;
-
-       /* compute the checksum of the message */
-
-       /* initialize the the cksum */
-       switch (signalg) {
-       case SGN_ALG_DES_MAC_MD5:
-               checksum_type = CKSUMTYPE_RSA_MD5;
-               break;
-       default:
-               ret = GSS_S_DEFECTIVE_TOKEN;
-               goto out;
-       }
-
-       switch (signalg) {
-       case SGN_ALG_DES_MAC_MD5:
-               ret = make_checksum(checksum_type, ptr - 2, 8,
-                                        message_buffer, 0, &md5cksum);
-               if (ret)
-                       goto out;
-
-               ret = krb5_encrypt(ctx->seq, NULL, md5cksum.data,
-                                  md5cksum.data, 16);
-               if (ret)
-                       goto out;
-
-               if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
-                       ret = GSS_S_BAD_SIG;
-                       goto out;
-               }
-               break;
-       default:
-               ret = GSS_S_DEFECTIVE_TOKEN;
-               goto out;
-       }
+               return GSS_S_DEFECTIVE_TOKEN;
+
+       if (make_checksum("md5", ptr - 2, 8, message_buffer, 0, &md5cksum))
+               return GSS_S_FAILURE;
+
+       if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
+               return GSS_S_FAILURE;
+
+       if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+               return GSS_S_BAD_SIG;
 
        /* it got through unscathed.  Make sure the context is unexpired */
 
        now = get_seconds();
 
-       ret = GSS_S_CONTEXT_EXPIRED;
        if (now > ctx->endtime)
-               goto out;
+               return GSS_S_CONTEXT_EXPIRED;
 
        /* do sequencing checks */
 
-       ret = GSS_S_BAD_SIG;
-       if ((ret = krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction,
-                                   &seqnum)))
-               goto out;
+       if (krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum))
+               return GSS_S_FAILURE;
 
        if ((ctx->initiate && direction != 0xff) ||
            (!ctx->initiate && direction != 0))
-               goto out;
+               return GSS_S_BAD_SIG;
 
-       ret = GSS_S_COMPLETE;
-out:
-       return ret;
+       return GSS_S_COMPLETE;
 }
index cc45c1605f80eb7bbc6f2382bb2c3952f0446ec9..fe25b3d898dc53951be0c84166df3eae02487817 100644 (file)
@@ -57,9 +57,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
                                        >>PAGE_CACHE_SHIFT;
                int offset = (buf->page_base + len - 1)
                                        & (PAGE_CACHE_SIZE - 1);
-               ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
+               ptr = kmap_atomic(buf->pages[last], KM_USER0);
                pad = *(ptr + offset);
-               kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
+               kunmap_atomic(ptr, KM_USER0);
                goto out;
        } else
                len -= buf->page_len;
@@ -120,7 +120,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
                struct xdr_buf *buf, struct page **pages)
 {
        struct krb5_ctx         *kctx = ctx->internal_ctx_id;
-       s32                     checksum_type;
        char                    cksumdata[16];
        struct xdr_netobj       md5cksum = {.len = 0, .data = cksumdata};
        int                     blocksize = 0, plainlen;
@@ -134,21 +133,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 
        now = get_seconds();
 
-       switch (kctx->signalg) {
-               case SGN_ALG_DES_MAC_MD5:
-                       checksum_type = CKSUMTYPE_RSA_MD5;
-                       break;
-               default:
-                       dprintk("RPC:      gss_krb5_seal: kctx->signalg %d not"
-                               " supported\n", kctx->signalg);
-                       goto out_err;
-       }
-       if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
-               dprintk("RPC:      gss_krb5_seal: kctx->sealalg %d not supported\n",
-                       kctx->sealalg);
-               goto out_err;
-       }
-
        blocksize = crypto_blkcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
@@ -175,37 +159,27 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
        /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
        krb5_hdr = ptr - 2;
        msg_start = krb5_hdr + 24;
-       /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
 
-       *(__be16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+       *(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
        memset(krb5_hdr + 4, 0xff, 4);
-       *(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+       *(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);
 
        make_confounder(msg_start, blocksize);
 
        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
-       if (make_checksum(checksum_type, krb5_hdr, 8, buf,
+       if (make_checksum("md5", krb5_hdr, 8, buf,
                                offset + headlen - blocksize, &md5cksum))
-               goto out_err;
+               return GSS_S_FAILURE;
        buf->pages = tmp_pages;
 
-       switch (kctx->signalg) {
-       case SGN_ALG_DES_MAC_MD5:
-               if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
-                                 md5cksum.data, md5cksum.len))
-                       goto out_err;
-               memcpy(krb5_hdr + 16,
-                      md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-                      KRB5_CKSUM_LENGTH);
-
-               dprintk("RPC:      make_seal_token: cksum data: \n");
-               print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
-               break;
-       default:
-               BUG();
-       }
+       if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+                         md5cksum.data, md5cksum.len))
+               return GSS_S_FAILURE;
+       memcpy(krb5_hdr + 16,
+              md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+              KRB5_CKSUM_LENGTH);
 
        spin_lock(&krb5_seq_lock);
        seq_send = kctx->seq_send++;
@@ -215,15 +189,13 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
         * and encrypt at the same time: */
        if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, krb5_hdr + 16, krb5_hdr + 8)))
-               goto out_err;
+               return GSS_S_FAILURE;
 
        if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
                                                                        pages))
-               goto out_err;
+               return GSS_S_FAILURE;
 
-       return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
-out_err:
-       return GSS_S_FAILURE;
+       return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
 
 u32
@@ -232,7 +204,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
        struct krb5_ctx         *kctx = ctx->internal_ctx_id;
        int                     signalg;
        int                     sealalg;
-       s32                     checksum_type;
        char                    cksumdata[16];
        struct xdr_netobj       md5cksum = {.len = 0, .data = cksumdata};
        s32                     now;
@@ -240,7 +211,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
        s32                     seqnum;
        unsigned char           *ptr;
        int                     bodysize;
-       u32                     ret = GSS_S_DEFECTIVE_TOKEN;
        void                    *data_start, *orig_start;
        int                     data_len;
        int                     blocksize;
@@ -250,98 +220,58 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
                                        buf->len - offset))
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
        if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
            (*ptr++ !=  (KG_TOK_WRAP_MSG    &0xff))   )
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
        /* XXX sanity-check bodysize?? */
 
        /* get the sign and seal algorithms */
 
        signalg = ptr[0] + (ptr[1] << 8);
-       sealalg = ptr[2] + (ptr[3] << 8);
+       if (signalg != SGN_ALG_DES_MAC_MD5)
+               return GSS_S_DEFECTIVE_TOKEN;
 
-       /* Sanity checks */
+       sealalg = ptr[2] + (ptr[3] << 8);
+       if (sealalg != SEAL_ALG_DES)
+               return GSS_S_DEFECTIVE_TOKEN;
 
        if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
-               goto out;
-
-       if (sealalg == 0xffff)
-               goto out;
-
-       /* in the current spec, there is only one valid seal algorithm per
-          key type, so a simple comparison is ok */
-
-       if (sealalg != kctx->sealalg)
-               goto out;
-
-       /* there are several mappings of seal algorithms to sign algorithms,
-          but few enough that we can try them all. */
-
-       if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
-           (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
-           (kctx->sealalg == SEAL_ALG_DES3KD &&
-            signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
        if (gss_decrypt_xdr_buf(kctx->enc, buf,
                        ptr + 22 - (unsigned char *)buf->head[0].iov_base))
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
-       /* compute the checksum of the message */
+       if (make_checksum("md5", ptr - 2, 8, buf,
+                ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
+               return GSS_S_FAILURE;
 
-       /* initialize the the cksum */
-       switch (signalg) {
-       case SGN_ALG_DES_MAC_MD5:
-               checksum_type = CKSUMTYPE_RSA_MD5;
-               break;
-       default:
-               ret = GSS_S_DEFECTIVE_TOKEN;
-               goto out;
-       }
-
-       switch (signalg) {
-       case SGN_ALG_DES_MAC_MD5:
-               ret = make_checksum(checksum_type, ptr - 2, 8, buf,
-                        ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
-               if (ret)
-                       goto out;
-
-               ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
-                                  md5cksum.data, md5cksum.len);
-               if (ret)
-                       goto out;
-
-               if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
-                       ret = GSS_S_BAD_SIG;
-                       goto out;
-               }
-               break;
-       default:
-               ret = GSS_S_DEFECTIVE_TOKEN;
-               goto out;
-       }
+       if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+                          md5cksum.data, md5cksum.len))
+               return GSS_S_FAILURE;
+
+       if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+               return GSS_S_BAD_SIG;
 
        /* it got through unscathed.  Make sure the context is unexpired */
 
        now = get_seconds();
 
-       ret = GSS_S_CONTEXT_EXPIRED;
        if (now > kctx->endtime)
-               goto out;
+               return GSS_S_CONTEXT_EXPIRED;
 
        /* do sequencing checks */
 
-       ret = GSS_S_BAD_SIG;
-       if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
-                                   &seqnum)))
-               goto out;
+       if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
+                                   &seqnum))
+               return GSS_S_BAD_SIG;
 
        if ((kctx->initiate && direction != 0xff) ||
            (!kctx->initiate && direction != 0))
-               goto out;
+               return GSS_S_BAD_SIG;
 
        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */
@@ -354,11 +284,8 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
        buf->head[0].iov_len -= (data_start - orig_start);
        buf->len -= (data_start - orig_start);
 
-       ret = GSS_S_DEFECTIVE_TOKEN;
        if (gss_krb5_remove_padding(buf, blocksize))
-               goto out;
+               return GSS_S_DEFECTIVE_TOKEN;
 
-       ret = GSS_S_COMPLETE;
-out:
-       return ret;
+       return GSS_S_COMPLETE;
 }
index d57f60838895d878efca25ba46628ca795237f16..41465072d0b59168cbdcb9a3aed3294a58ab7458 100644 (file)
@@ -82,133 +82,73 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
        return q;
 }
 
-static inline const void *
-get_key(const void *p, const void *end, struct crypto_blkcipher **res,
-       int *resalg)
-{
-       struct xdr_netobj       key = { 0 };
-       int                     setkey = 0;
-       char                    *alg_name;
-
-       p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
-       if (IS_ERR(p))
-               goto out_err;
-       p = simple_get_netobj(p, end, &key);
-       if (IS_ERR(p))
-               goto out_err;
-
-       switch (*resalg) {
-               case NID_des_cbc:
-                       alg_name = "cbc(des)";
-                       setkey = 1;
-                       break;
-               case NID_cast5_cbc:
-                       /* XXXX here in name only, not used */
-                       alg_name = "cbc(cast5)";
-                       setkey = 0; /* XXX will need to set to 1 */
-                       break;
-               case NID_md5:
-                       if (key.len == 0) {
-                               dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
-                       }
-                       alg_name = "md5";
-                       setkey = 0;
-                       break;
-               default:
-                       dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
-                       goto out_err_free_key;
-       }
-       *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(*res)) {
-               printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
-               *res = NULL;
-               goto out_err_free_key;
-       }
-       if (setkey) {
-               if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
-                       printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
-                       goto out_err_free_tfm;
-               }
-       }
-
-       if(key.len > 0)
-               kfree(key.data);
-       return p;
-
-out_err_free_tfm:
-       crypto_free_blkcipher(*res);
-out_err_free_key:
-       if(key.len > 0)
-               kfree(key.data);
-       p = ERR_PTR(-EINVAL);
-out_err:
-       return p;
-}
-
 static int
 gss_import_sec_context_spkm3(const void *p, size_t len,
                                struct gss_ctx *ctx_id)
 {
        const void *end = (const void *)((const char *)p + len);
        struct  spkm3_ctx *ctx;
+       int     version;
 
        if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
                goto out_err;
 
+       p = simple_get_bytes(p, end, &version, sizeof(version));
+       if (IS_ERR(p))
+               goto out_err_free_ctx;
+       if (version != 1) {
+               dprintk("RPC: unknown spkm3 token format: obsolete nfs-utils?\n");
+               goto out_err_free_ctx;
+       }
+
        p = simple_get_netobj(p, end, &ctx->ctx_id);
        if (IS_ERR(p))
                goto out_err_free_ctx;
 
-       p = simple_get_bytes(p, end, &ctx->qop, sizeof(ctx->qop));
+       p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
        if (IS_ERR(p))
                goto out_err_free_ctx_id;
 
        p = simple_get_netobj(p, end, &ctx->mech_used);
        if (IS_ERR(p))
-               goto out_err_free_mech;
+               goto out_err_free_ctx_id;
 
        p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags));
        if (IS_ERR(p))
                goto out_err_free_mech;
 
-       p = simple_get_bytes(p, end, &ctx->req_flags, sizeof(ctx->req_flags));
+       p = simple_get_netobj(p, end, &ctx->conf_alg);
        if (IS_ERR(p))
                goto out_err_free_mech;
 
-       p = simple_get_netobj(p, end, &ctx->share_key);
-       if (IS_ERR(p))
-               goto out_err_free_s_key;
-
-       p = get_key(p, end, &ctx->derived_conf_key, &ctx->conf_alg);
+       p = simple_get_netobj(p, end, &ctx->derived_conf_key);
        if (IS_ERR(p))
-               goto out_err_free_s_key;
+               goto out_err_free_conf_alg;
 
-       p = get_key(p, end, &ctx->derived_integ_key, &ctx->intg_alg);
+       p = simple_get_netobj(p, end, &ctx->intg_alg);
        if (IS_ERR(p))
-               goto out_err_free_key1;
+               goto out_err_free_conf_key;
 
-       p = simple_get_bytes(p, end, &ctx->keyestb_alg, sizeof(ctx->keyestb_alg));
+       p = simple_get_netobj(p, end, &ctx->derived_integ_key);
        if (IS_ERR(p))
-               goto out_err_free_key2;
-
-       p = simple_get_bytes(p, end, &ctx->owf_alg, sizeof(ctx->owf_alg));
-       if (IS_ERR(p))
-               goto out_err_free_key2;
+               goto out_err_free_intg_alg;
 
        if (p != end)
-               goto out_err_free_key2;
+               goto out_err_free_intg_key;
 
        ctx_id->internal_ctx_id = ctx;
 
        dprintk("Successfully imported new spkm context.\n");
        return 0;
 
-out_err_free_key2:
-       crypto_free_blkcipher(ctx->derived_integ_key);
-out_err_free_key1:
-       crypto_free_blkcipher(ctx->derived_conf_key);
-out_err_free_s_key:
-       kfree(ctx->share_key.data);
+out_err_free_intg_key:
+       kfree(ctx->derived_integ_key.data);
+out_err_free_intg_alg:
+       kfree(ctx->intg_alg.data);
+out_err_free_conf_key:
+       kfree(ctx->derived_conf_key.data);
+out_err_free_conf_alg:
+       kfree(ctx->conf_alg.data);
 out_err_free_mech:
        kfree(ctx->mech_used.data);
 out_err_free_ctx_id:
@@ -220,13 +160,16 @@ out_err:
 }
 
 static void
-gss_delete_sec_context_spkm3(void *internal_ctx) {
+gss_delete_sec_context_spkm3(void *internal_ctx)
+{
        struct spkm3_ctx *sctx = internal_ctx;
 
-       crypto_free_blkcipher(sctx->derived_integ_key);
-       crypto_free_blkcipher(sctx->derived_conf_key);
-       kfree(sctx->share_key.data);
+       kfree(sctx->derived_integ_key.data);
+       kfree(sctx->intg_alg.data);
+       kfree(sctx->derived_conf_key.data);
+       kfree(sctx->conf_alg.data);
        kfree(sctx->mech_used.data);
+       kfree(sctx->ctx_id.data);
        kfree(sctx);
 }
 
@@ -238,7 +181,6 @@ gss_verify_mic_spkm3(struct gss_ctx         *ctx,
        u32 maj_stat = 0;
        struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
-       dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
        maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
 
        dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
@@ -253,10 +195,9 @@ gss_get_mic_spkm3(struct gss_ctx   *ctx,
        u32 err = 0;
        struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
-       dprintk("RPC: gss_get_mic_spkm3\n");
-
        err = spkm3_make_token(sctx, message_buffer,
-                             message_token, SPKM_MIC_TOK);
+                               message_token, SPKM_MIC_TOK);
+       dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
        return err;
 }
 
index 18c7862bc234a989c14e26bee79212de3f2a63fc..b179d58c6249cf66b2004aa18fb0560011f0c8e5 100644 (file)
 #include <linux/sunrpc/gss_spkm3.h>
 #include <linux/random.h>
 #include <linux/crypto.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/sunrpc/xdr.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
+const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"};
+const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"};
+
 /*
  * spkm3_make_token()
  *
@@ -66,29 +72,23 @@ spkm3_make_token(struct spkm3_ctx *ctx,
        int                     ctxelen = 0, ctxzbit = 0;
        int                     md5elen = 0, md5zbit = 0;
 
-       dprintk("RPC: spkm3_make_token\n");
-
        now = jiffies;
 
        if (ctx->ctx_id.len != 16) {
                dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
-                       ctx->ctx_id.len);
+                               ctx->ctx_id.len);
                goto out_err;
        }
-               
-       switch (ctx->intg_alg) {
-               case NID_md5:
-                       checksum_type = CKSUMTYPE_RSA_MD5;
-                       break;
-               default:
-                       dprintk("RPC: gss_spkm3_seal: ctx->signalg %d not"
-                               " supported\n", ctx->intg_alg);
-                       goto out_err;
-       }
-       /* XXX since we don't support WRAP, perhaps we don't care... */
-       if (ctx->conf_alg != NID_cast5_cbc) {
-               dprintk("RPC: gss_spkm3_seal: ctx->sealalg %d not supported\n",
-                       ctx->conf_alg);
+
+       if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
+               dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm."
+                               "only support hmac-md5 I-ALG.\n");
+               goto out_err;
+       } else
+               checksum_type = CKSUMTYPE_HMAC_MD5;
+
+       if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
+               dprintk("RPC: gss_spkm3_seal: unsupported C-ALG algorithm\n");
                goto out_err;
        }
 
@@ -96,10 +96,10 @@ spkm3_make_token(struct spkm3_ctx *ctx,
                /* Calculate checksum over the mic-header */
                asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
                spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
-                                        ctxelen, ctxzbit);
-
-               if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len, 
-                                            text, 0, &md5cksum))
+                               ctxelen, ctxzbit);
+               if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
+                                       (char *)mic_hdr.data, mic_hdr.len,
+                                       text, 0, &md5cksum))
                        goto out_err;
 
                asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
@@ -121,7 +121,66 @@ spkm3_make_token(struct spkm3_ctx *ctx,
 
        return  GSS_S_COMPLETE;
 out_err:
+       if (md5cksum.data)
+               kfree(md5cksum.data);
+
        token->data = NULL;
        token->len = 0;
        return GSS_S_FAILURE;
 }
+
+static int
+spkm3_checksummer(struct scatterlist *sg, void *data)
+{
+       struct hash_desc *desc = data;
+
+       return crypto_hash_update(desc, sg, sg->length);
+}
+
+/* checksum the plaintext data and hdrlen bytes of the token header */
+s32
+make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
+                   unsigned int hdrlen, struct xdr_buf *body,
+                   unsigned int body_offset, struct xdr_netobj *cksum)
+{
+       char                            *cksumname;
+       struct hash_desc                desc; /* XXX add to ctx? */
+       struct scatterlist              sg[1];
+       int err;
+
+       switch (cksumtype) {
+               case CKSUMTYPE_HMAC_MD5:
+                       cksumname = "md5";
+                       break;
+               default:
+                       dprintk("RPC:      spkm3_make_checksum:"
+                                       " unsupported checksum %d", cksumtype);
+                       return GSS_S_FAILURE;
+       }
+
+       if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE;
+
+       desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(desc.tfm))
+               return GSS_S_FAILURE;
+       cksum->len = crypto_hash_digestsize(desc.tfm);
+       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_hash_setkey(desc.tfm, key->data, key->len);
+       if (err)
+               goto out;
+
+       sg_set_buf(sg, header, hdrlen);
+       crypto_hash_update(&desc, sg, 1);
+
+       xdr_process_buf(body, body_offset, body->len - body_offset,
+                       spkm3_checksummer, &desc);
+       crypto_hash_final(&desc, cksum->data);
+
+out:
+       crypto_free_hash(desc.tfm);
+
+       return err ? GSS_S_FAILURE : 0;
+}
+
+EXPORT_SYMBOL(make_spkm3_checksum);
index 854a983ccf26fd7c5fbc7c9e439bbae19ad31b70..35188b6ea8f75a950709498fa5dc187e73b0c02f 100644 (file)
@@ -172,10 +172,10 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
        *(u8 *)hptr++ = zbit;
        memcpy(hptr, ctxdata, elen);
        hptr += elen;
-       *hdrlen = hptr - top; 
+       *hdrlen = hptr - top;
 }
-               
-/* 
+
+/*
  * spkm3_mic_innercontext_token()
  *
  * *tokp points to the beginning of the SPKM_MIC token  described 
index 8537f581ef9b904f7abe5d821a6dcc7da1a41dec..e54581ca75702b43321aae63bced573ceffc73b8 100644 (file)
@@ -54,70 +54,70 @@ spkm3_read_token(struct spkm3_ctx *ctx,
                struct xdr_buf *message_buffer, /* signbuf */
                int toktype)
 {
+       s32                     checksum_type;
        s32                     code;
        struct xdr_netobj       wire_cksum = {.len =0, .data = NULL};
        char                    cksumdata[16];
        struct xdr_netobj       md5cksum = {.len = 0, .data = cksumdata};
        unsigned char           *ptr = (unsigned char *)read_token->data;
-       unsigned char           *cksum;
+       unsigned char           *cksum;
        int                     bodysize, md5elen;
        int                     mic_hdrlen;
        u32                     ret = GSS_S_DEFECTIVE_TOKEN;
 
-       dprintk("RPC: spkm3_read_token read_token->len %d\n", read_token->len);
-
        if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
                                        &bodysize, &ptr, read_token->len))
                goto out;
 
        /* decode the token */
 
-       if (toktype == SPKM_MIC_TOK) {
-
-               if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum))) 
-                       goto out;
-
-               if (*cksum++ != 0x03) {
-                       dprintk("RPC: spkm3_read_token BAD checksum type\n");
-                       goto out;
-               }
-               md5elen = *cksum++; 
-               cksum++;        /* move past the zbit */
-       
-               if(!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
-                       goto out;
-
-               /* HARD CODED FOR MD5 */
-
-               /* compute the checksum of the message.
-               *  ptr + 2 = start of header piece of checksum
-               *  mic_hdrlen + 2 = length of header piece of checksum
-               */
-               ret = GSS_S_DEFECTIVE_TOKEN;
-               code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2, 
-                                       mic_hdrlen + 2, 
-                                       message_buffer, 0, &md5cksum);
-
-               if (code)
-                       goto out;
-
-               dprintk("RPC: spkm3_read_token: digest wire_cksum.len %d:\n", 
-                       wire_cksum.len);
-               dprintk("          md5cksum.data\n");
-               print_hexl((u32 *) md5cksum.data, 16, 0);
-               dprintk("          cksum.data:\n");
-               print_hexl((u32 *) wire_cksum.data, wire_cksum.len, 0);
-
-               ret = GSS_S_BAD_SIG;
-               code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
-               if (code)
-                       goto out;
-
-       } else { 
-               dprintk("RPC: BAD or UNSUPPORTED SPKM3 token type: %d\n",toktype);
+       if (toktype != SPKM_MIC_TOK) {
+               dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
+               goto out;
+       }
+
+       if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
+               goto out;
+
+       if (*cksum++ != 0x03) {
+               dprintk("RPC: spkm3_read_token BAD checksum type\n");
+               goto out;
+       }
+       md5elen = *cksum++;
+       cksum++;        /* move past the zbit */
+
+       if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
+               goto out;
+
+       /* HARD CODED FOR MD5 */
+
+       /* compute the checksum of the message.
+        * ptr + 2 = start of header piece of checksum
+        * mic_hdrlen + 2 = length of header piece of checksum
+        */
+       ret = GSS_S_DEFECTIVE_TOKEN;
+       if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
+               dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm\n");
+               goto out;
+       }
+
+       checksum_type = CKSUMTYPE_HMAC_MD5;
+
+       code = make_spkm3_checksum(checksum_type,
+               &ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2,
+               message_buffer, 0, &md5cksum);
+
+       if (code)
+               goto out;
+
+       ret = GSS_S_BAD_SIG;
+       code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
+       if (code) {
+               dprintk("RPC: bad MIC checksum\n");
                goto out;
        }
 
+
        /* XXX: need to add expiration and sequencing */
        ret = GSS_S_COMPLETE;
 out:
index 00cb388ece032cec8aeba948bb97c0e653eede5e..d96fd466a9a46ccb460f02dba2be901f2f59f7d0 100644 (file)
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations;
 static struct file_operations content_file_operations;
 static struct file_operations cache_flush_operations;
 
-static void do_cache_clean(void *data);
-static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL);
+static void do_cache_clean(struct work_struct *work);
+static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
 
 void cache_register(struct cache_detail *cd)
 {
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd)
        spin_unlock(&cache_list_lock);
 
        /* start the cleaning process */
-       schedule_work(&cache_cleaner);
+       schedule_delayed_work(&cache_cleaner, 0);
 }
 
 int cache_unregister(struct cache_detail *cd)
@@ -461,7 +461,7 @@ static int cache_clean(void)
 /*
  * We want to regularly clean the cache, so we need to schedule some work ...
  */
-static void do_cache_clean(void *data)
+static void do_cache_clean(struct work_struct *work)
 {
        int delay = 5;
        if (cache_clean() == -1)
index dfeea4fea95a7f6dfb4ea038491aaba39ced48dc..aba528b9ae769a8aa0fb5b5ece07ff704dab2291 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/utsname.h>
 #include <linux/workqueue.h>
 
@@ -141,6 +142,10 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
        clnt->cl_vers     = version->number;
        clnt->cl_stats    = program->stats;
        clnt->cl_metrics  = rpc_alloc_iostats(clnt);
+       err = -ENOMEM;
+       if (clnt->cl_metrics == NULL)
+               goto out_no_stats;
+       clnt->cl_program  = program;
 
        if (!xprt_bound(clnt->cl_xprt))
                clnt->cl_autobind = 1;
@@ -173,6 +178,8 @@ out_no_auth:
                rpc_put_mount();
        }
 out_no_path:
+       rpc_free_iostats(clnt->cl_metrics);
+out_no_stats:
        if (clnt->cl_server != clnt->cl_inline_name)
                kfree(clnt->cl_server);
        kfree(clnt);
@@ -252,12 +259,19 @@ struct rpc_clnt *
 rpc_clone_client(struct rpc_clnt *clnt)
 {
        struct rpc_clnt *new;
+       int err = -ENOMEM;
 
        new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
        if (!new)
                goto out_no_clnt;
        atomic_set(&new->cl_count, 1);
        atomic_set(&new->cl_users, 0);
+       new->cl_metrics = rpc_alloc_iostats(clnt);
+       if (new->cl_metrics == NULL)
+               goto out_no_stats;
+       err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
+       if (err != 0)
+               goto out_no_path;
        new->cl_parent = clnt;
        atomic_inc(&clnt->cl_count);
        new->cl_xprt = xprt_get(clnt->cl_xprt);
@@ -265,16 +279,17 @@ rpc_clone_client(struct rpc_clnt *clnt)
        new->cl_autobind = 0;
        new->cl_oneshot = 0;
        new->cl_dead = 0;
-       if (!IS_ERR(new->cl_dentry))
-               dget(new->cl_dentry);
        rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
        if (new->cl_auth)
                atomic_inc(&new->cl_auth->au_count);
-       new->cl_metrics = rpc_alloc_iostats(clnt);
        return new;
+out_no_path:
+       rpc_free_iostats(new->cl_metrics);
+out_no_stats:
+       kfree(new);
 out_no_clnt:
-       printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
-       return ERR_PTR(-ENOMEM);
+       dprintk("RPC: %s returned error %d\n", __FUNCTION__, err);
+       return ERR_PTR(err);
 }
 
 /*
@@ -327,16 +342,14 @@ rpc_destroy_client(struct rpc_clnt *clnt)
                rpcauth_destroy(clnt->cl_auth);
                clnt->cl_auth = NULL;
        }
-       if (clnt->cl_parent != clnt) {
-               if (!IS_ERR(clnt->cl_dentry))
-                       dput(clnt->cl_dentry);
-               rpc_destroy_client(clnt->cl_parent);
-               goto out_free;
-       }
        if (!IS_ERR(clnt->cl_dentry)) {
                rpc_rmdir(clnt->cl_dentry);
                rpc_put_mount();
        }
+       if (clnt->cl_parent != clnt) {
+               rpc_destroy_client(clnt->cl_parent);
+               goto out_free;
+       }
        if (clnt->cl_server != clnt->cl_inline_name)
                kfree(clnt->cl_server);
 out_free:
@@ -466,10 +479,9 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 
        BUG_ON(flags & RPC_TASK_ASYNC);
 
-       status = -ENOMEM;
        task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
        if (task == NULL)
-               goto out;
+               return -ENOMEM;
 
        /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
        rpc_task_sigmask(task, &oldset);
@@ -478,15 +490,17 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 
        /* Set up the call info struct and execute the task */
        status = task->tk_status;
-       if (status == 0) {
-               atomic_inc(&task->tk_count);
-               status = rpc_execute(task);
-               if (status == 0)
-                       status = task->tk_status;
+       if (status != 0) {
+               rpc_release_task(task);
+               goto out;
        }
-       rpc_restore_sigmask(&oldset);
-       rpc_release_task(task);
+       atomic_inc(&task->tk_count);
+       status = rpc_execute(task);
+       if (status == 0)
+               status = task->tk_status;
+       rpc_put_task(task);
 out:
+       rpc_restore_sigmask(&oldset);
        return status;
 }
 
@@ -528,8 +542,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
        rpc_restore_sigmask(&oldset);           
        return status;
 out_release:
-       if (tk_ops->rpc_release != NULL)
-               tk_ops->rpc_release(data);
+       rpc_release_calldata(tk_ops, data);
        return status;
 }
 
@@ -581,7 +594,11 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr);
 char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
 {
        struct rpc_xprt *xprt = clnt->cl_xprt;
-       return xprt->ops->print_addr(xprt, format);
+
+       if (xprt->address_strings[format] != NULL)
+               return xprt->address_strings[format];
+       else
+               return "unprintable";
 }
 EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
 
@@ -811,8 +828,10 @@ call_encode(struct rpc_task *task)
        if (encode == NULL)
                return;
 
+       lock_kernel();
        task->tk_status = rpcauth_wrap_req(task, encode, req, p,
                        task->tk_msg.rpc_argp);
+       unlock_kernel();
        if (task->tk_status == -ENOMEM) {
                /* XXX: Is this sane? */
                rpc_delay(task, 3*HZ);
@@ -1143,9 +1162,12 @@ call_decode(struct rpc_task *task)
 
        task->tk_action = rpc_exit_task;
 
-       if (decode)
+       if (decode) {
+               lock_kernel();
                task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
                                                      task->tk_msg.rpc_resp);
+               unlock_kernel();
+       }
        dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
                                        task->tk_status);
        return;
index e52afab413ded56212cad69a2801df58bc6b6d6c..3946ec3eb517a674881fc3b8d35277f2a5f3de44 100644 (file)
@@ -101,14 +101,14 @@ void rpc_getport(struct rpc_task *task)
        /* Autobind on cloned rpc clients is discouraged */
        BUG_ON(clnt->cl_parent != clnt);
 
+       status = -EACCES;               /* tell caller to check again */
+       if (xprt_test_and_set_binding(xprt))
+               goto bailout_nowake;
+
        /* Put self on queue before sending rpcbind request, in case
         * pmap_getport_done completes before we return from rpc_run_task */
        rpc_sleep_on(&xprt->binding, task, NULL, NULL);
 
-       status = -EACCES;               /* tell caller to check again */
-       if (xprt_test_and_set_binding(xprt))
-               goto bailout_nofree;
-
        /* Someone else may have bound if we slept */
        status = 0;
        if (xprt_bound(xprt))
@@ -134,7 +134,7 @@ void rpc_getport(struct rpc_task *task)
        child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map);
        if (IS_ERR(child))
                goto bailout;
-       rpc_release_task(child);
+       rpc_put_task(child);
 
        task->tk_xprt->stat.bind_count++;
        return;
@@ -143,8 +143,9 @@ bailout:
        pmap_map_free(map);
        xprt_put(xprt);
 bailout_nofree:
-       task->tk_status = status;
        pmap_wake_portmap_waiters(xprt, status);
+bailout_nowake:
+       task->tk_status = status;
 }
 
 #ifdef CONFIG_ROOT_NFS
index 9a0b41a97f90764f84636811137a76cdfff5ac3b..19703aa9659e7bc37726ce54360d1b1d636e3fc0 100644 (file)
@@ -33,7 +33,7 @@ static int rpc_mount_count;
 static struct file_system_type rpc_pipe_fs_type;
 
 
-static kmem_cache_t *rpc_inode_cachep __read_mostly;
+static struct kmem_cache *rpc_inode_cachep __read_mostly;
 
 #define RPC_UPCALL_TIMEOUT (30*HZ)
 
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
 }
 
 static void
-rpc_timeout_upcall_queue(void *data)
+rpc_timeout_upcall_queue(struct work_struct *work)
 {
        LIST_HEAD(free_list);
-       struct rpc_inode *rpci = (struct rpc_inode *)data;
+       struct rpc_inode *rpci =
+               container_of(work, struct rpc_inode, queue_timeout.work);
        struct inode *inode = &rpci->vfs_inode;
        void (*destroy_msg)(struct rpc_pipe_msg *);
 
@@ -142,7 +143,7 @@ static struct inode *
 rpc_alloc_inode(struct super_block *sb)
 {
        struct rpc_inode *rpci;
-       rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL);
+       rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
        if (!rpci)
                return NULL;
        return &rpci->vfs_inode;
@@ -823,7 +824,7 @@ static struct file_system_type rpc_pipe_fs_type = {
 };
 
 static void
-init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
 {
        struct rpc_inode *rpci = (struct rpc_inode *) foo;
 
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
                INIT_LIST_HEAD(&rpci->pipe);
                rpci->pipelen = 0;
                init_waitqueue_head(&rpci->waitq);
-               INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci);
+               INIT_DELAYED_WORK(&rpci->queue_timeout,
+                                   rpc_timeout_upcall_queue);
                rpci->ops = NULL;
        }
 }
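
rpc_pipe.c is converted to the reworked workqueue API: a work handler now receives a struct work_struct * rather than a void * cookie and recovers its object with container_of(), and work that fires after a delay becomes a struct delayed_work set up with INIT_DELAYED_WORK(). SLAB_KERNEL and kmem_cache_t give way to GFP_KERNEL and struct kmem_cache at the same time. A minimal sketch of the new work pattern on a hypothetical struct foo:

#include <linux/workqueue.h>

struct foo {
        struct delayed_work     dwork;
        int                     pending;
};

static void foo_worker(struct work_struct *work)
{
        struct foo *f = container_of(work, struct foo, dwork.work);

        f->pending = 0;         /* the object comes from container_of(),
                                 * not from a void * argument */
}

/* setup and arming:
 *      INIT_DELAYED_WORK(&f->dwork, foo_worker);
 *      schedule_delayed_work(&f->dwork, 30 * HZ);
 */
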
index a1ab4eed41f4b5a495f18573aef04b3457403ba3..79bc4cdf5d4861e70c2614012dfbec35d7409e72 100644 (file)
@@ -34,14 +34,14 @@ static int                  rpc_task_id;
 #define RPC_BUFFER_MAXSIZE     (2048)
 #define RPC_BUFFER_POOLSIZE    (8)
 #define RPC_TASK_POOLSIZE      (8)
-static kmem_cache_t    *rpc_task_slabp __read_mostly;
-static kmem_cache_t    *rpc_buffer_slabp __read_mostly;
+static struct kmem_cache       *rpc_task_slabp __read_mostly;
+static struct kmem_cache       *rpc_buffer_slabp __read_mostly;
 static mempool_t       *rpc_task_mempool __read_mostly;
 static mempool_t       *rpc_buffer_mempool __read_mostly;
 
 static void                    __rpc_default_timer(struct rpc_task *task);
 static void                    rpciod_killall(void);
-static void                    rpc_async_schedule(void *);
+static void                    rpc_async_schedule(struct work_struct *);
 
 /*
  * RPC tasks sit here while waiting for conditions to improve.
@@ -266,12 +266,28 @@ static int rpc_wait_bit_interruptible(void *word)
        return 0;
 }
 
+static void rpc_set_active(struct rpc_task *task)
+{
+       if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
+               return;
+       spin_lock(&rpc_sched_lock);
+#ifdef RPC_DEBUG
+       task->tk_magic = RPC_TASK_MAGIC_ID;
+       task->tk_pid = rpc_task_id++;
+#endif
+       /* Add to global list of all tasks */
+       list_add_tail(&task->tk_task, &all_tasks);
+       spin_unlock(&rpc_sched_lock);
+}
+
 /*
  * Mark an RPC call as having completed by clearing the 'active' bit
  */
-static inline void rpc_mark_complete_task(struct rpc_task *task)
+static void rpc_mark_complete_task(struct rpc_task *task)
 {
-       rpc_clear_active(task);
+       smp_mb__before_clear_bit();
+       clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
+       smp_mb__after_clear_bit();
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
 }
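
rpc_mark_complete_task() now clears RPC_TASK_ACTIVE with explicit memory barriers on either side, so a sleeper in wait_on_bit() cannot observe stale state before wake_up_bit() kicks the bit waitqueue. The generic shape of that handshake, with a hypothetical bit and state word:

#include <linux/wait.h>
#include <linux/bitops.h>

/* Completer: publish "done" before waking the bit waitqueue. */
static void mark_done(unsigned long *state)
{
        smp_mb__before_clear_bit();
        clear_bit(FOO_ACTIVE, state);
        smp_mb__after_clear_bit();
        wake_up_bit(state, FOO_ACTIVE);
}

/* Waiter side (may sleep):
 *      wait_on_bit(state, FOO_ACTIVE, foo_wait_action, TASK_INTERRUPTIBLE);
 * where foo_wait_action() typically just calls schedule() and returns 0.
 */
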
 
@@ -295,17 +311,19 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
  */
 static void rpc_make_runnable(struct rpc_task *task)
 {
-       int do_ret;
-
        BUG_ON(task->tk_timeout_fn);
-       do_ret = rpc_test_and_set_running(task);
        rpc_clear_queued(task);
-       if (do_ret)
+       if (rpc_test_and_set_running(task))
                return;
+       /* We might have raced */
+       if (RPC_IS_QUEUED(task)) {
+               rpc_clear_running(task);
+               return;
+       }
        if (RPC_IS_ASYNC(task)) {
                int status;
 
-               INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
+               INIT_WORK(&task->u.tk_work, rpc_async_schedule);
                status = queue_work(task->tk_workqueue, &task->u.tk_work);
                if (status < 0) {
                        printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
@@ -333,9 +351,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                return;
        }
 
-       /* Mark the task as being activated if so needed */
-       rpc_set_active(task);
-
        __rpc_add_wait_queue(q, task);
 
        BUG_ON(task->tk_callback != NULL);
@@ -346,6 +361,9 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                                rpc_action action, rpc_action timer)
 {
+       /* Mark the task as being activated if so needed */
+       rpc_set_active(task);
+
        /*
         * Protect the queue operations.
         */
@@ -409,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task)
  */
 void rpc_wake_up_task(struct rpc_task *task)
 {
+       rcu_read_lock_bh();
        if (rpc_start_wakeup(task)) {
                if (RPC_IS_QUEUED(task)) {
                        struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
 
-                       spin_lock_bh(&queue->lock);
+                       /* Note: we're already in a bh-safe context */
+                       spin_lock(&queue->lock);
                        __rpc_do_wake_up_task(task);
-                       spin_unlock_bh(&queue->lock);
+                       spin_unlock(&queue->lock);
                }
                rpc_finish_wakeup(task);
        }
+       rcu_read_unlock_bh();
 }
 
 /*
@@ -481,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
        struct rpc_task *task = NULL;
 
        dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
-       spin_lock_bh(&queue->lock);
+       rcu_read_lock_bh();
+       spin_lock(&queue->lock);
        if (RPC_IS_PRIORITY(queue))
                task = __rpc_wake_up_next_priority(queue);
        else {
                task_for_first(task, &queue->tasks[0])
                        __rpc_wake_up_task(task);
        }
-       spin_unlock_bh(&queue->lock);
+       spin_unlock(&queue->lock);
+       rcu_read_unlock_bh();
 
        return task;
 }
@@ -504,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
        struct rpc_task *task, *next;
        struct list_head *head;
 
-       spin_lock_bh(&queue->lock);
+       rcu_read_lock_bh();
+       spin_lock(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -513,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
                        break;
                head--;
        }
-       spin_unlock_bh(&queue->lock);
+       spin_unlock(&queue->lock);
+       rcu_read_unlock_bh();
 }
 
 /**
@@ -528,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
        struct rpc_task *task, *next;
        struct list_head *head;
 
-       spin_lock_bh(&queue->lock);
+       rcu_read_lock_bh();
+       spin_lock(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -539,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
                        break;
                head--;
        }
-       spin_unlock_bh(&queue->lock);
+       spin_unlock(&queue->lock);
+       rcu_read_unlock_bh();
 }
 
 static void __rpc_atrun(struct rpc_task *task)
@@ -561,7 +588,9 @@ void rpc_delay(struct rpc_task *task, unsigned long delay)
  */
 static void rpc_prepare_task(struct rpc_task *task)
 {
+       lock_kernel();
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
+       unlock_kernel();
 }
 
 /*
@@ -571,7 +600,9 @@ void rpc_exit_task(struct rpc_task *task)
 {
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
+               lock_kernel();
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
+               unlock_kernel();
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
@@ -581,6 +612,15 @@ void rpc_exit_task(struct rpc_task *task)
 }
 EXPORT_SYMBOL(rpc_exit_task);
 
+void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
+{
+       if (ops->rpc_release != NULL) {
+               lock_kernel();
+               ops->rpc_release(calldata);
+               unlock_kernel();
+       }
+}
+
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
@@ -615,9 +655,7 @@ static int __rpc_execute(struct rpc_task *task)
                         */
                        save_callback=task->tk_callback;
                        task->tk_callback=NULL;
-                       lock_kernel();
                        save_callback(task);
-                       unlock_kernel();
                }
 
                /*
@@ -628,9 +666,7 @@ static int __rpc_execute(struct rpc_task *task)
                if (!RPC_IS_QUEUED(task)) {
                        if (task->tk_action == NULL)
                                break;
-                       lock_kernel();
                        task->tk_action(task);
-                       unlock_kernel();
                }
 
                /*
@@ -671,8 +707,6 @@ static int __rpc_execute(struct rpc_task *task)
        }
 
        dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
-       /* Wake up anyone who is waiting for task completion */
-       rpc_mark_complete_task(task);
        /* Release all resources associated with the task */
        rpc_release_task(task);
        return status;
@@ -695,9 +729,9 @@ rpc_execute(struct rpc_task *task)
        return __rpc_execute(task);
 }
 
-static void rpc_async_schedule(void *arg)
+static void rpc_async_schedule(struct work_struct *work)
 {
-       __rpc_execute((struct rpc_task *)arg);
+       __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
 }
 
 /**
@@ -786,15 +820,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
                        task->tk_flags |= RPC_TASK_NOINTR;
        }
 
-#ifdef RPC_DEBUG
-       task->tk_magic = RPC_TASK_MAGIC_ID;
-       task->tk_pid = rpc_task_id++;
-#endif
-       /* Add to global list of all tasks */
-       spin_lock(&rpc_sched_lock);
-       list_add_tail(&task->tk_task, &all_tasks);
-       spin_unlock(&rpc_sched_lock);
-
        BUG_ON(task->tk_ops == NULL);
 
        /* starting timestamp */
@@ -810,8 +835,9 @@ rpc_alloc_task(void)
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rcu_head *rcu)
 {
+       struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
        dprintk("RPC: %4d freeing task\n", task->tk_pid);
        mempool_free(task, rpc_task_mempool);
 }
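
rpc_free_task() becomes an RCU-bh callback: a dynamically allocated task is handed to call_rcu_bh() and only returned to the mempool after a grace period (see rpc_put_task() below). That is also what lets the wake-up paths earlier in this file trade spin_lock_bh() for rcu_read_lock_bh() plus a plain spin_lock(): BHs are still disabled, and any task a waker dereferences is guaranteed to stay allocated until the read-side section ends. A sketch of the same deferred-free pattern on a hypothetical struct foo:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        struct rcu_head rcu;
        /* ... payload ... */
};

static void foo_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
        /* readers under rcu_read_lock_bh() may still be using f;
         * the actual kfree() happens after a grace period */
        call_rcu_bh(&f->rcu, foo_free_rcu);
}
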
@@ -847,16 +873,34 @@ cleanup:
        goto out;
 }
 
-void rpc_release_task(struct rpc_task *task)
+
+void rpc_put_task(struct rpc_task *task)
 {
        const struct rpc_call_ops *tk_ops = task->tk_ops;
        void *calldata = task->tk_calldata;
 
+       if (!atomic_dec_and_test(&task->tk_count))
+               return;
+       /* Release resources */
+       if (task->tk_rqstp)
+               xprt_release(task);
+       if (task->tk_msg.rpc_cred)
+               rpcauth_unbindcred(task);
+       if (task->tk_client) {
+               rpc_release_client(task->tk_client);
+               task->tk_client = NULL;
+       }
+       if (task->tk_flags & RPC_TASK_DYNAMIC)
+               call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
+       rpc_release_calldata(tk_ops, calldata);
+}
+EXPORT_SYMBOL(rpc_put_task);
+
+void rpc_release_task(struct rpc_task *task)
+{
 #ifdef RPC_DEBUG
        BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
 #endif
-       if (!atomic_dec_and_test(&task->tk_count))
-               return;
        dprintk("RPC: %4d release task\n", task->tk_pid);
 
        /* Remove from global task list */
@@ -869,23 +913,13 @@ void rpc_release_task(struct rpc_task *task)
        /* Synchronously delete any running timer */
        rpc_delete_timer(task);
 
-       /* Release resources */
-       if (task->tk_rqstp)
-               xprt_release(task);
-       if (task->tk_msg.rpc_cred)
-               rpcauth_unbindcred(task);
-       if (task->tk_client) {
-               rpc_release_client(task->tk_client);
-               task->tk_client = NULL;
-       }
-
 #ifdef RPC_DEBUG
        task->tk_magic = 0;
 #endif
-       if (task->tk_flags & RPC_TASK_DYNAMIC)
-               rpc_free_task(task);
-       if (tk_ops->rpc_release)
-               tk_ops->rpc_release(calldata);
+       /* Wake up anyone who is waiting for task completion */
+       rpc_mark_complete_task(task);
+
+       rpc_put_task(task);
 }
 
 /**
@@ -902,8 +936,7 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
        struct rpc_task *task;
        task = rpc_new_task(clnt, flags, ops, data);
        if (task == NULL) {
-               if (ops->rpc_release != NULL)
-                       ops->rpc_release(data);
+               rpc_release_calldata(ops, data);
                return ERR_PTR(-ENOMEM);
        }
        atomic_inc(&task->tk_count);
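
rpc_release_task() is split in two. The reference counting and the release of request, credential and client move into the new rpc_put_task(), which frees dynamically allocated tasks through the RCU callback above; rpc_release_task() itself becomes the scheduler-internal path that unlinks the task from the global list, deletes its timer, marks it complete (the call removed from __rpc_execute() above) and then drops the scheduler's reference. Callers that used to pair rpc_run_task() with rpc_release_task() now simply drop their own reference, as the pmap_clnt.c hunk earlier does; a sketch, where my_ops and my_data are hypothetical:

struct rpc_task *child;

child = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, my_data);
if (IS_ERR(child))
        return PTR_ERR(child);
rpc_put_task(child);    /* drop the caller's reference; the running task
                         * keeps its own until it completes */
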
index 2635c543ba067979f4d31b95227117d1451ded73..634885b0c04dcc5e6e4909297b7368e73898086d 100644 (file)
@@ -16,7 +16,7 @@
 
 
 /**
- * skb_read_bits - copy some data bits from skb to internal buffer
+ * xdr_skb_read_bits - copy some data bits from skb to internal buffer
  * @desc: sk_buff copy helper
  * @to: copy destination
  * @len: number of bytes to copy
  * Possibly called several times to iterate over an sk_buff and copy
  * data out of it.
  */
-static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
+size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
 {
        if (len > desc->count)
                len = desc->count;
-       if (skb_copy_bits(desc->skb, desc->offset, to, len))
+       if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
                return 0;
        desc->count -= len;
        desc->offset += len;
@@ -36,14 +36,14 @@ static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
 }
 
 /**
- * skb_read_and_csum_bits - copy and checksum from skb to buffer
+ * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
  * @desc: sk_buff copy helper
  * @to: copy destination
  * @len: number of bytes to copy
  *
  * Same as skb_read_bits, but calculate a checksum at the same time.
  */
-static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
+static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
 {
        unsigned int pos;
        __wsum csum2;
@@ -66,7 +66,7 @@ static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
  * @copy_actor: virtual method for copying data
  *
  */
-ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor)
+ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
 {
        struct page     **ppage = xdr->pages;
        unsigned int    len, pglen = xdr->page_len;
@@ -148,7 +148,7 @@ out:
  */
 int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 {
-       skb_reader_t    desc;
+       struct xdr_skb_reader   desc;
 
        desc.skb = skb;
        desc.offset = sizeof(struct udphdr);
@@ -158,7 +158,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
                goto no_checksum;
 
        desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
-       if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0)
+       if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
                return -1;
        if (desc.offset != skb->len) {
                __wsum csum2;
@@ -173,7 +173,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
                netdev_rx_csum_fault(skb->dev);
        return 0;
 no_checksum:
-       if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
+       if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
                return -1;
        if (desc.count)
                return -1;
index 192dff5dabcbc76258a378179d7132f5e3845bf8..d85fddeb6388f54e4293b85e1e64da965c2ebd76 100644 (file)
@@ -33,7 +33,6 @@ EXPORT_SYMBOL(rpciod_down);
 EXPORT_SYMBOL(rpciod_up);
 EXPORT_SYMBOL(rpc_new_task);
 EXPORT_SYMBOL(rpc_wake_up_status);
-EXPORT_SYMBOL(rpc_release_task);
 
 /* RPC client functions */
 EXPORT_SYMBOL(rpc_clone_client);
@@ -139,6 +138,8 @@ EXPORT_SYMBOL(nlm_debug);
 extern int register_rpc_pipefs(void);
 extern void unregister_rpc_pipefs(void);
 extern struct cache_detail ip_map_cache;
+extern int init_socket_xprt(void);
+extern void cleanup_socket_xprt(void);
 
 static int __init
 init_sunrpc(void)
@@ -156,6 +157,7 @@ init_sunrpc(void)
        rpc_proc_init();
 #endif
        cache_register(&ip_map_cache);
+       init_socket_xprt();
 out:
        return err;
 }
@@ -163,6 +165,7 @@ out:
 static void __exit
 cleanup_sunrpc(void)
 {
+       cleanup_socket_xprt();
        unregister_rpc_pipefs();
        rpc_destroy_mempool();
        if (cache_unregister(&ip_map_cache))
index ee9bb1522d5ecb54c245d060af9669edf34cd216..c7bb5f7f21a587213a4003ec1f5218492a579d3d 100644 (file)
@@ -119,7 +119,8 @@ EXPORT_SYMBOL(svc_auth_unregister);
 #define        DN_HASHMASK     (DN_HASHMAX-1)
 
 static struct hlist_head       auth_domain_table[DN_HASHMAX];
-static spinlock_t      auth_domain_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t      auth_domain_lock =
+       __SPIN_LOCK_UNLOCKED(auth_domain_lock);
 
 void auth_domain_put(struct auth_domain *dom)
 {
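
The static initializer changes because SPIN_LOCK_UNLOCKED puts every lock it initializes into the same lockdep class; __SPIN_LOCK_UNLOCKED(name) keys the class to the individual lock instead. For a newly written lock the equivalent, usually preferred, spelling is DEFINE_SPINLOCK():

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);        /* declares and initializes in one go */
static spinlock_t my_other_lock = __SPIN_LOCK_UNLOCKED(my_other_lock);
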
index 64ca1f61dd9497fde7cb31a235468ea1c2606fbe..99f54fb6d66903ae9b9887c76e4138838f60a43a 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -84,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req);
  */
 static int svc_conn_age_period = 6*60;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       BUG_ON(sk->sk_lock.owner != NULL);
+       switch (sk->sk_family) {
+       case AF_INET:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+                   &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+               break;
+
+       case AF_INET6:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+                   &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+               break;
+
+       default:
+               BUG();
+       }
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /*
  * Queue up an idle server thread.  Must have pool->sp_lock held.
  * Note: this is really a stack rather than a queue, so that we only
@@ -1556,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
        if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                return error;
 
+       svc_reclassify_socket(sock);
+
        if (type == SOCK_STREAM)
                sock->sk->sk_reuse = 1; /* allow address reuse */
        error = kernel_bind(sock, (struct sockaddr *) sin,
index d89b048ad6bba57a4964c72a216a93d3a3bff4fd..82b27528d0c45b50b2b25416b84b9f03b1d6ac74 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/stats.h>
-#include <linux/sunrpc/xprt.h>
 
 /*
  * Declare the debug flags here
@@ -119,11 +118,6 @@ done:
 }
 
 
-static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
-static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
-static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
-static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
-
 static ctl_table debug_table[] = {
        {
                .ctl_name       = CTL_RPCDEBUG,
@@ -157,50 +151,6 @@ static ctl_table debug_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dodebug
        }, 
-       {
-               .ctl_name       = CTL_SLOTTABLE_UDP,
-               .procname       = "udp_slot_table_entries",
-               .data           = &xprt_udp_slot_table_entries,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &min_slot_table_size,
-               .extra2         = &max_slot_table_size
-       },
-       {
-               .ctl_name       = CTL_SLOTTABLE_TCP,
-               .procname       = "tcp_slot_table_entries",
-               .data           = &xprt_tcp_slot_table_entries,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &min_slot_table_size,
-               .extra2         = &max_slot_table_size
-       },
-       {
-               .ctl_name       = CTL_MIN_RESVPORT,
-               .procname       = "min_resvport",
-               .data           = &xprt_min_resvport,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &xprt_min_resvport_limit,
-               .extra2         = &xprt_max_resvport_limit
-       },
-       {
-               .ctl_name       = CTL_MAX_RESVPORT,
-               .procname       = "max_resvport",
-               .data           = &xprt_max_resvport,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &xprt_min_resvport_limit,
-               .extra2         = &xprt_max_resvport_limit
-       },
        { .ctl_name = 0 }
 };
 
index 9022eb8b37ed9d05ea2e1bd983747b49b4c0ff7b..a0af250ca319752a6c940ff6be4ef7beebc937f2 100644 (file)
@@ -640,41 +640,30 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
        buf->buflen = buf->len = iov->iov_len;
 }
 
-/* Sets subiov to the intersection of iov with the buffer of length len
- * starting base bytes after iov.  Indicates empty intersection by setting
- * length of subiov to zero.  Decrements len by length of subiov, sets base
- * to zero (or decrements it by length of iov if subiov is empty). */
-static void
-iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
-{
-       if (*base > iov->iov_len) {
-               subiov->iov_base = NULL;
-               subiov->iov_len = 0;
-               *base -= iov->iov_len;
-       } else {
-               subiov->iov_base = iov->iov_base + *base;
-               subiov->iov_len = min(*len, (int)iov->iov_len - *base);
-               *base = 0;
-       }
-       *len -= subiov->iov_len; 
-}
-
 /* Sets subbuf to the portion of buf of length len beginning base bytes
  * from the start of buf. Returns -1 if base or length are out of bounds. */
 int
 xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
-                       int base, int len)
+                       unsigned int base, unsigned int len)
 {
-       int i;
-
        subbuf->buflen = subbuf->len = len;
-       iov_subsegment(buf->head, subbuf->head, &base, &len);
+       if (base < buf->head[0].iov_len) {
+               subbuf->head[0].iov_base = buf->head[0].iov_base + base;
+               subbuf->head[0].iov_len = min_t(unsigned int, len,
+                                               buf->head[0].iov_len - base);
+               len -= subbuf->head[0].iov_len;
+               base = 0;
+       } else {
+               subbuf->head[0].iov_base = NULL;
+               subbuf->head[0].iov_len = 0;
+               base -= buf->head[0].iov_len;
+       }
 
        if (base < buf->page_len) {
-               i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
-               subbuf->pages = &buf->pages[i];
-               subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
-               subbuf->page_len = min((int)buf->page_len - base, len);
+               subbuf->page_len = min(buf->page_len - base, len);
+               base += buf->page_base;
+               subbuf->page_base = base & ~PAGE_CACHE_MASK;
+               subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
@@ -682,66 +671,85 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                subbuf->page_len = 0;
        }
 
-       iov_subsegment(buf->tail, subbuf->tail, &base, &len);
+       if (base < buf->tail[0].iov_len) {
+               subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
+               subbuf->tail[0].iov_len = min_t(unsigned int, len,
+                                               buf->tail[0].iov_len - base);
+               len -= subbuf->tail[0].iov_len;
+               base = 0;
+       } else {
+               subbuf->tail[0].iov_base = NULL;
+               subbuf->tail[0].iov_len = 0;
+               base -= buf->tail[0].iov_len;
+       }
+
        if (base || len)
                return -1;
        return 0;
 }
 
-/* obj is assumed to point to allocated memory of size at least len: */
-int
-read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
+static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
-       struct xdr_buf subbuf;
-       int this_len;
-       int status;
+       unsigned int this_len;
 
-       status = xdr_buf_subsegment(buf, &subbuf, base, len);
-       if (status)
-               goto out;
-       this_len = min(len, (int)subbuf.head[0].iov_len);
-       memcpy(obj, subbuf.head[0].iov_base, this_len);
+       this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
+       memcpy(obj, subbuf->head[0].iov_base, this_len);
        len -= this_len;
        obj += this_len;
-       this_len = min(len, (int)subbuf.page_len);
+       this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
-               _copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
+               _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
        len -= this_len;
        obj += this_len;
-       this_len = min(len, (int)subbuf.tail[0].iov_len);
-       memcpy(obj, subbuf.tail[0].iov_base, this_len);
-out:
-       return status;
+       this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
+       memcpy(obj, subbuf->tail[0].iov_base, this_len);
 }
 
 /* obj is assumed to point to allocated memory of size at least len: */
-int
-write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
+int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
 {
        struct xdr_buf subbuf;
-       int this_len;
        int status;
 
        status = xdr_buf_subsegment(buf, &subbuf, base, len);
-       if (status)
-               goto out;
-       this_len = min(len, (int)subbuf.head[0].iov_len);
-       memcpy(subbuf.head[0].iov_base, obj, this_len);
+       if (status != 0)
+               return status;
+       __read_bytes_from_xdr_buf(&subbuf, obj, len);
+       return 0;
+}
+
+static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
+{
+       unsigned int this_len;
+
+       this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
+       memcpy(subbuf->head[0].iov_base, obj, this_len);
        len -= this_len;
        obj += this_len;
-       this_len = min(len, (int)subbuf.page_len);
+       this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
-               _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
+               _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
        len -= this_len;
        obj += this_len;
-       this_len = min(len, (int)subbuf.tail[0].iov_len);
-       memcpy(subbuf.tail[0].iov_base, obj, this_len);
-out:
-       return status;
+       this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
+       memcpy(subbuf->tail[0].iov_base, obj, this_len);
+}
+
+/* obj is assumed to point to allocated memory of size at least len: */
+int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
+{
+       struct xdr_buf subbuf;
+       int status;
+
+       status = xdr_buf_subsegment(buf, &subbuf, base, len);
+       if (status != 0)
+               return status;
+       __write_bytes_to_xdr_buf(&subbuf, obj, len);
+       return 0;
 }
 
 int
-xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
+xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
 {
        __be32  raw;
        int     status;
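
xdr_buf_subsegment() now open-codes the head/pages/tail intersection with unsigned arithmetic (replacing iov_subsegment()), and read_bytes_from_xdr_buf()/write_bytes_to_xdr_buf() are rebuilt as thin wrappers around it plus the new __read/__write helpers, all taking unsigned offsets and lengths. A short usage sketch; the buffer variable is hypothetical:

__be32 xid;

/* Copy four bytes out of the reply, wherever they happen to live:
 * head kvec, page array, or tail kvec. */
if (read_bytes_from_xdr_buf(&reply_buf, 0, &xid, sizeof(xid)) != 0)
        return -EIO;
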
@@ -754,7 +762,7 @@ xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
 }
 
 int
-xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
+xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
 {
        __be32  raw = htonl(obj);
 
@@ -765,44 +773,37 @@ xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
  * entirely in the head or the tail, set object to point to it; otherwise
  * try to find space for it at the end of the tail, copy it there, and
  * set obj to point to it. */
-int
-xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
+int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
 {
-       u32     tail_offset = buf->head[0].iov_len + buf->page_len;
-       u32     obj_end_offset;
+       struct xdr_buf subbuf;
 
        if (xdr_decode_word(buf, offset, &obj->len))
-               goto out;
-       obj_end_offset = offset + 4 + obj->len;
-
-       if (obj_end_offset <= buf->head[0].iov_len) {
-               /* The obj is contained entirely in the head: */
-               obj->data = buf->head[0].iov_base + offset + 4;
-       } else if (offset + 4 >= tail_offset) {
-               if (obj_end_offset - tail_offset
-                               > buf->tail[0].iov_len)
-                       goto out;
-               /* The obj is contained entirely in the tail: */
-               obj->data = buf->tail[0].iov_base
-                       + offset - tail_offset + 4;
-       } else {
-               /* use end of tail as storage for obj:
-                * (We don't copy to the beginning because then we'd have
-                * to worry about doing a potentially overlapping copy.
-                * This assumes the object is at most half the length of the
-                * tail.) */
-               if (obj->len > buf->tail[0].iov_len)
-                       goto out;
-               obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len - 
-                               obj->len;
-               if (read_bytes_from_xdr_buf(buf, offset + 4,
-                                       obj->data, obj->len))
-                       goto out;
+               return -EFAULT;
+       if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
+               return -EFAULT;
 
-       }
+       /* Is the obj contained entirely in the head? */
+       obj->data = subbuf.head[0].iov_base;
+       if (subbuf.head[0].iov_len == obj->len)
+               return 0;
+       /* ..or is the obj contained entirely in the tail? */
+       obj->data = subbuf.tail[0].iov_base;
+       if (subbuf.tail[0].iov_len == obj->len)
+               return 0;
+
+       /* use end of tail as storage for obj:
+        * (We don't copy to the beginning because then we'd have
+        * to worry about doing a potentially overlapping copy.
+        * This assumes the object is at most half the length of the
+        * tail.) */
+       if (obj->len > buf->buflen - buf->len)
+               return -ENOMEM;
+       if (buf->tail[0].iov_len != 0)
+               obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
+       else
+               obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
+       __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
        return 0;
-out:
-       return -1;
 }
 
 /* Returns 0 on success, or else a negative error code. */
@@ -1020,3 +1021,71 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
 
        return xdr_xcode_array2(buf, base, desc, 1);
 }
+
+int
+xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
+                int (*actor)(struct scatterlist *, void *), void *data)
+{
+       int i, ret = 0;
+       unsigned page_len, thislen, page_offset;
+       struct scatterlist      sg[1];
+
+       if (offset >= buf->head[0].iov_len) {
+               offset -= buf->head[0].iov_len;
+       } else {
+               thislen = buf->head[0].iov_len - offset;
+               if (thislen > len)
+                       thislen = len;
+               sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
+               ret = actor(sg, data);
+               if (ret)
+                       goto out;
+               offset = 0;
+               len -= thislen;
+       }
+       if (len == 0)
+               goto out;
+
+       if (offset >= buf->page_len) {
+               offset -= buf->page_len;
+       } else {
+               page_len = buf->page_len - offset;
+               if (page_len > len)
+                       page_len = len;
+               len -= page_len;
+               page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
+               i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
+               thislen = PAGE_CACHE_SIZE - page_offset;
+               do {
+                       if (thislen > page_len)
+                               thislen = page_len;
+                       sg->page = buf->pages[i];
+                       sg->offset = page_offset;
+                       sg->length = thislen;
+                       ret = actor(sg, data);
+                       if (ret)
+                               goto out;
+                       page_len -= thislen;
+                       i++;
+                       page_offset = 0;
+                       thislen = PAGE_CACHE_SIZE;
+               } while (page_len != 0);
+               offset = 0;
+       }
+       if (len == 0)
+               goto out;
+       if (offset < buf->tail[0].iov_len) {
+               thislen = buf->tail[0].iov_len - offset;
+               if (thislen > len)
+                       thislen = len;
+               sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
+               ret = actor(sg, data);
+               len -= thislen;
+       }
+       if (len != 0)
+               ret = -EINVAL;
+out:
+       return ret;
+}
+EXPORT_SYMBOL(xdr_process_buf);
+
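
xdr_process_buf() is new: it walks an arbitrary byte range of an xdr_buf and hands each contiguous piece to an actor callback as a one-entry scatterlist, so a caller can checksum or transform data that is spread across the head, the page array and the tail without flattening it first. A minimal sketch with a byte-counting actor; the names are hypothetical:

#include <linux/scatterlist.h>

static int count_bytes(struct scatterlist *sg, void *data)
{
        unsigned int *total = data;

        *total += sg->length;
        return 0;               /* returning non-zero aborts the walk */
}

/* usage:
 *      unsigned int total = 0;
 *      err = xdr_process_buf(buf, 0, buf->len, count_bytes, &total);
 */
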
index 80857470dc112f15fe18bbd91510e6147243e5e3..7a3999f0a4a2aabc7e156bdb459a68a5cab7d6a7 100644 (file)
@@ -459,7 +459,6 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
                if (to->to_maxval && req->rq_timeout >= to->to_maxval)
                        req->rq_timeout = to->to_maxval;
                req->rq_retries++;
-               pprintk("RPC: %lu retrans\n", jiffies);
        } else {
                req->rq_timeout = to->to_initval;
                req->rq_retries = 0;
@@ -468,7 +467,6 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
                spin_lock_bh(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
                spin_unlock_bh(&xprt->transport_lock);
-               pprintk("RPC: %lu timeout\n", jiffies);
                status = -ETIMEDOUT;
        }
 
@@ -479,9 +477,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
        return status;
 }
 
-static void xprt_autoclose(void *args)
+static void xprt_autoclose(struct work_struct *work)
 {
-       struct rpc_xprt *xprt = (struct rpc_xprt *)args;
+       struct rpc_xprt *xprt =
+               container_of(work, struct rpc_xprt, task_cleanup);
 
        xprt_disconnect(xprt);
        xprt->ops->close(xprt);
@@ -891,39 +890,25 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i
  */
 struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to)
 {
-       int result;
        struct rpc_xprt *xprt;
        struct rpc_rqst *req;
 
-       if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) {
-               dprintk("RPC:      xprt_create_transport: no memory\n");
-               return ERR_PTR(-ENOMEM);
-       }
-       if (size <= sizeof(xprt->addr)) {
-               memcpy(&xprt->addr, ap, size);
-               xprt->addrlen = size;
-       } else {
-               kfree(xprt);
-               dprintk("RPC:      xprt_create_transport: address too large\n");
-               return ERR_PTR(-EBADF);
-       }
-
        switch (proto) {
        case IPPROTO_UDP:
-               result = xs_setup_udp(xprt, to);
+               xprt = xs_setup_udp(ap, size, to);
                break;
        case IPPROTO_TCP:
-               result = xs_setup_tcp(xprt, to);
+               xprt = xs_setup_tcp(ap, size, to);
                break;
        default:
                printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
                                proto);
                return ERR_PTR(-EIO);
        }
-       if (result) {
-               kfree(xprt);
-               dprintk("RPC:      xprt_create_transport: failed, %d\n", result);
-               return ERR_PTR(result);
+       if (IS_ERR(xprt)) {
+               dprintk("RPC:      xprt_create_transport: failed, %ld\n",
+                               -PTR_ERR(xprt));
+               return xprt;
        }
 
        kref_init(&xprt->kref);
@@ -932,7 +917,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si
 
        INIT_LIST_HEAD(&xprt->free);
        INIT_LIST_HEAD(&xprt->recv);
-       INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
+       INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
        init_timer(&xprt->timer);
        xprt->timer.function = xprt_init_autodisconnect;
        xprt->timer.data = (unsigned long) xprt;
@@ -969,8 +954,11 @@ static void xprt_destroy(struct kref *kref)
        dprintk("RPC:      destroying transport %p\n", xprt);
        xprt->shutdown = 1;
        del_timer_sync(&xprt->timer);
+
+       /*
+        * Tear down transport state and free the rpc_xprt
+        */
        xprt->ops->destroy(xprt);
-       kfree(xprt);
 }
 
 /**
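
Transport creation is inverted: xprt_create_transport() no longer kzalloc()s a bare struct rpc_xprt and asks the setup routine to fill it in. xs_setup_udp() and xs_setup_tcp() now allocate the (larger, transport-private) structure themselves and return either a usable rpc_xprt or an ERR_PTR-encoded errno, and the transport's ->destroy() method frees what its setup allocated, which is why the kfree(xprt) disappears from xprt_destroy(). A sketch of the resulting calling convention, using the parameter names from the hunk above:

struct rpc_xprt *xprt;

xprt = xs_setup_udp(ap, size, to);
if (IS_ERR(xprt))
        return xprt;    /* already ERR_PTR-encoded; nothing to kfree() */
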
index 757fc91ef25d8621e9af5f1e162bda754b90f0fa..49cabffd7fdb87a884a9416b9a46b5848d509506 100644 (file)
@@ -45,6 +45,92 @@ unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
 unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
 unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
 
+/*
+ * We can register our own files under /proc/sys/sunrpc by
+ * calling register_sysctl_table() again.  The files in that
+ * directory become the union of all files registered there.
+ *
+ * We simply need to make sure that we don't collide with
+ * someone else's file names!
+ */
+
+#ifdef RPC_DEBUG
+
+static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
+static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
+static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
+static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
+
+static struct ctl_table_header *sunrpc_table_header;
+
+/*
+ * FIXME: changing the UDP slot table size should also resize the UDP
+ *        socket buffers for existing UDP transports
+ */
+static ctl_table xs_tunables_table[] = {
+       {
+               .ctl_name       = CTL_SLOTTABLE_UDP,
+               .procname       = "udp_slot_table_entries",
+               .data           = &xprt_udp_slot_table_entries,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &min_slot_table_size,
+               .extra2         = &max_slot_table_size
+       },
+       {
+               .ctl_name       = CTL_SLOTTABLE_TCP,
+               .procname       = "tcp_slot_table_entries",
+               .data           = &xprt_tcp_slot_table_entries,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &min_slot_table_size,
+               .extra2         = &max_slot_table_size
+       },
+       {
+               .ctl_name       = CTL_MIN_RESVPORT,
+               .procname       = "min_resvport",
+               .data           = &xprt_min_resvport,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &xprt_min_resvport_limit,
+               .extra2         = &xprt_max_resvport_limit
+       },
+       {
+               .ctl_name       = CTL_MAX_RESVPORT,
+               .procname       = "max_resvport",
+               .data           = &xprt_max_resvport,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec_minmax,
+               .strategy       = &sysctl_intvec,
+               .extra1         = &xprt_min_resvport_limit,
+               .extra2         = &xprt_max_resvport_limit
+       },
+       {
+               .ctl_name = 0,
+       },
+};
+
+static ctl_table sunrpc_table[] = {
+       {
+               .ctl_name       = CTL_SUNRPC,
+               .procname       = "sunrpc",
+               .mode           = 0555,
+               .child          = xs_tunables_table
+       },
+       {
+               .ctl_name = 0,
+       },
+};
+
+#endif
+
 /*
  * How many times to try sending a request on a socket before waiting
  * for the socket buffer to clear.
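
The udp/tcp slot-table and reserved-port sysctls removed from net/sunrpc/sysctl.c above reappear here, next to the code they tune, guarded by RPC_DEBUG. As the new comment explains, several tables may be registered under the same /proc/sys/sunrpc directory and their entries are unioned, so the only constraint is that the file names not collide. A sketch of the registration half of init_socket_xprt()/cleanup_socket_xprt() (declared in sunrpc_syms.c above); the insert_at_head flag and the guards are illustrative, and note that this kernel generation still uses the two-argument register_sysctl_table():

int init_socket_xprt(void)
{
#ifdef RPC_DEBUG
        if (!sunrpc_table_header)
                sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
#endif
        return 0;
}

void cleanup_socket_xprt(void)
{
#ifdef RPC_DEBUG
        if (sunrpc_table_header) {
                unregister_sysctl_table(sunrpc_table_header);
                sunrpc_table_header = NULL;
        }
#endif
}
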
@@ -125,6 +211,55 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 }
 #endif
 
+struct sock_xprt {
+       struct rpc_xprt         xprt;
+
+       /*
+        * Network layer
+        */
+       struct socket *         sock;
+       struct sock *           inet;
+
+       /*
+        * State of TCP reply receive
+        */
+       __be32                  tcp_fraghdr,
+                               tcp_xid;
+
+       u32                     tcp_offset,
+                               tcp_reclen;
+
+       unsigned long           tcp_copied,
+                               tcp_flags;
+
+       /*
+        * Connection of transports
+        */
+       struct delayed_work     connect_worker;
+       unsigned short          port;
+
+       /*
+        * UDP socket buffer size parameters
+        */
+       size_t                  rcvsize,
+                               sndsize;
+
+       /*
+        * Saved socket callback addresses
+        */
+       void                    (*old_data_ready)(struct sock *, int);
+       void                    (*old_state_change)(struct sock *);
+       void                    (*old_write_space)(struct sock *);
+};
+
+/*
+ * TCP receive state flags
+ */
+#define TCP_RCV_LAST_FRAG      (1UL << 0)
+#define TCP_RCV_COPY_FRAGHDR   (1UL << 1)
+#define TCP_RCV_COPY_XID       (1UL << 2)
+#define TCP_RCV_COPY_DATA      (1UL << 3)
+
 static void xs_format_peer_addresses(struct rpc_xprt *xprt)
 {
        struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
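
All of the socket-transport state (the socket and sock pointers, the TCP record-parsing state, the connect worker, port and buffer-size bookkeeping, and the saved sk callbacks) moves out of the generic struct rpc_xprt into this private struct sock_xprt, which embeds the generic transport as its first member. Code handed a struct rpc_xprt * gets the private structure back with container_of(), as the following hunks do throughout; a sketch with a hypothetical helper name:

static inline struct sock_xprt *xs_transport(struct rpc_xprt *xprt)
{
        return container_of(xprt, struct sock_xprt, xprt);
}

/* e.g.   struct sock_xprt *transport = xs_transport(req->rq_xprt); */
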
@@ -168,37 +303,52 @@ static void xs_free_peer_addresses(struct rpc_xprt *xprt)
 
 #define XS_SENDMSG_FLAGS       (MSG_DONTWAIT | MSG_NOSIGNAL)
 
-static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
 {
-       struct kvec iov = {
-               .iov_base       = xdr->head[0].iov_base + base,
-               .iov_len        = len - base,
-       };
        struct msghdr msg = {
                .msg_name       = addr,
                .msg_namelen    = addrlen,
-               .msg_flags      = XS_SENDMSG_FLAGS,
+               .msg_flags      = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
+       };
+       struct kvec iov = {
+               .iov_base       = vec->iov_base + base,
+               .iov_len        = vec->iov_len - base,
        };
 
-       if (xdr->len > len)
-               msg.msg_flags |= MSG_MORE;
-
-       if (likely(iov.iov_len))
+       if (iov.iov_len != 0)
                return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
        return kernel_sendmsg(sock, &msg, NULL, 0, 0);
 }
 
-static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
+static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
 {
-       struct kvec iov = {
-               .iov_base       = xdr->tail[0].iov_base + base,
-               .iov_len        = len - base,
-       };
-       struct msghdr msg = {
-               .msg_flags      = XS_SENDMSG_FLAGS,
-       };
+       struct page **ppage;
+       unsigned int remainder;
+       int err, sent = 0;
+
+       remainder = xdr->page_len - base;
+       base += xdr->page_base;
+       ppage = xdr->pages + (base >> PAGE_SHIFT);
+       base &= ~PAGE_MASK;
+       for(;;) {
+               unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
+               int flags = XS_SENDMSG_FLAGS;
 
-       return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
+               remainder -= len;
+               if (remainder != 0 || more)
+                       flags |= MSG_MORE;
+               err = sock->ops->sendpage(sock, *ppage, base, len, flags);
+               if (remainder == 0 || err != len)
+                       break;
+               sent += err;
+               ppage++;
+               base = 0;
+       }
+       if (sent == 0)
+               return err;
+       if (err > 0)
+               sent += err;
+       return sent;
 }
 
 /**
@@ -210,76 +360,51 @@ static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int b
  * @base: starting position in the buffer
  *
  */
-static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
+static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
 {
-       struct page **ppage = xdr->pages;
-       unsigned int len, pglen = xdr->page_len;
-       int err, ret = 0;
+       unsigned int remainder = xdr->len - base;
+       int err, sent = 0;
 
        if (unlikely(!sock))
                return -ENOTCONN;
 
        clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+       if (base != 0) {
+               addr = NULL;
+               addrlen = 0;
+       }
 
-       len = xdr->head[0].iov_len;
-       if (base < len || (addr != NULL && base == 0)) {
-               err = xs_send_head(sock, addr, addrlen, xdr, base, len);
-               if (ret == 0)
-                       ret = err;
-               else if (err > 0)
-                       ret += err;
-               if (err != (len - base))
+       if (base < xdr->head[0].iov_len || addr != NULL) {
+               unsigned int len = xdr->head[0].iov_len - base;
+               remainder -= len;
+               err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
+               if (remainder == 0 || err != len)
                        goto out;
+               sent += err;
                base = 0;
        } else
-               base -= len;
+               base -= xdr->head[0].iov_len;
 
-       if (unlikely(pglen == 0))
-               goto copy_tail;
-       if (unlikely(base >= pglen)) {
-               base -= pglen;
-               goto copy_tail;
-       }
-       if (base || xdr->page_base) {
-               pglen -= base;
-               base += xdr->page_base;
-               ppage += base >> PAGE_CACHE_SHIFT;
-               base &= ~PAGE_CACHE_MASK;
-       }
-
-       do {
-               int flags = XS_SENDMSG_FLAGS;
-
-               len = PAGE_CACHE_SIZE;
-               if (base)
-                       len -= base;
-               if (pglen < len)
-                       len = pglen;
-
-               if (pglen != len || xdr->tail[0].iov_len != 0)
-                       flags |= MSG_MORE;
-
-               err = kernel_sendpage(sock, *ppage, base, len, flags);
-               if (ret == 0)
-                       ret = err;
-               else if (err > 0)
-                       ret += err;
-               if (err != len)
+       if (base < xdr->page_len) {
+               unsigned int len = xdr->page_len - base;
+               remainder -= len;
+               err = xs_send_pagedata(sock, xdr, base, remainder != 0);
+               if (remainder == 0 || err != len)
                        goto out;
+               sent += err;
                base = 0;
-               ppage++;
-       } while ((pglen -= len) != 0);
-copy_tail:
-       len = xdr->tail[0].iov_len;
-       if (base < len) {
-               err = xs_send_tail(sock, xdr, base, len);
-               if (ret == 0)
-                       ret = err;
-               else if (err > 0)
-                       ret += err;
-       }
+       } else
+               base -= xdr->page_len;
+
+       if (base >= xdr->tail[0].iov_len)
+               return sent;
+       err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
 out:
-       return ret;
+       if (sent == 0)
+               return err;
+       if (err > 0)
+               sent += err;
+       return sent;
 }
 
 /**
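
xs_send_head() and xs_send_tail() are replaced by xs_send_kvec() and xs_send_pagedata(), and xs_sendpages() now keeps an explicit running total: if any bytes went out it returns that count (so the caller can advance rq_bytes_sent), otherwise it returns the error from the last send. Each piece also tells the stack whether more data follows by setting MSG_MORE, so the head, pages and tail of one request can be coalesced on the wire. The basic shape of the per-chunk send; the surrounding variables are hypothetical:

struct msghdr msg = {
        .msg_flags      = MSG_DONTWAIT | MSG_NOSIGNAL,
};

if (more_to_come)                       /* another kvec or page follows */
        msg.msg_flags |= MSG_MORE;
err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
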
@@ -291,19 +416,20 @@ static void xs_nospace(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
        dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
                        req->rq_slen);
 
-       if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
+       if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
                /* Protect against races with write_space */
                spin_lock_bh(&xprt->transport_lock);
 
                /* Don't race with disconnect */
                if (!xprt_connected(xprt))
                        task->tk_status = -ENOTCONN;
-               else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
+               else if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
                        xprt_wait_for_buffer_space(task);
 
                spin_unlock_bh(&xprt->transport_lock);
@@ -327,6 +453,7 @@ static int xs_udp_send_request(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status;
 
@@ -335,8 +462,10 @@ static int xs_udp_send_request(struct rpc_task *task)
                                req->rq_svec->iov_len);
 
        req->rq_xtime = jiffies;
-       status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
-                               xprt->addrlen, xdr, req->rq_bytes_sent);
+       status = xs_sendpages(transport->sock,
+                             (struct sockaddr *) &xprt->addr,
+                             xprt->addrlen, xdr,
+                             req->rq_bytes_sent);
 
        dprintk("RPC:      xs_udp_send_request(%u) = %d\n",
                        xdr->len - req->rq_bytes_sent, status);
@@ -392,6 +521,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status, retry = 0;
 
@@ -406,8 +536,8 @@ static int xs_tcp_send_request(struct rpc_task *task)
         * called sendmsg(). */
        while (1) {
                req->rq_xtime = jiffies;
-               status = xs_sendpages(xprt->sock, NULL, 0, xdr,
-                                               req->rq_bytes_sent);
+               status = xs_sendpages(transport->sock,
+                                       NULL, 0, xdr, req->rq_bytes_sent);
 
                dprintk("RPC:      xs_tcp_send_request(%u) = %d\n",
                                xdr->len - req->rq_bytes_sent, status);
@@ -485,8 +615,9 @@ out_release:
  */
 static void xs_close(struct rpc_xprt *xprt)
 {
-       struct socket *sock = xprt->sock;
-       struct sock *sk = xprt->inet;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       struct socket *sock = transport->sock;
+       struct sock *sk = transport->inet;
 
        if (!sk)
                goto clear_close_wait;
@@ -494,13 +625,13 @@ static void xs_close(struct rpc_xprt *xprt)
        dprintk("RPC:      xs_close xprt %p\n", xprt);
 
        write_lock_bh(&sk->sk_callback_lock);
-       xprt->inet = NULL;
-       xprt->sock = NULL;
+       transport->inet = NULL;
+       transport->sock = NULL;
 
        sk->sk_user_data = NULL;
-       sk->sk_data_ready = xprt->old_data_ready;
-       sk->sk_state_change = xprt->old_state_change;
-       sk->sk_write_space = xprt->old_write_space;
+       sk->sk_data_ready = transport->old_data_ready;
+       sk->sk_state_change = transport->old_state_change;
+       sk->sk_write_space = transport->old_write_space;
        write_unlock_bh(&sk->sk_callback_lock);
 
        sk->sk_no_check = 0;
@@ -519,15 +650,18 @@ clear_close_wait:
  */
 static void xs_destroy(struct rpc_xprt *xprt)
 {
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
        dprintk("RPC:      xs_destroy xprt %p\n", xprt);
 
-       cancel_delayed_work(&xprt->connect_worker);
+       cancel_delayed_work(&transport->connect_worker);
        flush_scheduled_work();
 
        xprt_disconnect(xprt);
        xs_close(xprt);
        xs_free_peer_addresses(xprt);
        kfree(xprt->slot);
+       kfree(xprt);
 }
 
 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -603,91 +737,75 @@ static void xs_udp_data_ready(struct sock *sk, int len)
        read_unlock(&sk->sk_callback_lock);
 }
 
-static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
-{
-       if (len > desc->count)
-               len = desc->count;
-       if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
-               dprintk("RPC:      failed to copy %zu bytes from skb. %zu bytes remain\n",
-                               len, desc->count);
-               return 0;
-       }
-       desc->offset += len;
-       desc->count -= len;
-       dprintk("RPC:      copied %zu bytes from skb. %zu bytes remain\n",
-                       len, desc->count);
-       return len;
-}
-
-static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
 {
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        size_t len, used;
        char *p;
 
-       p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
-       len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
-       used = xs_tcp_copy_data(desc, p, len);
-       xprt->tcp_offset += used;
+       p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
+       len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
+       used = xdr_skb_read_bits(desc, p, len);
+       transport->tcp_offset += used;
        if (used != len)
                return;
 
-       xprt->tcp_reclen = ntohl(xprt->tcp_recm);
-       if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
-               xprt->tcp_flags |= XPRT_LAST_FRAG;
+       transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
+       if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
+               transport->tcp_flags |= TCP_RCV_LAST_FRAG;
        else
-               xprt->tcp_flags &= ~XPRT_LAST_FRAG;
-       xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
+               transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
+       transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
 
-       xprt->tcp_flags &= ~XPRT_COPY_RECM;
-       xprt->tcp_offset = 0;
+       transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
+       transport->tcp_offset = 0;
 
        /* Sanity check of the record length */
-       if (unlikely(xprt->tcp_reclen < 4)) {
+       if (unlikely(transport->tcp_reclen < 4)) {
                dprintk("RPC:      invalid TCP record fragment length\n");
                xprt_disconnect(xprt);
                return;
        }
        dprintk("RPC:      reading TCP record fragment of length %d\n",
-                       xprt->tcp_reclen);
+                       transport->tcp_reclen);
 }
 
-static void xs_tcp_check_recm(struct rpc_xprt *xprt)
+static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
 {
-       dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
-                       xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
-       if (xprt->tcp_offset == xprt->tcp_reclen) {
-               xprt->tcp_flags |= XPRT_COPY_RECM;
-               xprt->tcp_offset = 0;
-               if (xprt->tcp_flags & XPRT_LAST_FRAG) {
-                       xprt->tcp_flags &= ~XPRT_COPY_DATA;
-                       xprt->tcp_flags |= XPRT_COPY_XID;
-                       xprt->tcp_copied = 0;
+       if (transport->tcp_offset == transport->tcp_reclen) {
+               transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
+               transport->tcp_offset = 0;
+               if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
+                       transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+                       transport->tcp_flags |= TCP_RCV_COPY_XID;
+                       transport->tcp_copied = 0;
                }
        }
 }
 
-static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
 {
        size_t len, used;
        char *p;
 
-       len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
+       len = sizeof(transport->tcp_xid) - transport->tcp_offset;
        dprintk("RPC:      reading XID (%Zu bytes)\n", len);
-       p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
-       used = xs_tcp_copy_data(desc, p, len);
-       xprt->tcp_offset += used;
+       p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
+       used = xdr_skb_read_bits(desc, p, len);
+       transport->tcp_offset += used;
        if (used != len)
                return;
-       xprt->tcp_flags &= ~XPRT_COPY_XID;
-       xprt->tcp_flags |= XPRT_COPY_DATA;
-       xprt->tcp_copied = 4;
+       transport->tcp_flags &= ~TCP_RCV_COPY_XID;
+       transport->tcp_flags |= TCP_RCV_COPY_DATA;
+       transport->tcp_copied = 4;
        dprintk("RPC:      reading reply for XID %08x\n",
-                                               ntohl(xprt->tcp_xid));
-       xs_tcp_check_recm(xprt);
+                       ntohl(transport->tcp_xid));
+       xs_tcp_check_fraghdr(transport);
 }
 
-static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
 {
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct rpc_rqst *req;
        struct xdr_buf *rcvbuf;
        size_t len;
@@ -695,116 +813,118 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc
 
        /* Find and lock the request corresponding to this xid */
        spin_lock(&xprt->transport_lock);
-       req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
+       req = xprt_lookup_rqst(xprt, transport->tcp_xid);
        if (!req) {
-               xprt->tcp_flags &= ~XPRT_COPY_DATA;
+               transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
                dprintk("RPC:      XID %08x request not found!\n",
-                               ntohl(xprt->tcp_xid));
+                               ntohl(transport->tcp_xid));
                spin_unlock(&xprt->transport_lock);
                return;
        }
 
        rcvbuf = &req->rq_private_buf;
        len = desc->count;
-       if (len > xprt->tcp_reclen - xprt->tcp_offset) {
-               skb_reader_t my_desc;
+       if (len > transport->tcp_reclen - transport->tcp_offset) {
+               struct xdr_skb_reader my_desc;
 
-               len = xprt->tcp_reclen - xprt->tcp_offset;
+               len = transport->tcp_reclen - transport->tcp_offset;
                memcpy(&my_desc, desc, sizeof(my_desc));
                my_desc.count = len;
-               r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-                                         &my_desc, xs_tcp_copy_data);
+               r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
+                                         &my_desc, xdr_skb_read_bits);
                desc->count -= r;
                desc->offset += r;
        } else
-               r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
-                                         desc, xs_tcp_copy_data);
+               r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
+                                         desc, xdr_skb_read_bits);
 
        if (r > 0) {
-               xprt->tcp_copied += r;
-               xprt->tcp_offset += r;
+               transport->tcp_copied += r;
+               transport->tcp_offset += r;
        }
        if (r != len) {
                /* Error when copying to the receive buffer,
                 * usually because we weren't able to allocate
                 * additional buffer pages. All we can do now
-                * is turn off XPRT_COPY_DATA, so the request
+                * is turn off TCP_RCV_COPY_DATA, so the request
                 * will not receive any additional updates,
                 * and time out.
                 * Any remaining data from this record will
                 * be discarded.
                 */
-               xprt->tcp_flags &= ~XPRT_COPY_DATA;
+               transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
                dprintk("RPC:      XID %08x truncated request\n",
-                               ntohl(xprt->tcp_xid));
+                               ntohl(transport->tcp_xid));
                dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-                               xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
+                               xprt, transport->tcp_copied, transport->tcp_offset,
+                                       transport->tcp_reclen);
                goto out;
        }
 
        dprintk("RPC:      XID %08x read %Zd bytes\n",
-                       ntohl(xprt->tcp_xid), r);
+                       ntohl(transport->tcp_xid), r);
        dprintk("RPC:      xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
-                       xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
-
-       if (xprt->tcp_copied == req->rq_private_buf.buflen)
-               xprt->tcp_flags &= ~XPRT_COPY_DATA;
-       else if (xprt->tcp_offset == xprt->tcp_reclen) {
-               if (xprt->tcp_flags & XPRT_LAST_FRAG)
-                       xprt->tcp_flags &= ~XPRT_COPY_DATA;
+                       xprt, transport->tcp_copied, transport->tcp_offset,
+                               transport->tcp_reclen);
+
+       if (transport->tcp_copied == req->rq_private_buf.buflen)
+               transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+       else if (transport->tcp_offset == transport->tcp_reclen) {
+               if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
+                       transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
        }
 
 out:
-       if (!(xprt->tcp_flags & XPRT_COPY_DATA))
-               xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
+       if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
+               xprt_complete_rqst(req->rq_task, transport->tcp_copied);
        spin_unlock(&xprt->transport_lock);
-       xs_tcp_check_recm(xprt);
+       xs_tcp_check_fraghdr(transport);
 }
 
-static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
+static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
 {
        size_t len;
 
-       len = xprt->tcp_reclen - xprt->tcp_offset;
+       len = transport->tcp_reclen - transport->tcp_offset;
        if (len > desc->count)
                len = desc->count;
        desc->count -= len;
        desc->offset += len;
-       xprt->tcp_offset += len;
+       transport->tcp_offset += len;
        dprintk("RPC:      discarded %Zu bytes\n", len);
-       xs_tcp_check_recm(xprt);
+       xs_tcp_check_fraghdr(transport);
 }
 
 static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
 {
        struct rpc_xprt *xprt = rd_desc->arg.data;
-       skb_reader_t desc = {
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       struct xdr_skb_reader desc = {
                .skb    = skb,
                .offset = offset,
                .count  = len,
-               .csum   = 0
        };
 
        dprintk("RPC:      xs_tcp_data_recv started\n");
        do {
                /* Read in a new fragment marker if necessary */
                /* Can we ever really expect to get completely empty fragments? */
-               if (xprt->tcp_flags & XPRT_COPY_RECM) {
+               if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
                        xs_tcp_read_fraghdr(xprt, &desc);
                        continue;
                }
                /* Read in the xid if necessary */
-               if (xprt->tcp_flags & XPRT_COPY_XID) {
-                       xs_tcp_read_xid(xprt, &desc);
+               if (transport->tcp_flags & TCP_RCV_COPY_XID) {
+                       xs_tcp_read_xid(transport, &desc);
                        continue;
                }
                /* Read in the request data */
-               if (xprt->tcp_flags & XPRT_COPY_DATA) {
+               if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
                        xs_tcp_read_request(xprt, &desc);
                        continue;
                }
                /* Skip over any trailing bytes on short reads */
-               xs_tcp_read_discard(xprt, &desc);
+               xs_tcp_read_discard(transport, &desc);
        } while (desc.count);
        dprintk("RPC:      xs_tcp_data_recv done\n");
        return len - desc.count;
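
The receive loop above is driven by the four-byte RPC record marker that xs_tcp_read_fraghdr() decodes: the top bit marks the last fragment of a record and the remaining 31 bits carry the fragment length. A minimal user-space sketch of that decoding follows; the two mask values are assumptions standing in for RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK, and nothing here is kernel code.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT 0x80000000U   /* assumed value of RPC_LAST_STREAM_FRAGMENT */
#define SIZE_MASK     0x7fffffffU   /* assumed value of RPC_FRAGMENT_SIZE_MASK */

int main(void)
{
        uint32_t wire = htonl(LAST_FRAGMENT | 200);  /* marker as sent on the wire */
        uint32_t host = ntohl(wire);

        uint32_t reclen = host & SIZE_MASK;          /* fragment length */
        int last = (host & LAST_FRAGMENT) != 0;      /* final fragment of this record? */

        printf("fragment length %u, last: %s\n", reclen, last ? "yes" : "no");
        return 0;
}
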
@@ -858,11 +978,16 @@ static void xs_tcp_state_change(struct sock *sk)
        case TCP_ESTABLISHED:
                spin_lock_bh(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
+                       struct sock_xprt *transport = container_of(xprt,
+                                       struct sock_xprt, xprt);
+
                        /* Reset TCP record info */
-                       xprt->tcp_offset = 0;
-                       xprt->tcp_reclen = 0;
-                       xprt->tcp_copied = 0;
-                       xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
+                       transport->tcp_offset = 0;
+                       transport->tcp_reclen = 0;
+                       transport->tcp_copied = 0;
+                       transport->tcp_flags =
+                               TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
                        xprt_wake_pending_tasks(xprt, 0);
                }
@@ -951,15 +1076,16 @@ static void xs_tcp_write_space(struct sock *sk)
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
 {
-       struct sock *sk = xprt->inet;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       struct sock *sk = transport->inet;
 
-       if (xprt->rcvsize) {
+       if (transport->rcvsize) {
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-               sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs *  2;
+               sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
        }
-       if (xprt->sndsize) {
+       if (transport->sndsize) {
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-               sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
+               sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
                sk->sk_write_space(sk);
        }
 }
@@ -974,12 +1100,14 @@ static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
  */
 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
 {
-       xprt->sndsize = 0;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
+       transport->sndsize = 0;
        if (sndsize)
-               xprt->sndsize = sndsize + 1024;
-       xprt->rcvsize = 0;
+               transport->sndsize = sndsize + 1024;
+       transport->rcvsize = 0;
        if (rcvsize)
-               xprt->rcvsize = rcvsize + 1024;
+               transport->rcvsize = rcvsize + 1024;
 
        xs_udp_do_set_buffer_size(xprt);
 }
@@ -1002,19 +1130,6 @@ static unsigned short xs_get_random_port(void)
        return rand + xprt_min_resvport;
 }
 
-/**
- * xs_print_peer_address - format an IPv4 address for printing
- * @xprt: generic transport
- * @format: flags field indicating which parts of the address to render
- */
-static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format)
-{
-       if (xprt->address_strings[format] != NULL)
-               return xprt->address_strings[format];
-       else
-               return "unprintable";
-}
-
 /**
  * xs_set_port - reset the port number in the remote endpoint address
  * @xprt: generic transport
@@ -1030,20 +1145,20 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
        sap->sin_port = htons(port);
 }
 
-static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
+static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock)
 {
        struct sockaddr_in myaddr = {
                .sin_family = AF_INET,
        };
        int err;
-       unsigned short port = xprt->port;
+       unsigned short port = transport->port;
 
        do {
                myaddr.sin_port = htons(port);
                err = kernel_bind(sock, (struct sockaddr *) &myaddr,
                                                sizeof(myaddr));
                if (err == 0) {
-                       xprt->port = port;
+                       transport->port = port;
                        dprintk("RPC:      xs_bindresvport bound to port %u\n",
                                        port);
                        return 0;
@@ -1052,22 +1167,53 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
                        port = xprt_max_resvport;
                else
                        port--;
-       } while (err == -EADDRINUSE && port != xprt->port);
+       } while (err == -EADDRINUSE && port != transport->port);
 
        dprintk("RPC:      can't bind to reserved port (%d).\n", -err);
        return err;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       BUG_ON(sk->sk_lock.owner != NULL);
+       switch (sk->sk_family) {
+       case AF_INET:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+                       &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+               break;
+
+       case AF_INET6:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+                       &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+               break;
+
+       default:
+               BUG();
+       }
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /**
  * xs_udp_connect_worker - set up a UDP socket
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_udp_connect_worker(void *args)
+static void xs_udp_connect_worker(struct work_struct *work)
 {
-       struct rpc_xprt *xprt = (struct rpc_xprt *) args;
-       struct socket *sock = xprt->sock;
+       struct sock_xprt *transport =
+               container_of(work, struct sock_xprt, connect_worker.work);
+       struct rpc_xprt *xprt = &transport->xprt;
+       struct socket *sock = transport->sock;
        int err, status = -EIO;
 
        if (xprt->shutdown || !xprt_bound(xprt))
@@ -1080,24 +1226,25 @@ static void xs_udp_connect_worker(void *args)
                dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
                goto out;
        }
+       xs_reclassify_socket(sock);
 
-       if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+       if (xprt->resvport && xs_bindresvport(transport, sock) < 0) {
                sock_release(sock);
                goto out;
        }
 
        dprintk("RPC:      worker connecting xprt %p to address: %s\n",
-                       xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+                       xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
 
-       if (!xprt->inet) {
+       if (!transport->inet) {
                struct sock *sk = sock->sk;
 
                write_lock_bh(&sk->sk_callback_lock);
 
                sk->sk_user_data = xprt;
-               xprt->old_data_ready = sk->sk_data_ready;
-               xprt->old_state_change = sk->sk_state_change;
-               xprt->old_write_space = sk->sk_write_space;
+               transport->old_data_ready = sk->sk_data_ready;
+               transport->old_state_change = sk->sk_state_change;
+               transport->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_udp_data_ready;
                sk->sk_write_space = xs_udp_write_space;
                sk->sk_no_check = UDP_CSUM_NORCV;
@@ -1106,8 +1253,8 @@ static void xs_udp_connect_worker(void *args)
                xprt_set_connected(xprt);
 
                /* Reset to new socket */
-               xprt->sock = sock;
-               xprt->inet = sk;
+               transport->sock = sock;
+               transport->inet = sk;
 
                write_unlock_bh(&sk->sk_callback_lock);
        }
@@ -1125,7 +1272,7 @@ out:
 static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
 {
        int result;
-       struct socket *sock = xprt->sock;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct sockaddr any;
 
        dprintk("RPC:      disconnecting xprt %p to reuse port\n", xprt);
@@ -1136,7 +1283,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
         */
        memset(&any, 0, sizeof(any));
        any.sa_family = AF_UNSPEC;
-       result = kernel_connect(sock, &any, sizeof(any), 0);
+       result = kernel_connect(transport->sock, &any, sizeof(any), 0);
        if (result)
                dprintk("RPC:      AF_UNSPEC connect return code %d\n",
                                result);
@@ -1144,27 +1291,30 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
 
 /**
  * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
- * @args: RPC transport to connect
+ * @work: RPC transport to connect
  *
  * Invoked by a work queue tasklet.
  */
-static void xs_tcp_connect_worker(void *args)
+static void xs_tcp_connect_worker(struct work_struct *work)
 {
-       struct rpc_xprt *xprt = (struct rpc_xprt *)args;
-       struct socket *sock = xprt->sock;
+       struct sock_xprt *transport =
+               container_of(work, struct sock_xprt, connect_worker.work);
+       struct rpc_xprt *xprt = &transport->xprt;
+       struct socket *sock = transport->sock;
        int err, status = -EIO;
 
        if (xprt->shutdown || !xprt_bound(xprt))
                goto out;
 
-       if (!xprt->sock) {
+       if (!sock) {
                /* start from scratch */
                if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
                        dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
                        goto out;
                }
+               xs_reclassify_socket(sock);
 
-               if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
+               if (xprt->resvport && xs_bindresvport(transport, sock) < 0) {
                        sock_release(sock);
                        goto out;
                }
@@ -1173,17 +1323,17 @@ static void xs_tcp_connect_worker(void *args)
                xs_tcp_reuse_connection(xprt);
 
        dprintk("RPC:      worker connecting xprt %p to address: %s\n",
-                       xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+                       xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
 
-       if (!xprt->inet) {
+       if (!transport->inet) {
                struct sock *sk = sock->sk;
 
                write_lock_bh(&sk->sk_callback_lock);
 
                sk->sk_user_data = xprt;
-               xprt->old_data_ready = sk->sk_data_ready;
-               xprt->old_state_change = sk->sk_state_change;
-               xprt->old_write_space = sk->sk_write_space;
+               transport->old_data_ready = sk->sk_data_ready;
+               transport->old_state_change = sk->sk_state_change;
+               transport->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_tcp_data_ready;
                sk->sk_state_change = xs_tcp_state_change;
                sk->sk_write_space = xs_tcp_write_space;
@@ -1198,8 +1348,8 @@ static void xs_tcp_connect_worker(void *args)
                xprt_clear_connected(xprt);
 
                /* Reset to new socket */
-               xprt->sock = sock;
-               xprt->inet = sk;
+               transport->sock = sock;
+               transport->inet = sk;
 
                write_unlock_bh(&sk->sk_callback_lock);
        }
@@ -1248,21 +1398,22 @@ out_clear:
 static void xs_connect(struct rpc_task *task)
 {
        struct rpc_xprt *xprt = task->tk_xprt;
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
        if (xprt_test_and_set_connecting(xprt))
                return;
 
-       if (xprt->sock != NULL) {
+       if (transport->sock != NULL) {
                dprintk("RPC:      xs_connect delayed xprt %p for %lu seconds\n",
                                xprt, xprt->reestablish_timeout / HZ);
-               schedule_delayed_work(&xprt->connect_worker,
+               schedule_delayed_work(&transport->connect_worker,
                                        xprt->reestablish_timeout);
                xprt->reestablish_timeout <<= 1;
                if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
        } else {
                dprintk("RPC:      xs_connect scheduled xprt %p\n", xprt);
-               schedule_work(&xprt->connect_worker);
+               schedule_delayed_work(&transport->connect_worker, 0);
 
                /* flush_scheduled_work can sleep... */
                if (!RPC_IS_ASYNC(task))
@@ -1278,8 +1429,10 @@ static void xs_connect(struct rpc_task *task)
  */
 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 {
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+
        seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
-                       xprt->port,
+                       transport->port,
                        xprt->stat.bind_count,
                        xprt->stat.sends,
                        xprt->stat.recvs,
@@ -1296,13 +1449,14 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
  */
 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 {
+       struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        long idle_time = 0;
 
        if (xprt_connected(xprt))
                idle_time = (long)(jiffies - xprt->last_used) / HZ;
 
        seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
-                       xprt->port,
+                       transport->port,
                        xprt->stat.bind_count,
                        xprt->stat.connect_count,
                        xprt->stat.connect_time,
@@ -1316,7 +1470,6 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 
 static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
-       .print_addr             = xs_print_peer_address,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
        .rpcbind                = rpc_getport,
@@ -1334,7 +1487,6 @@ static struct rpc_xprt_ops xs_udp_ops = {
 };
 
 static struct rpc_xprt_ops xs_tcp_ops = {
-       .print_addr             = xs_print_peer_address,
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
        .rpcbind                = rpc_getport,
@@ -1349,33 +1501,64 @@ static struct rpc_xprt_ops xs_tcp_ops = {
        .print_stats            = xs_tcp_print_stats,
 };
 
+static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size)
+{
+       struct rpc_xprt *xprt;
+       struct sock_xprt *new;
+
+       if (addrlen > sizeof(xprt->addr)) {
+               dprintk("RPC:      xs_setup_xprt: address too large\n");
+               return ERR_PTR(-EBADF);
+       }
+
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (new == NULL) {
+               dprintk("RPC:      xs_setup_xprt: couldn't allocate rpc_xprt\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       xprt = &new->xprt;
+
+       xprt->max_reqs = slot_table_size;
+       xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
+       if (xprt->slot == NULL) {
+               kfree(xprt);
+               dprintk("RPC:      xs_setup_xprt: couldn't allocate slot table\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       memcpy(&xprt->addr, addr, addrlen);
+       xprt->addrlen = addrlen;
+       new->port = xs_get_random_port();
+
+       return xprt;
+}
+
 /**
  * xs_setup_udp - Set up transport to use a UDP socket
- * @xprt: transport to set up
+ * @addr: address of remote server
+ * @addrlen: length of address in bytes
  * @to:   timeout parameters
  *
  */
-int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
 {
-       size_t slot_table_size;
-       struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
+       struct rpc_xprt *xprt;
+       struct sock_xprt *transport;
 
-       xprt->max_reqs = xprt_udp_slot_table_entries;
-       slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
-       xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
-       if (xprt->slot == NULL)
-               return -ENOMEM;
+       xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries);
+       if (IS_ERR(xprt))
+               return xprt;
+       transport = container_of(xprt, struct sock_xprt, xprt);
 
-       if (ntohs(addr->sin_port) != 0)
+       if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
                xprt_set_bound(xprt);
-       xprt->port = xs_get_random_port();
 
        xprt->prot = IPPROTO_UDP;
        xprt->tsh_size = 0;
        /* XXX: header size can vary due to auth type, IPv6, etc. */
        xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
 
-       INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
+       INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_UDP_CONN_TO;
        xprt->reestablish_timeout = XS_UDP_REEST_TO;
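
xs_setup_xprt() above allocates the containing struct sock_xprt and hands callers only the embedded struct rpc_xprt; every method in this file then recovers the container with container_of(xprt, struct sock_xprt, xprt). A minimal user-space sketch of that idiom, with illustrative types and names rather than the kernel's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };
struct outer { int extra; struct inner in; };   /* "outer" plays the role of sock_xprt */

int main(void)
{
        struct outer *o = calloc(1, sizeof(*o));
        o->extra = 42;

        struct inner *i = &o->in;                       /* only the inner pointer is passed around */
        struct outer *back = container_of(i, struct outer, in);

        printf("recovered extra = %d\n", back->extra);  /* prints 42 */
        free(o);
        return 0;
}
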
@@ -1390,37 +1573,36 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 
        xs_format_peer_addresses(xprt);
        dprintk("RPC:      set up transport to address %s\n",
-                       xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+                       xprt->address_strings[RPC_DISPLAY_ALL]);
 
-       return 0;
+       return xprt;
 }
 
 /**
  * xs_setup_tcp - Set up transport to use a TCP socket
- * @xprt: transport to set up
+ * @addr: address of remote server
+ * @addrlen: length of address in bytes
  * @to: timeout parameters
  *
  */
-int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
+struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
 {
-       size_t slot_table_size;
-       struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
+       struct rpc_xprt *xprt;
+       struct sock_xprt *transport;
 
-       xprt->max_reqs = xprt_tcp_slot_table_entries;
-       slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
-       xprt->slot = kzalloc(slot_table_size, GFP_KERNEL);
-       if (xprt->slot == NULL)
-               return -ENOMEM;
+       xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries);
+       if (IS_ERR(xprt))
+               return xprt;
+       transport = container_of(xprt, struct sock_xprt, xprt);
 
-       if (ntohs(addr->sin_port) != 0)
+       if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
                xprt_set_bound(xprt);
-       xprt->port = xs_get_random_port();
 
        xprt->prot = IPPROTO_TCP;
        xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
        xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
 
-       INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
+       INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_TCP_CONN_TO;
        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
@@ -1435,7 +1617,40 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 
        xs_format_peer_addresses(xprt);
        dprintk("RPC:      set up transport to address %s\n",
-                       xs_print_peer_address(xprt, RPC_DISPLAY_ALL));
+                       xprt->address_strings[RPC_DISPLAY_ALL]);
+
+       return xprt;
+}
+
+/**
+ * init_socket_xprt - set up xprtsock's sysctls
+ *
+ */
+int init_socket_xprt(void)
+{
+#ifdef RPC_DEBUG
+       if (!sunrpc_table_header) {
+               sunrpc_table_header = register_sysctl_table(sunrpc_table, 1);
+#ifdef CONFIG_PROC_FS
+               if (sunrpc_table[0].de)
+                       sunrpc_table[0].de->owner = THIS_MODULE;
+#endif
+       }
+#endif
 
        return 0;
 }
+
+/**
+ * cleanup_socket_xprt - remove xprtsock's sysctls
+ *
+ */
+void cleanup_socket_xprt(void)
+{
+#ifdef RPC_DEBUG
+       if (sunrpc_table_header) {
+               unregister_sysctl_table(sunrpc_table_header);
+               sunrpc_table_header = NULL;
+       }
+#endif
+}
index ae6ddf00a1aaea8aaaa17d708e4d4eaca4bfe059..eb80778d6d9c6d22781b205a21b139bd4c87f052 100644 (file)
@@ -42,7 +42,7 @@ struct queue_item {
        unsigned long data;
 };
 
-static kmem_cache_t *tipc_queue_item_cache;
+static struct kmem_cache *tipc_queue_item_cache;
 static struct list_head signal_queue_head;
 static DEFINE_SPINLOCK(qitem_lock);
 static int handler_enabled = 0;
index 316211d9f17d97ffeab6823778c03113cdd5a76a..769cdd62c1bbab511af3c675433e2af191174840 100644 (file)
 
 #define KMEM_SAFETYZONE 8
 
-/***********FOR DEBUGGING PURPOSES*********************************************
-static void * dbg_kmalloc(unsigned int size, int prio, int line) {
-       int i = 0;
-       void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio);
-       char * c1 = v;
-       c1 += sizeof(unsigned int);
-       *((unsigned int *)v) = size;
-
-       for (i = 0; i < KMEM_SAFETYZONE; i++) {
-               c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D';
-               c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F';
-               c1 += 8;
-       }
-       c1 += size;
-       for (i = 0; i < KMEM_SAFETYZONE; i++) {
-               c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G';
-               c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L';
-               c1 += 8;
-       }
-       v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8;
-       printk(KERN_INFO "line %d  kmalloc(%d,%d) = %p\n",line,size,prio,v);
-       return v;
-}
-static void dbg_kfree(void * v, int line) {
-       unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8));
-       unsigned int size = *sp;
-       char * c1 = ((char *)v) - KMEM_SAFETYZONE*8;
-       int i = 0;
-       for (i = 0; i < KMEM_SAFETYZONE; i++) {
-               if (   c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D'
-                   || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') {
-                       printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v);
-                       printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
-                                       c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
-               }
-               c1 += 8;
-       }
-       c1 += size;
-       for (i = 0; i < KMEM_SAFETYZONE; i++) {
-               if (   c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G'
-                   || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L'
-                  ) {
-                       printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v);
-                       printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
-                                       c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
-               }
-               c1 += 8;
-       }
-       printk(KERN_INFO "line %d  kfree(%p)\n",line,v);
-       v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8);
-       kfree(v);
-}
-
-#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__)
-#define kfree(x) dbg_kfree(x,__LINE__)
-*****************************************************************************/
-
 /*
  *     Function Prototypes
  */
index 5a0dbeb6bbe817f02c7750ea75e5f3e6713680cf..6b381fc0383d1ea7bde417f5c4665c03a4db418b 100644 (file)
@@ -119,6 +119,23 @@ static struct xfrm_algo_desc aalg_list[] = {
                .sadb_alg_maxbits = 160
        }
 },
+{
+       .name = "xcbc(aes)",
+
+       .uinfo = {
+               .auth = {
+                       .icv_truncbits = 96,
+                       .icv_fullbits = 128,
+               }
+       },
+
+       .desc = {
+               .sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
+               .sadb_alg_ivlen = 0,
+               .sadb_alg_minbits = 128,
+               .sadb_alg_maxbits = 128
+       }
+},
 };
 
 static struct xfrm_algo_desc ealg_list[] = {
index e8198a2c785df7416143c0781bd042f8aba995d2..414f890703802a0e2ef761df2573561f54f1b79e 100644 (file)
@@ -12,7 +12,7 @@
 #include <net/ip.h>
 #include <net/xfrm.h>
 
-static kmem_cache_t *secpath_cachep __read_mostly;
+static struct kmem_cache *secpath_cachep __read_mostly;
 
 void __secpath_destroy(struct sec_path *sp)
 {
@@ -27,7 +27,7 @@ struct sec_path *secpath_dup(struct sec_path *src)
 {
        struct sec_path *sp;
 
-       sp = kmem_cache_alloc(secpath_cachep, SLAB_ATOMIC);
+       sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
        if (!sp)
                return NULL;
 
index 64d3938f74c46d1d13753c32a3b37da63fd27918..bebd40e5a62e161f03f630adac629d8baa014d99 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/cache.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <linux/audit.h>
 
 #include "xfrm_hash.h"
 
@@ -39,7 +40,7 @@ EXPORT_SYMBOL(xfrm_policy_count);
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
-static kmem_cache_t *xfrm_dst_cache __read_mostly;
+static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
 static struct work_struct xfrm_policy_gc_work;
 static HLIST_HEAD(xfrm_policy_gc_list);
@@ -392,7 +393,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
        xfrm_pol_put(policy);
 }
 
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
 {
        struct xfrm_policy *policy;
        struct hlist_node *entry, *tmp;
@@ -580,7 +581,7 @@ static inline int xfrm_byidx_should_resize(int total)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
        int dir, total;
 
@@ -597,7 +598,7 @@ static void xfrm_hash_resize(void *__unused)
        mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 /* Generate new index... KAME seems to generate them ordered by cost
  * of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -804,7 +805,7 @@ struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete)
 }
 EXPORT_SYMBOL(xfrm_policy_byid);
 
-void xfrm_policy_flush(u8 type)
+void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
 {
        int dir;
 
@@ -824,6 +825,9 @@ void xfrm_policy_flush(u8 type)
                        hlist_del(&pol->byidx);
                        write_unlock_bh(&xfrm_policy_lock);
 
+                       xfrm_audit_log(audit_info->loginuid, audit_info->secid,
+                                      AUDIT_MAC_IPSEC_DELSPD, 1, pol, NULL);
+
                        xfrm_policy_kill(pol);
                        killed++;
 
@@ -842,6 +846,11 @@ void xfrm_policy_flush(u8 type)
                                hlist_del(&pol->byidx);
                                write_unlock_bh(&xfrm_policy_lock);
 
+                               xfrm_audit_log(audit_info->loginuid,
+                                              audit_info->secid,
+                                              AUDIT_MAC_IPSEC_DELSPD, 1,
+                                              pol, NULL);
+
                                xfrm_policy_kill(pol);
                                killed++;
 
@@ -860,33 +869,12 @@ EXPORT_SYMBOL(xfrm_policy_flush);
 int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*),
                     void *data)
 {
-       struct xfrm_policy *pol;
+       struct xfrm_policy *pol, *last = NULL;
        struct hlist_node *entry;
-       int dir, count, error;
+       int dir, last_dir = 0, count, error;
 
        read_lock_bh(&xfrm_policy_lock);
        count = 0;
-       for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
-               struct hlist_head *table = xfrm_policy_bydst[dir].table;
-               int i;
-
-               hlist_for_each_entry(pol, entry,
-                                    &xfrm_policy_inexact[dir], bydst) {
-                       if (pol->type == type)
-                               count++;
-               }
-               for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
-                       hlist_for_each_entry(pol, entry, table + i, bydst) {
-                               if (pol->type == type)
-                                       count++;
-                       }
-               }
-       }
-
-       if (count == 0) {
-               error = -ENOENT;
-               goto out;
-       }
 
        for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
                struct hlist_head *table = xfrm_policy_bydst[dir].table;
@@ -896,21 +884,37 @@ int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*)
                                     &xfrm_policy_inexact[dir], bydst) {
                        if (pol->type != type)
                                continue;
-                       error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
-                       if (error)
-                               goto out;
+                       if (last) {
+                               error = func(last, last_dir % XFRM_POLICY_MAX,
+                                            count, data);
+                               if (error)
+                                       goto out;
+                       }
+                       last = pol;
+                       last_dir = dir;
+                       count++;
                }
                for (i = xfrm_policy_bydst[dir].hmask; i >= 0; i--) {
                        hlist_for_each_entry(pol, entry, table + i, bydst) {
                                if (pol->type != type)
                                        continue;
-                               error = func(pol, dir % XFRM_POLICY_MAX, --count, data);
-                               if (error)
-                                       goto out;
+                               if (last) {
+                                       error = func(last, last_dir % XFRM_POLICY_MAX,
+                                                    count, data);
+                                       if (error)
+                                               goto out;
+                               }
+                               last = pol;
+                               last_dir = dir;
+                               count++;
                        }
                }
        }
-       error = 0;
+       if (count == 0) {
+               error = -ENOENT;
+               goto out;
+       }
+       error = func(last, last_dir % XFRM_POLICY_MAX, 0, data);
 out:
        read_unlock_bh(&xfrm_policy_lock);
        return error;
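
The reworked walk defers each callback by one element: func() runs for the previously found policy whenever a new match turns up, and the final match is passed with index 0, so the callback can recognize the last entry without a separate counting pass. A rough user-space sketch of the pattern, with a made-up array and callback purely for illustration:

#include <stdio.h>

static int func(int value, int index, void *data)
{
        printf("value=%d index=%d%s\n", value, index,
               index == 0 ? " (last)" : "");
        return 0;
}

int main(void)
{
        int items[] = { 10, 20, 30 };
        int count = 0, err = 0;
        int have_last = 0, last = 0;

        for (int i = 0; i < 3; i++) {
                if (have_last) {
                        err = func(last, count, NULL);  /* callback for the previous match */
                        if (err)
                                return err;
                }
                last = items[i];
                have_last = 1;
                count++;
        }
        if (count == 0)
                return -1;                              /* -ENOENT in the kernel code */
        return func(last, 0, NULL);                     /* final match gets index 0 */
}
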
@@ -1982,6 +1986,117 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
 
 EXPORT_SYMBOL(xfrm_bundle_ok);
 
+#ifdef CONFIG_AUDITSYSCALL
+/* Audit addition and deletion of SAs and ipsec policy */
+
+void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
+                   struct xfrm_policy *xp, struct xfrm_state *x)
+{
+
+       char *secctx;
+       u32 secctx_len;
+       struct xfrm_sec_ctx *sctx = NULL;
+       struct audit_buffer *audit_buf;
+       int family;
+       extern int audit_enabled;
+
+       if (audit_enabled == 0)
+               return;
+
+       audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
+       if (audit_buf == NULL)
+               return;
+
+       switch(type) {
+       case AUDIT_MAC_IPSEC_ADDSA:
+               audit_log_format(audit_buf, "SAD add: auid=%u", auid);
+               break;
+       case AUDIT_MAC_IPSEC_DELSA:
+               audit_log_format(audit_buf, "SAD delete: auid=%u", auid);
+               break;
+       case AUDIT_MAC_IPSEC_ADDSPD:
+               audit_log_format(audit_buf, "SPD add: auid=%u", auid);
+               break;
+       case AUDIT_MAC_IPSEC_DELSPD:
+               audit_log_format(audit_buf, "SPD delete: auid=%u", auid);
+               break;
+       default:
+               return;
+       }
+
+       if (sid != 0 &&
+               security_secid_to_secctx(sid, &secctx, &secctx_len) == 0)
+               audit_log_format(audit_buf, " subj=%s", secctx);
+       else
+               audit_log_task_context(audit_buf);
+
+       if (xp) {
+               family = xp->selector.family;
+               if (xp->security)
+                       sctx = xp->security;
+       } else {
+               family = x->props.family;
+               if (x->security)
+                       sctx = x->security;
+       }
+
+       if (sctx)
+               audit_log_format(audit_buf,
+                               " sec_alg=%u sec_doi=%u sec_obj=%s",
+                               sctx->ctx_alg, sctx->ctx_doi, sctx->ctx_str);
+
+       switch(family) {
+       case AF_INET:
+               {
+                       struct in_addr saddr, daddr;
+                       if (xp) {
+                               saddr.s_addr = xp->selector.saddr.a4;
+                               daddr.s_addr = xp->selector.daddr.a4;
+                       } else {
+                               saddr.s_addr = x->props.saddr.a4;
+                               daddr.s_addr = x->id.daddr.a4;
+                       }
+                       audit_log_format(audit_buf,
+                                        " src=%u.%u.%u.%u dst=%u.%u.%u.%u",
+                                        NIPQUAD(saddr), NIPQUAD(daddr));
+               }
+                       break;
+       case AF_INET6:
+               {
+                       struct in6_addr saddr6, daddr6;
+                       if (xp) {
+                               memcpy(&saddr6, xp->selector.saddr.a6,
+                                       sizeof(struct in6_addr));
+                               memcpy(&daddr6, xp->selector.daddr.a6,
+                                       sizeof(struct in6_addr));
+                       } else {
+                               memcpy(&saddr6, x->props.saddr.a6,
+                                       sizeof(struct in6_addr));
+                               memcpy(&daddr6, x->id.daddr.a6,
+                                       sizeof(struct in6_addr));
+                       }
+                       audit_log_format(audit_buf,
+                                        " src=" NIP6_FMT "dst=" NIP6_FMT,
+                                        NIP6(saddr6), NIP6(daddr6));
+               }
+               break;
+       }
+
+       if (x)
+               audit_log_format(audit_buf, " spi=%lu(0x%lx) protocol=%s",
+                               (unsigned long)ntohl(x->id.spi),
+                               (unsigned long)ntohl(x->id.spi),
+                               x->id.proto == IPPROTO_AH ? "AH" :
+                               (x->id.proto == IPPROTO_ESP ?
+                               "ESP" : "IPCOMP"));
+
+       audit_log_format(audit_buf, " res=%u", result);
+       audit_log_end(audit_buf);
+}
+
+EXPORT_SYMBOL(xfrm_audit_log);
+#endif /* CONFIG_AUDITSYSCALL */
+
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
        int err = 0;
@@ -2116,7 +2231,7 @@ static void __init xfrm_policy_init(void)
                        panic("XFRM: failed to allocate bydst hash\n");
        }
 
-       INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+       INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
        register_netdevice_notifier(&xfrm_dev_notifier);
 }
 
index 864962bbda902a513e04c2a32d7414bdcfce3733..fdb08d9f34aa684b1180985e031e77f1de116f55 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cache.h>
 #include <asm/uaccess.h>
+#include <linux/audit.h>
 
 #include "xfrm_hash.h"
 
@@ -115,7 +116,7 @@ static unsigned long xfrm_hash_new_size(void)
 
 static DEFINE_MUTEX(hash_resize_mutex);
 
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
 {
        struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
        unsigned long nsize, osize;
@@ -168,7 +169,7 @@ out_unlock:
        mutex_unlock(&hash_resize_mutex);
 }
 
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
 
 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
 EXPORT_SYMBOL(km_waitq);
@@ -207,7 +208,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
        kfree(x);
 }
 
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
 {
        struct xfrm_state *x;
        struct hlist_node *entry, *tmp;
@@ -238,6 +239,7 @@ static void xfrm_timer_handler(unsigned long data)
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;
+       int err = 0;
 
        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
@@ -295,9 +297,14 @@ expired:
                next = 2;
                goto resched;
        }
-       if (!__xfrm_state_delete(x) && x->id.spi)
+
+       err = __xfrm_state_delete(x);
+       if (!err && x->id.spi)
                km_state_expired(x, 1, 0);
 
+       xfrm_audit_log(audit_get_loginuid(current->audit_context), 0,
+                      AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
+
 out:
        spin_unlock(&x->lock);
 }
@@ -384,9 +391,10 @@ int xfrm_state_delete(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_delete);
 
-void xfrm_state_flush(u8 proto)
+void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info)
 {
        int i;
+       int err = 0;
 
        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
@@ -399,7 +407,11 @@ restart:
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);
 
-                               xfrm_state_delete(x);
+                               err = xfrm_state_delete(x);
+                               xfrm_audit_log(audit_info->loginuid,
+                                              audit_info->secid,
+                                              AUDIT_MAC_IPSEC_DELSA,
+                                              err ? 0 : 1, NULL, x);
                                xfrm_state_put(x);
 
                                spin_lock_bh(&xfrm_state_lock);
@@ -1099,7 +1111,7 @@ int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
 {
        int i;
-       struct xfrm_state *x;
+       struct xfrm_state *x, *last = NULL;
        struct hlist_node *entry;
        int count = 0;
        int err = 0;
@@ -1107,24 +1119,22 @@ int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i <= xfrm_state_hmask; i++) {
                hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
-                       if (xfrm_id_proto_match(x->id.proto, proto))
-                               count++;
+                       if (!xfrm_id_proto_match(x->id.proto, proto))
+                               continue;
+                       if (last) {
+                               err = func(last, count, data);
+                               if (err)
+                                       goto out;
+                       }
+                       last = x;
+                       count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }
-
-       for (i = 0; i <= xfrm_state_hmask; i++) {
-               hlist_for_each_entry(x, entry, xfrm_state_bydst+i, bydst) {
-                       if (!xfrm_id_proto_match(x->id.proto, proto))
-                               continue;
-                       err = func(x, --count, data);
-                       if (err)
-                               goto out;
-               }
-       }
+       err = func(last, 0, data);
 out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
@@ -1568,6 +1578,6 @@ void __init xfrm_state_init(void)
                panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
        xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
 
-       INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+       INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
 }
 
index 311205ffa7750747197135e23984f0adb6e5178c..e5372b11fc8f105bd4ecf6071c0418f71855760c 100644 (file)
@@ -31,6 +31,7 @@
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
 #endif
+#include <linux/audit.h>
 
 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
@@ -454,6 +455,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
        else
                err = xfrm_state_update(x);
 
+       xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+                      AUDIT_MAC_IPSEC_ADDSA, err ? 0 : 1, NULL, x);
+
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
                __xfrm_state_put(x);
@@ -523,6 +527,10 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
        }
 
        err = xfrm_state_delete(x);
+
+       xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+                      AUDIT_MAC_IPSEC_DELSA, err ? 0 : 1, NULL, x);
+
        if (err < 0)
                goto out;
 
@@ -1030,6 +1038,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
         * a type XFRM_MSG_UPDPOLICY - JHS */
        excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
        err = xfrm_policy_insert(p->dir, xp, excl);
+       xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+                      AUDIT_MAC_IPSEC_ADDSPD, err ? 0 : 1, xp, NULL);
+
        if (err) {
                security_xfrm_policy_free(xp);
                kfree(xp);
@@ -1257,6 +1268,10 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
                xp = xfrm_policy_bysel_ctx(type, p->dir, &p->sel, tmp.security, delete);
                security_xfrm_policy_free(&tmp);
        }
+       if (delete)
+               xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+                              AUDIT_MAC_IPSEC_DELSPD, (xp) ? 1 : 0, xp, NULL);
+
        if (xp == NULL)
                return -ENOENT;
 
@@ -1291,8 +1306,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma
 {
        struct km_event c;
        struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
+       struct xfrm_audit audit_info;
 
-       xfrm_state_flush(p->proto);
+       audit_info.loginuid = NETLINK_CB(skb).loginuid;
+       audit_info.secid = NETLINK_CB(skb).sid;
+       xfrm_state_flush(p->proto, &audit_info);
        c.data.proto = p->proto;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
@@ -1442,12 +1460,15 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x
        struct km_event c;
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err;
+       struct xfrm_audit audit_info;
 
        err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
        if (err)
                return err;
 
-       xfrm_policy_flush(type);
+       audit_info.loginuid = NETLINK_CB(skb).loginuid;
+       audit_info.secid = NETLINK_CB(skb).sid;
+       xfrm_policy_flush(type, &audit_info);
        c.data.type = type;
        c.event = nlh->nlmsg_type;
        c.seq = nlh->nlmsg_seq;
@@ -1502,6 +1523,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void *
        err = 0;
        if (up->hard) {
                xfrm_policy_delete(xp, p->dir);
+               xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+                               AUDIT_MAC_IPSEC_DELSPD, 1, xp, NULL);
+
        } else {
                // reset the timers here?
                printk("Dont know what to do with soft policy expire\n");
@@ -1533,8 +1557,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, void **
                goto out;
        km_state_expired(x, ue->hard, current->pid);
 
-       if (ue->hard)
+       if (ue->hard) {
                __xfrm_state_delete(x);
+               xfrm_audit_log(NETLINK_CB(skb).loginuid, NETLINK_CB(skb).sid,
+                              AUDIT_MAC_IPSEC_DELSA, 1, NULL, x);
+       }
 out:
        spin_unlock_bh(&x->lock);
        xfrm_state_put(x);
index 22d281c6ec244c16bb0dc1c870e7f5c5ce913fb5..f359b730c2c57572b46f47f7c344137c7b5f9c55 100644 (file)
@@ -43,7 +43,7 @@ struct sym_entry {
 
 static struct sym_entry *table;
 static unsigned int table_size, table_cnt;
-static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
+static unsigned long long _text, _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
 static int all_symbols = 0;
 static char symbol_prefix_char = '\0';
 
@@ -91,7 +91,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
                sym++;
 
        /* Ignore most absolute/undefined (?) symbols. */
-       if (strcmp(sym, "_stext") == 0)
+       if (strcmp(sym, "_text") == 0)
+               _text = s->addr;
+       else if (strcmp(sym, "_stext") == 0)
                _stext = s->addr;
        else if (strcmp(sym, "_etext") == 0)
                _etext = s->addr;
@@ -265,9 +267,25 @@ static void write_src(void)
 
        printf(".data\n");
 
+       /* Keep the symbol table relocatable by emitting each address
+        * relative to '_text'.  Symbol names cannot be used to build
+        * normal symbol references, because the list includes symbols
+        * that are declared static and are therefore private to their
+        * .o files; nothing in .tmp_kallsyms.o (or any other object)
+        * may reference them by name.
+        */
        output_label("kallsyms_addresses");
        for (i = 0; i < table_cnt; i++) {
-               printf("\tPTR\t%#llx\n", table[i].addr);
+               if (toupper(table[i].sym[0]) != 'A') {
+                       if (_text <= table[i].addr)
+                               printf("\tPTR\t_text + %#llx\n",
+                                       table[i].addr - _text);
+                       else
+                               printf("\tPTR\t_text - %#llx\n",
+                                       _text - table[i].addr);
+               } else {
+                       printf("\tPTR\t%#llx\n", table[i].addr);
+               }
        }
        printf("\n");
 
index 338bdea9654177316a9044c86a9667039fb6845e..f5628c57640b134fe2f8689fd3993d43f011ca3e 100644 (file)
@@ -798,7 +798,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e)
                        QAction *action;
 
                        headerPopup = new QPopupMenu(this);
-                       action = new QAction("Show Name", 0, this);
+                       action = new QAction(NULL, "Show Name", 0, this);
                          action->setToggleAction(TRUE);
                          connect(action, SIGNAL(toggled(bool)),
                                  parent(), SLOT(setShowName(bool)));
@@ -806,7 +806,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e)
                                  action, SLOT(setOn(bool)));
                          action->setOn(showName);
                          action->addTo(headerPopup);
-                       action = new QAction("Show Range", 0, this);
+                       action = new QAction(NULL, "Show Range", 0, this);
                          action->setToggleAction(TRUE);
                          connect(action, SIGNAL(toggled(bool)),
                                  parent(), SLOT(setShowRange(bool)));
@@ -814,7 +814,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e)
                                  action, SLOT(setOn(bool)));
                          action->setOn(showRange);
                          action->addTo(headerPopup);
-                       action = new QAction("Show Data", 0, this);
+                       action = new QAction(NULL, "Show Data", 0, this);
                          action->setToggleAction(TRUE);
                          connect(action, SIGNAL(toggled(bool)),
                                  parent(), SLOT(setShowData(bool)));
@@ -1161,7 +1161,7 @@ void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char
 QPopupMenu* ConfigInfoView::createPopupMenu(const QPoint& pos)
 {
        QPopupMenu* popup = Parent::createPopupMenu(pos);
-       QAction* action = new QAction("Show Debug Info", 0, popup);
+       QAction* action = new QAction(NULL, "Show Debug Info", 0, popup);
          action->setToggleAction(TRUE);
          connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
          connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
index 187f5de4612c2247bee0e003ac397db6ff70cbc1..df3b272f7ce6b1d47bc2014b9c5f18e9ec763d2b 100755 (executable)
@@ -1430,7 +1430,7 @@ sub create_parameterlist($$$) {
            # corresponding data structures "correctly". Catch it later in
            # output_* subs.
            push_parameter($arg, "", $file);
-       } elsif ($arg =~ m/\(/) {
+       } elsif ($arg =~ m/\(.*\*/) {
            # pointer-to-function
            $arg =~ tr/#/,/;
            $arg =~ m/[^\(]+\(\*([^\)]+)\)/;
index 2e1141623147887ffa2f91bb6e53b362a0a5f821..ac0a582229924d3d65d7903724fe2900a424088a 100644 (file)
@@ -911,6 +911,7 @@ static int init_section_ref_ok(const char *name)
                ".toc1",  /* used by ppc64 */
                ".stab",
                ".rodata",
+               ".parainstructions",
                ".text.lock",
                "__bug_table", /* used by powerpc for BUG() */
                ".pci_fixup_header",
@@ -931,6 +932,7 @@ static int init_section_ref_ok(const char *name)
                ".altinstructions",
                ".eh_frame",
                ".debug",
+               ".parainstructions",
                NULL
        };
        /* part of section name */
index 84999f69773d6cc232ea6db365eac38e4e1b83ba..72876dfadc8ae9fd6125737910cd8555ae8d17f1 100755 (executable)
@@ -48,6 +48,8 @@ fsck.reiser4 -V 2>&1 | grep ^fsck.reiser4 | awk \
 xfs_db -V 2>&1 | grep version | awk \
 'NR==1{print "xfsprogs              ", $3}'
 
+pccardctl -V 2>&1| grep pcmciautils | awk '{print "pcmciautils           ", $2}'
+
 cardmgr -V 2>&1| grep version | awk \
 'NR==1{print "pcmcia-cs             ", $3}'
 
@@ -87,10 +89,16 @@ loadkeys -h 2>&1 | awk \
 loadkeys -V 2>&1 | awk \
 '(NR==1 && ($2 ~ /console-tools/)) {print "Console-tools         ", $3}'
 
+oprofiled --version 2>&1 | awk \
+'(NR==1 && ($2 == "oprofile")) {print "oprofile              ", $3}'
+
 expr --v 2>&1 | awk 'NR==1{print "Sh-utils              ", $NF}'
 
 udevinfo -V 2>&1 | grep version | awk '{print "udev                  ", $3}'
 
+iwconfig --version 2>&1 | awk \
+'(NR==1 && ($3 == "version")) {print "wireless-tools        ",$4}'
+
 if [ -e /proc/modules ]; then
     X=`cat /proc/modules | sed -e "s/ .*$//"`
     echo "Modules Loaded         "$X
index 80de8c3e9cc3ea49c79e6525b0ce17b22076ac3e..ac9326c5f1da3f1e1781d8654ac816bb83355560 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/err.h>
 #include "internal.h"
 
-static kmem_cache_t    *key_jar;
+static struct kmem_cache       *key_jar;
 struct rb_root         key_serial_tree; /* tree of keys indexed by serial */
 DEFINE_SPINLOCK(key_serial_lock);
 
@@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock);
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
 
-static void key_cleanup(void *data);
-static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
+static void key_cleanup(struct work_struct *work);
+static DECLARE_WORK(key_cleanup_task, key_cleanup);
 
 /* we serialise key instantiation and link */
 DECLARE_RWSEM(key_construction_sem);
@@ -285,16 +285,14 @@ struct key *key_alloc(struct key_type *type, const char *desc,
        }
 
        /* allocate and initialise the key and its description */
-       key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
+       key = kmem_cache_alloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;
 
        if (desc) {
-               key->description = kmalloc(desclen, GFP_KERNEL);
+               key->description = kmemdup(desc, desclen, GFP_KERNEL);
                if (!key->description)
                        goto no_memory_3;
-
-               memcpy(key->description, desc, desclen);
        }
 
        atomic_set(&key->usage, 1);
@@ -552,7 +550,7 @@ EXPORT_SYMBOL(key_negate_and_link);
  * do cleaning up in process context so that we don't have to disable
  * interrupts all over the place
  */
-static void key_cleanup(void *data)
+static void key_cleanup(struct work_struct *work)
 {
        struct rb_node *_n;
        struct key *key;
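
These hunks follow the 2.6.20 workqueue API change: a work callback now receives the work item itself rather than a caller-supplied void pointer, and DECLARE_WORK()/INIT_WORK() drop their data argument. A small sketch of the new convention with hypothetical names, where the owning object is recovered via container_of():

	#include <linux/workqueue.h>

	/* Hypothetical object embedding a work item. */
	struct frob {
		int value;
		struct work_struct work;
	};

	static void frob_work_fn(struct work_struct *work)
	{
		/* The callback gets the work item; recover the container. */
		struct frob *f = container_of(work, struct frob, work);

		f->value++;
	}

	/* File-scope equivalent: static DECLARE_WORK(frob_work, frob_work_fn); */
	static void frob_kick(struct frob *f)
	{
		INIT_WORK(&f->work, frob_work_fn);	/* no data argument any more */
		schedule_work(&f->work);
	}
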
index e8d02acc51e7180f23a31b849ccce820418fd55c..ad45ce73964b193db630606c2d302dd6414f57c9 100644 (file)
@@ -706,12 +706,10 @@ int __key_link(struct key *keyring, struct key *key)
                                BUG_ON(size > PAGE_SIZE);
 
                                ret = -ENOMEM;
-                               nklist = kmalloc(size, GFP_KERNEL);
+                               nklist = kmemdup(klist, size, GFP_KERNEL);
                                if (!nklist)
                                        goto error2;
 
-                               memcpy(nklist, klist, size);
-
                                /* replace matched key */
                                atomic_inc(&key->usage);
                                nklist->keys[loop] = key;
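
kmalloc()+memcpy() pairs like the one removed above are folded into kmemdup(), which allocates and copies in one call and still returns NULL on failure. A sketch of the substitution, reusing the names from this hunk (error handling simplified):

	/* Before: allocate, check, then copy by hand. */
	nklist = kmalloc(size, GFP_KERNEL);
	if (!nklist)
		return -ENOMEM;
	memcpy(nklist, klist, size);

	/* After: kmemdup() does both steps, with the same failure semantics. */
	nklist = kmemdup(klist, size, GFP_KERNEL);
	if (!nklist)
		return -ENOMEM;
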
index 32150cf7c37f8de1662758746e1123a6e8a100e6..b6f86808475a0aedfb28e9d677fda7db553f006b 100644 (file)
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(key_session_mutex);
 struct key_user root_key_user = {
        .usage          = ATOMIC_INIT(3),
        .consq          = LIST_HEAD_INIT(root_key_user.consq),
-       .lock           = SPIN_LOCK_UNLOCKED,
+       .lock           = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
        .nkeys          = ATOMIC_INIT(2),
        .nikeys         = ATOMIC_INIT(2),
        .uid            = 0,
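
SPIN_LOCK_UNLOCKED is replaced by the named initializer __SPIN_LOCK_UNLOCKED(), which lets lockdep assign each statically initialized lock its own class. A tiny sketch with hypothetical names; for a lock that is not part of a larger object, DEFINE_SPINLOCK() is the usual form:

	struct foo_stats {
		atomic_t   count;
		spinlock_t lock;
	};

	static struct foo_stats foo_stats = {
		.count = ATOMIC_INIT(0),
		.lock  = __SPIN_LOCK_UNLOCKED(foo_stats.lock),
	};

	static DEFINE_SPINLOCK(foo_lock);	/* standalone lock */
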
index 74c0319c417e1e143bb7cd468c9744d8fabf88bb..e7c0b5e2066bbb532995ba0758516c236b60c1d1 100644 (file)
@@ -124,7 +124,7 @@ DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
 
 static struct avc_cache avc_cache;
 static struct avc_callback_node *avc_callbacks;
-static kmem_cache_t *avc_node_cachep;
+static struct kmem_cache *avc_node_cachep;
 
 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
 {
@@ -332,7 +332,7 @@ static struct avc_node *avc_alloc_node(void)
 {
        struct avc_node *node;
 
-       node = kmem_cache_alloc(avc_node_cachep, SLAB_ATOMIC);
+       node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC);
        if (!node)
                goto out;
 
@@ -496,7 +496,7 @@ static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
                audit_log_format(ab, " %s=%d", name2, ntohs(port));
 }
 
-static inline void avc_print_ipv4_addr(struct audit_buffer *ab, u32 addr,
+static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
                                       __be16 port, char *name1, char *name2)
 {
        if (addr)
index a29d78d3f44c26a41df206530f0ed209149d3231..44e9cd470543320ad736af1db38e2e63b94648ca 100644 (file)
@@ -124,7 +124,7 @@ static struct security_operations *secondary_ops = NULL;
 static LIST_HEAD(superblock_security_head);
 static DEFINE_SPINLOCK(sb_security_lock);
 
-static kmem_cache_t *sel_inode_cache;
+static struct kmem_cache *sel_inode_cache;
 
 /* Return security context for a given sid or just the context 
    length if the buffer is null or length is 0 */
@@ -181,7 +181,7 @@ static int inode_alloc_security(struct inode *inode)
        struct task_security_struct *tsec = current->security;
        struct inode_security_struct *isec;
 
-       isec = kmem_cache_alloc(sel_inode_cache, SLAB_KERNEL);
+       isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL);
        if (!isec)
                return -ENOMEM;
 
@@ -3537,7 +3537,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        /* Handle mapped IPv4 packets arriving via IPv6 sockets */
-       if (family == PF_INET6 && skb->protocol == ntohs(ETH_P_IP))
+       if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
                family = PF_INET;
 
        AVC_AUDIT_DATA_INIT(&ad, NET);
index 960ef18ddc41185f2c642939071999b20dccc890..6ed10c3d3339baf03cb8e79d0357b078a14427bf 100644 (file)
@@ -54,12 +54,12 @@ struct avc_audit_data {
                        char *netif;
                        struct sock *sk;
                        u16 family;
-                       u16 dport;
-                       u16 sport;
+                       __be16 dport;
+                       __be16 sport;
                        union {
                                struct {
-                                       u32 daddr;
-                                       u32 saddr;
+                                       __be32 daddr;
+                                       __be32 saddr;
                                } v4;
                                struct {
                                        struct in6_addr daddr;
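
Turning the u16/u32 port and address fields into __be16/__be32 marks them as network-byte-order values for sparse, so a missing ntohs()/ntohl() at the point of use is flagged by `make C=1`. A hypothetical fragment of the annotated style, modelled on the audit formatting seen in these hunks (NIPQUAD was the era's IPv4 print helper):

	static void report_peer(struct audit_buffer *ab, __be32 addr, __be16 port)
	{
		/* Values stay big-endian until they are actually printed. */
		audit_log_format(ab, " laddr=%u.%u.%u.%u lport=%d",
				 NIPQUAD(addr), ntohs(port));
	}
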
index d049c7acbc8bc9b5a03853eeeb0496ac0901ee45..ebb993c5c244d3e5fb72468eee758fe40550e23a 100644 (file)
@@ -28,7 +28,7 @@
  (keyp->source_type << 9)) & \
  AVTAB_HASH_MASK)
 
-static kmem_cache_t *avtab_node_cachep;
+static struct kmem_cache *avtab_node_cachep;
 
 static struct avtab_node*
 avtab_insert_node(struct avtab *h, int hvalue,
@@ -36,7 +36,7 @@ avtab_insert_node(struct avtab *h, int hvalue,
                  struct avtab_key *key, struct avtab_datum *datum)
 {
        struct avtab_node * newnode;
-       newnode = kmem_cache_alloc(avtab_node_cachep, SLAB_KERNEL);
+       newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL);
        if (newnode == NULL)
                return NULL;
        memset(newnode, 0, sizeof(struct avtab_node));
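
The kmem_cache_t typedef gives way to plain `struct kmem_cache`, and the legacy SLAB_KERNEL/SLAB_ATOMIC allocation flags become their GFP_* equivalents, as in the hunks above. A minimal sketch with hypothetical names (the cache itself is assumed to have been created with kmem_cache_create() at init time):

	#include <linux/slab.h>

	struct widget {
		int id;
	};

	/* Formerly: static kmem_cache_t *widget_cache; */
	static struct kmem_cache *widget_cache;

	static struct widget *widget_alloc(gfp_t gfp)
	{
		/* gfp is GFP_KERNEL or GFP_ATOMIC, never the old SLAB_* flags. */
		return kmem_cache_alloc(widget_cache, gfp);
	}
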
index 3a61f31155732529101fb685cf9d2a85a94e284e..ee64f5de896617b1f17dbb4b5a7064eef257b552 100644 (file)
@@ -59,10 +59,10 @@ struct gpio_methods {
 };
 
 struct gpio_notification {
+       struct delayed_work work;
        notify_func_t notify;
        void *data;
        void *gpio_private;
-       struct work_struct work;
        struct mutex mutex;
 };
 
index 40eb47eccf9a9acfe0304284054607ecaa7e79f2..2b03bc798bcb784a11b61a34c9d52045db8fdd52 100644 (file)
@@ -195,9 +195,10 @@ static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt)
        ftr_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void ftr_handle_notify(void *data)
+static void ftr_handle_notify(struct work_struct *work)
 {
-       struct gpio_notification *notif = data;
+       struct gpio_notification *notif =
+               container_of(work, struct gpio_notification, work.work);
 
        mutex_lock(&notif->mutex);
        if (notif->notify)
@@ -253,12 +254,9 @@ static void ftr_gpio_init(struct gpio_runtime *rt)
 
        ftr_gpio_all_amps_off(rt);
        rt->implementation_private = 0;
-       INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify,
-                 &rt->headphone_notify);
-       INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify,
-                 &rt->line_in_notify);
-       INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify,
-                 &rt->line_out_notify);
+       INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify);
        mutex_init(&rt->headphone_notify.mutex);
        mutex_init(&rt->line_in_notify.mutex);
        mutex_init(&rt->line_out_notify.mutex);
@@ -287,7 +285,7 @@ static irqreturn_t ftr_handle_notify_irq(int xx, void *data)
 {
        struct gpio_notification *notif = data;
 
-       schedule_work(&notif->work);
+       schedule_delayed_work(&notif->work, 0);
 
        return IRQ_HANDLED;
 }
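
The sound/aoa hunks convert the GPIO notifications to `struct delayed_work`: the callback reaches its object through the nested work.work member, and immediate execution becomes schedule_delayed_work(..., 0). The pattern in isolation, with hypothetical names mirroring struct gpio_notification:

	#include <linux/workqueue.h>

	struct notif {
		struct delayed_work work;
		void (*notify)(void *data);
		void *data;
	};

	static void notif_fn(struct work_struct *work)
	{
		/* 'work' points at notif.work.work, hence the nested member. */
		struct notif *n = container_of(work, struct notif, work.work);

		if (n->notify)
			n->notify(n->data);
	}

	static void notif_arm(struct notif *n)
	{
		INIT_DELAYED_WORK(&n->work, notif_fn);
		schedule_delayed_work(&n->work, 0);	/* 0 jiffies: run ASAP */
	}
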
index 2836c3218391c549156e0b3e53048daf6ec64cd1..5ca2220eac7d648a598488aca8858ef45f1e6afd 100644 (file)
@@ -69,9 +69,10 @@ static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt)
        pmf_gpio_set_lineout(rt, (s>>2)&1);
 }
 
-static void pmf_handle_notify(void *data)
+static void pmf_handle_notify(struct work_struct *work)
 {
-       struct gpio_notification *notif = data;
+       struct gpio_notification *notif =
+               container_of(work, struct gpio_notification, work.work);
 
        mutex_lock(&notif->mutex);
        if (notif->notify)
@@ -83,12 +84,9 @@ static void pmf_gpio_init(struct gpio_runtime *rt)
 {
        pmf_gpio_all_amps_off(rt);
        rt->implementation_private = 0;
-       INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify,
-                 &rt->headphone_notify);
-       INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify,
-                 &rt->line_in_notify);
-       INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify,
-                 &rt->line_out_notify);
+       INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify);
+       INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify);
        mutex_init(&rt->headphone_notify.mutex);
        mutex_init(&rt->line_in_notify.mutex);
        mutex_init(&rt->line_out_notify.mutex);
@@ -129,7 +127,7 @@ static void pmf_handle_notify_irq(void *data)
 {
        struct gpio_notification *notif = data;
 
-       schedule_work(&notif->work);
+       schedule_delayed_work(&notif->work, 0);
 }
 
 static int pmf_set_notify(struct gpio_runtime *rt,
index c79a9afd0955e2b9a2fa98fb5872616cef819ac4..c7e1b26461938eea09e0381fd0e83e213219fb4f 100644 (file)
@@ -125,7 +125,7 @@ struct audio_stream {
 #else
        dma_regs_t *dma_regs;   /* points to our DMA registers */
 #endif
-       int active:1;           /* we are using this stream for transfer now */
+       unsigned int active:1;  /* we are using this stream for transfer now */
        int period;             /* current transfer period */
        int periods;            /* current count of periods registered in the DMA engine */
        int tx_spin;            /* are we recording - flag used to do DMA trans. for sync */
index 66e24b5da4694dbaa40529a98b14743520aa41ef..6ea67b16c676a90ce8e2ae821037b2643bd7fd94 100644 (file)
@@ -3027,7 +3027,7 @@ static struct page * snd_pcm_mmap_status_nopage(struct vm_area_struct *area,
        struct page * page;
        
        if (substream == NULL)
-               return NOPAGE_OOM;
+               return NOPAGE_SIGBUS;
        runtime = substream->runtime;
        page = virt_to_page(runtime->status);
        get_page(page);
@@ -3070,7 +3070,7 @@ static struct page * snd_pcm_mmap_control_nopage(struct vm_area_struct *area,
        struct page * page;
        
        if (substream == NULL)
-               return NOPAGE_OOM;
+               return NOPAGE_SIGBUS;
        runtime = substream->runtime;
        page = virt_to_page(runtime->control);
        get_page(page);
@@ -3131,18 +3131,18 @@ static struct page *snd_pcm_mmap_data_nopage(struct vm_area_struct *area,
        size_t dma_bytes;
        
        if (substream == NULL)
-               return NOPAGE_OOM;
+               return NOPAGE_SIGBUS;
        runtime = substream->runtime;
        offset = area->vm_pgoff << PAGE_SHIFT;
        offset += address - area->vm_start;
-       snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
+       snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
        dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
        if (offset > dma_bytes - PAGE_SIZE)
                return NOPAGE_SIGBUS;
        if (substream->ops->page) {
                page = substream->ops->page(substream, offset);
                if (! page)
-                       return NOPAGE_OOM;
+                       return NOPAGE_OOM; /* XXX: is this really due to OOM? */
        } else {
                vaddr = runtime->dma_area + offset;
                page = virt_to_page(vaddr);
index 12ffffc9e8148ceb65cd0b5c8e19c75773bf6cc1..d2f2c5078e65d2b1885dac4380c7e732f23ef7fd 100644 (file)
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL");
 
 #define AK4114_ADDR                    0x00 /* fixed address */
 
-static void ak4114_stats(void *);
+static void ak4114_stats(struct work_struct *work);
 
 static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val)
 {
@@ -158,7 +158,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
        reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN);
        /* bring up statistics / event queuing */
        chip->init = 0;
-       INIT_WORK(&chip->work, ak4114_stats, chip);
+       INIT_DELAYED_WORK(&chip->work, ak4114_stats);
        queue_delayed_work(chip->workqueue, &chip->work, HZ / 10);
 }
 
@@ -561,9 +561,9 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
        return res;
 }
 
-static void ak4114_stats(void *data)
+static void ak4114_stats(struct work_struct *work)
 {
-       struct ak4114 *chip = (struct ak4114 *)data;
+       struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
        if (chip->init)
                return;
index cc2b9ab7f4e5355d490aeebff240240b2a9446f1..a0588c21324ac6ae98fbfccbf286139c1a97d582 100644 (file)
@@ -5,20 +5,6 @@
 #
 # Prompt user for primary drivers.
 
-config OSS_OBSOLETE_DRIVER
-       bool "Obsolete OSS drivers"
-       depends on SOUND_PRIME
-       help
-         This option enables support for obsolete OSS drivers that
-         are scheduled for removal in the near future since there
-         are ALSA drivers for the same hardware.
-
-         Please contact Adrian Bunk <bunk@stusta.de> if you had to
-         say Y here because your soundcard is not properly supported
-         by ALSA.
-
-         If unsure, say N.
-
 config SOUND_BT878
        tristate "BT878 audio dma"
        depends on SOUND_PRIME && PCI
@@ -35,40 +21,6 @@ config SOUND_BT878
          To compile this driver as a module, choose M here: the module will
          be called btaudio.
 
-config SOUND_EMU10K1
-       tristate "Creative SBLive! (EMU10K1)"
-       depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
-       ---help---
-         Say Y or M if you have a PCI sound card using the EMU10K1 chipset,
-         such as the Creative SBLive!, SB PCI512 or Emu-APS.
-
-         For more information on this driver and the degree of support for
-         the different card models please check:
-
-               <http://sourceforge.net/projects/emu10k1/>
-
-         It is now possible to load dsp microcode patches into the EMU10K1
-         chip.  These patches are used to implement real time sound
-         processing effects which include for example: signal routing,
-         bass/treble control, AC3 passthrough, ...
-         Userspace tools to create new patches and load/unload them can be
-         found in the emu-tools package at the above URL.
-
-config MIDI_EMU10K1
-       bool "Creative SBLive! MIDI (EXPERIMENTAL)"
-       depends on SOUND_EMU10K1 && EXPERIMENTAL && ISA_DMA_API
-       help
-         Say Y if you want to be able to use the OSS /dev/sequencer
-         interface.  This code is still experimental.
-
-config SOUND_FUSION
-       tristate "Crystal SoundFusion (CS4280/461x)"
-       depends on SOUND_PRIME && PCI && OSS_OBSOLETE_DRIVER
-       help
-         This module drives the Crystal SoundFusion devices (CS4280/46xx
-         series) when wired as native sound drivers with AC97 codecs.  If
-         this driver does not work try the CS4232 driver.
-
 config SOUND_BCM_CS4297A
        tristate "Crystal Sound CS4297a (for Swarm)"
        depends on SOUND_PRIME && SIBYTE_SWARM
@@ -448,47 +400,6 @@ config SOUND_DMAP
 
          Say Y unless you have 16MB or more RAM or a PCI sound card.
 
-config SOUND_AD1816
-       tristate "AD1816(A) based cards (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && SOUND_OSS && OSS_OBSOLETE_DRIVER
-       help
-         Say M here if you have a sound card based on the Analog Devices
-         AD1816(A) chip.
-
-         If you compile the driver into the kernel, you have to add
-         "ad1816=<io>,<irq>,<dma>,<dma2>" to the kernel command line.
-
-config SOUND_AD1889
-       tristate "AD1889 based cards (AD1819 codec) (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && SOUND_OSS && PCI && OSS_OBSOLETE_DRIVER
-       help
-         Say M here if you have a sound card based on the Analog Devices
-         AD1889 chip.
-
-config SOUND_ADLIB
-       tristate "Adlib Cards"
-       depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-       help
-         Includes ASB 64 4D. Information on programming AdLib cards is
-         available at <http://www.itsnet.com/home/ldragon/Specs/adlib.html>.
-
-config SOUND_ACI_MIXER
-       tristate "ACI mixer (miroSOUND PCM1-pro/PCM12/PCM20)"
-       depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-       ---help---
-         ACI (Audio Command Interface) is a protocol used to communicate with
-         the microcontroller on some sound cards produced by miro and
-         Cardinal Technologies.  The main function of the ACI is to control
-         the mixer and to get a product identification.
-
-         This VoxWare ACI driver currently supports the ACI functions on the
-         miroSOUND PCM1-pro, PCM12 and PCM20 radio. On the PCM20 radio, ACI
-         also controls the radio tuner. This is supported in the video4linux
-         miropcm20 driver (say M or Y here and go back to "Multimedia
-         devices" -> "Radio Adapters").
-
-         This driver is also available as a module and will be called aci.
-
 config SOUND_CS4232
        tristate "Crystal CS4232 based (PnP) cards"
        depends on SOUND_OSS
@@ -594,18 +505,6 @@ config SOUND_MPU401
          If you compile the driver into the kernel, you have to add
          "mpu401=<io>,<irq>" to the kernel command line.
 
-config SOUND_NM256
-       tristate "NM256AV/NM256ZX audio support"
-       depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-       help
-         Say M here to include audio support for the NeoMagic 256AV/256ZX
-         chipsets. These are the audio chipsets found in the Sony
-         Z505S/SX/DX, some Sony F-series, and the Dell Latitude CPi and CPt
-         laptops. It includes support for an AC97-compatible mixer and an
-         apparently proprietary sound engine.
-
-         See <file:Documentation/sound/oss/NM256> for further information.
-
 config SOUND_PAS
        tristate "ProAudioSpectrum 16 support"
        depends on SOUND_OSS
@@ -714,20 +613,6 @@ config SOUND_YM3812
 
          If unsure, say Y.
 
-config SOUND_OPL3SA2
-       tristate "Yamaha OPL3-SA2 and SA3 based PnP cards"
-       depends on SOUND_OSS && OSS_OBSOLETE_DRIVER
-       help
-         Say Y or M if you have a card based on one of these Yamaha sound
-         chipsets or the "SAx", which is actually a SA3. Read
-         <file:Documentation/sound/oss/OPL3-SA2> for more information on
-         configuring these cards.
-
-         If you compile the driver into the kernel and do not also
-         configure in the optional ISA PnP support, you will have to add
-         "opl3sa2=<io>,<irq>,<dma>,<dma2>,<mssio>,<mpuio>" to the kernel
-         command line.
-
 config SOUND_UART6850
        tristate "6850 UART support"
        depends on SOUND_OSS
index 6ad38411423989b7d3232e92c7cd43f820574634..ad7210a00dc01ead45e119d7fed3f2c613124ed6 100644 (file)
@@ -1020,6 +1020,7 @@ static int __devinit btaudio_probe(struct pci_dev *pci_dev,
  fail2:
         free_irq(bta->irq,bta);        
  fail1:
+       iounmap(bta->mmio);
        kfree(bta);
  fail0:
        release_mem_region(pci_resource_start(pci_dev,0),
@@ -1051,6 +1052,7 @@ static void __devexit btaudio_remove(struct pci_dev *pci_dev)
         free_irq(bta->irq,bta);
        release_mem_region(pci_resource_start(pci_dev,0),
                           pci_resource_len(pci_dev,0));
+       iounmap(bta->mmio);
 
        /* remove from linked list */
        if (bta == btaudios) {
index 86dd23974e055110817c5d6a3f8e03664d487f2a..49f902f35c280a32d6441bcb6f1808fe811f1e46 100644 (file)
@@ -111,9 +111,15 @@ static ssize_t emu10k1_audio_read(struct file *file, char __user *buffer, size_t
 
                if ((bytestocopy >= wiinst->buffer.fragment_size)
                    || (bytestocopy >= count)) {
+                       int rc;
+
                        bytestocopy = min_t(u32, bytestocopy, count);
 
-                       emu10k1_wavein_xferdata(wiinst, (u8 __user *)buffer, &bytestocopy);
+                       rc = emu10k1_wavein_xferdata(wiinst,
+                                                    (u8 __user *)buffer,
+                                                    &bytestocopy);
+                       if (rc)
+                               return rc;
 
                        count -= bytestocopy;
                        buffer += bytestocopy;
index 8bbf44b881b40d3c3f2f7904e002a09769a74795..060d1be94d335f5fc5c6c1ef885c3e854bf69692 100644 (file)
@@ -304,11 +304,12 @@ void emu10k1_wavein_getxfersize(struct wiinst *wiinst, u32 * size)
        }
 }
 
-static void copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
+static int copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
 {
-       if (cov == 1)
-               __copy_to_user(dst, src + str, len);
-       else {
+       if (cov == 1) {
+               if (__copy_to_user(dst, src + str, len))
+                       return -EFAULT;
+       } else {
                u8 byte;
                u32 i;
 
@@ -316,22 +317,26 @@ static void copy_block(u8 __user *dst, u8 * src, u32 str, u32 len, u8 cov)
 
                for (i = 0; i < len; i++) {
                        byte = src[2 * i] ^ 0x80;
-                       __copy_to_user(dst + i, &byte, 1);
+                       if (__copy_to_user(dst + i, &byte, 1))
+                               return -EFAULT;
                }
        }
+
+       return 0;
 }
 
-void emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
+int emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
 {
        struct wavein_buffer *buffer = &wiinst->buffer;
        u32 sizetocopy, sizetocopy_now, start;
        unsigned long flags;
+       int ret;
 
        sizetocopy = min_t(u32, buffer->size, *size);
        *size = sizetocopy;
 
        if (!sizetocopy)
-               return;
+               return 0;
 
        spin_lock_irqsave(&wiinst->lock, flags);
        start = buffer->pos;
@@ -345,11 +350,17 @@ void emu10k1_wavein_xferdata(struct wiinst *wiinst, u8 __user *data, u32 * size)
        if (sizetocopy > sizetocopy_now) {
                sizetocopy -= sizetocopy_now;
 
-               copy_block(data, buffer->addr, start, sizetocopy_now, buffer->cov);
-               copy_block(data + sizetocopy_now, buffer->addr, 0, sizetocopy, buffer->cov);
+               ret = copy_block(data, buffer->addr, start, sizetocopy_now,
+                                buffer->cov);
+               if (ret == 0)
+                       ret = copy_block(data + sizetocopy_now, buffer->addr, 0,
+                                        sizetocopy, buffer->cov);
        } else {
-               copy_block(data, buffer->addr, start, sizetocopy, buffer->cov);
+               ret = copy_block(data, buffer->addr, start, sizetocopy,
+                                buffer->cov);
        }
+
+       return ret;
 }
 
 void emu10k1_wavein_update(struct emu10k1_card *card, struct wiinst *wiinst)
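
The emu10k1 hunks stop discarding the return value of __copy_to_user()/copy_from_user(); any bytes left uncopied are turned into -EFAULT for the caller. The general shape, with hypothetical names:

	static int fetch_block(u8 __user *dst, const u8 *src, size_t len)
	{
		/* copy_to_user() returns the number of bytes it could NOT copy. */
		if (copy_to_user(dst, src, len))
			return -EFAULT;
		return 0;
	}
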
index 15cfb9b35596009faa5be7a935fe8283cb0807b7..e82029b46ad193970cc571f8e646768fd0ae153e 100644 (file)
@@ -83,7 +83,7 @@ void emu10k1_wavein_close(struct emu10k1_wavedevice *);
 void emu10k1_wavein_start(struct emu10k1_wavedevice *);
 void emu10k1_wavein_stop(struct emu10k1_wavedevice *);
 void emu10k1_wavein_getxfersize(struct wiinst *, u32 *);
-void emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
+int emu10k1_wavein_xferdata(struct wiinst *, u8 __user *, u32 *);
 int emu10k1_wavein_setformat(struct emu10k1_wavedevice *, struct wave_format *);
 void emu10k1_wavein_update(struct emu10k1_card *, struct wiinst *);
 
index 4e3baca7d41f3d2856accd469e0292d749a07e47..6d21d4368decbe08aef8cdfd5c6745d7122c2bc1 100644 (file)
@@ -162,12 +162,15 @@ ssize_t emu10k1_pt_write(struct file *file, const char __user *buffer, size_t co
 
                DPD(3, "prepend size %d, prepending %d bytes\n", pt->prepend_size, needed);
                if (count < needed) {
-                       copy_from_user(pt->buf + pt->prepend_size, buffer, count);
+                       if (copy_from_user(pt->buf + pt->prepend_size,
+                                          buffer, count))
+                               return -EFAULT;
                        pt->prepend_size += count;
                        DPD(3, "prepend size now %d\n", pt->prepend_size);
                        return count;
                }
-               copy_from_user(pt->buf + pt->prepend_size, buffer, needed);
+               if (copy_from_user(pt->buf + pt->prepend_size, buffer, needed))
+                       return -EFAULT;
                r = pt_putblock(wave_dev, (u16 *) pt->buf, nonblock);
                if (r)
                        return r;
@@ -178,7 +181,8 @@ ssize_t emu10k1_pt_write(struct file *file, const char __user *buffer, size_t co
        blocks_copied = 0;
        while (blocks > 0) {
                u16 __user *bufptr = (u16 __user *) buffer + (bytes_copied/2);
-               copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE);
+               if (copy_from_user(pt->buf, bufptr, PT_BLOCKSIZE))
+                       return -EFAULT;
                r = pt_putblock(wave_dev, (u16 *)pt->buf, nonblock);
                if (r) {
                        if (bytes_copied)
@@ -193,7 +197,8 @@ ssize_t emu10k1_pt_write(struct file *file, const char __user *buffer, size_t co
        i = count - bytes_copied;
        if (i) {
                pt->prepend_size = i;
-               copy_from_user(pt->buf, buffer + bytes_copied, i);
+               if (copy_from_user(pt->buf, buffer + bytes_copied, i))
+                       return -EFAULT;
                bytes_copied += i;
                DPD(3, "filling prepend buffer with %d bytes", i);
        }
index 17837d4b5ed311effabe0b548fae61660a1fd1fa..c96cc8c68b3bec1135aefc7fd7e6c370c540121d 100644 (file)
@@ -2120,8 +2120,8 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
                return NOPAGE_SIGBUS; /* Disallow mremap */
        }
         if (!card) {
-               DPRINTK ("EXIT, returning NOPAGE_OOM\n");
-               return NOPAGE_OOM;      /* Nothing allocated */
+               DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
+               return NOPAGE_SIGBUS;   /* Nothing allocated */
        }
 
        pgoff = vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
index 6577b232535784b6f6bcf7b2db70448e423afcf1..7abcb10b2754c01b5c156006ff92504c2c75134b 100644 (file)
@@ -1927,9 +1927,10 @@ static int snd_ac97_dev_disconnect(struct snd_device *device)
 static struct snd_ac97_build_ops null_build_ops;
 
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-static void do_update_power(void *data)
+static void do_update_power(struct work_struct *work)
 {
-       update_power_regs(data);
+       update_power_regs(
+               container_of(work, struct snd_ac97, power_work.work));
 }
 #endif
 
@@ -1989,7 +1990,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
        mutex_init(&ac97->page_mutex);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
        ac97->power_workq = create_workqueue("ac97");
-       INIT_WORK(&ac97->power_work, do_update_power, ac97);
+       INIT_DELAYED_WORK(&ac97->power_work, do_update_power);
 #endif
 
 #ifdef CONFIG_PCI
index 9c3d7ac0806883f1480f6bbebf267f423d717f13..71482c15a852b0d6b5325d449ac353f3f47119af 100644 (file)
@@ -272,10 +272,11 @@ EXPORT_SYMBOL(snd_hda_queue_unsol_event);
 /*
  * process queued unsolicited events
  */
-static void process_unsol_events(void *data)
+static void process_unsol_events(struct work_struct *work)
 {
-       struct hda_bus *bus = data;
-       struct hda_bus_unsolicited *unsol = bus->unsol;
+       struct hda_bus_unsolicited *unsol =
+               container_of(work, struct hda_bus_unsolicited, work);
+       struct hda_bus *bus = unsol->bus;
        struct hda_codec *codec;
        unsigned int rp, caddr, res;
 
@@ -314,7 +315,8 @@ static int init_unsol_queue(struct hda_bus *bus)
                kfree(unsol);
                return -ENOMEM;
        }
-       INIT_WORK(&unsol->work, process_unsol_events, bus);
+       INIT_WORK(&unsol->work, process_unsol_events);
+       unsol->bus = bus;
        bus->unsol = unsol;
        return 0;
 }
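
Because the unsolicited-event worker no longer receives the bus as a data pointer, a back-pointer is stored next to the work item (see the struct hda_bus_unsolicited hunk below) and the worker recovers it after container_of(). A sketch of that fallback with hypothetical names, for cases where the work item cannot be embedded directly in the object the callback needs:

	struct my_bus {
		int pending;
	};

	struct helper {
		struct work_struct work;
		struct my_bus *bus;	/* set once, beside the work item */
	};

	static void helper_fn(struct work_struct *work)
	{
		struct helper *h = container_of(work, struct helper, work);

		h->bus->pending = 0;	/* operate on the owning bus */
	}
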
index f9416c36396eceb0f6e2b86a06394b01651186c6..9ca1baf860bd579ffd64321d2efe5d7ecce8e599 100644 (file)
@@ -206,6 +206,7 @@ struct hda_bus_unsolicited {
        /* workqueue */
        struct workqueue_struct *workq;
        struct work_struct work;
+       struct hda_bus *bus;
 };
 
 /*
index fd3590fcaedbcaeb75ca54efe071b9ad2ddd7272..2d40cc72f23610592c33bca428ae3538c04ec4e8 100644 (file)
@@ -219,35 +219,15 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
 static int pdacf_config(struct pcmcia_device *link)
 {
        struct snd_pdacf *pdacf = link->priv;
-       tuple_t tuple;
-       cisparse_t *parse = NULL;
-       u_short buf[32];
        int last_fn, last_ret;
 
        snd_printdd(KERN_DEBUG "pdacf_config called\n");
-       parse = kmalloc(sizeof(*parse), GFP_KERNEL);
-       if (! parse) {
-               snd_printk(KERN_ERR "pdacf_config: cannot allocate\n");
-               return -ENOMEM;
-       }
-       tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
-       tuple.Attributes = 0;
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleDataMax = sizeof(buf);
-       tuple.TupleOffset = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-       link->conf.ConfigBase = parse->config.base;
        link->conf.ConfigIndex = 0x5;
 
        CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
        CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
        CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
 
-       kfree(parse);
-
        if (snd_pdacf_assign_resources(pdacf, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
                goto failed;
 
@@ -255,7 +235,6 @@ static int pdacf_config(struct pcmcia_device *link)
        return 0;
 
 cs_failed:
-       kfree(parse);
        cs_error(link, last_fn, last_ret);
 failed:
        pcmcia_disable_device(link);
@@ -299,7 +278,8 @@ static int pdacf_resume(struct pcmcia_device *link)
  * Module entry points
  */
 static struct pcmcia_device_id snd_pdacf_ids[] = {
-       PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45),
+       /* this is too general PCMCIA_DEVICE_MANF_CARD(0x015d, 0x4c45), */
+       PCMCIA_DEVICE_PROD_ID12("Core Sound","PDAudio-CF",0x396d19d2,0x71717b49),
        PCMCIA_DEVICE_NULL
 };
 MODULE_DEVICE_TABLE(pcmcia, snd_pdacf_ids);
index 3089fcca800ecfa872b47f28fd5c7f50baa93d80..d7df59e9c647e43b142f8215e4fac3386a0de196 100644 (file)
@@ -217,34 +217,12 @@ static int vxpocket_config(struct pcmcia_device *link)
 {
        struct vx_core *chip = link->priv;
        struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
-       tuple_t tuple;
-       cisparse_t *parse;
-       u_short buf[32];
        int last_fn, last_ret;
 
        snd_printdd(KERN_DEBUG "vxpocket_config called\n");
-       parse = kmalloc(sizeof(*parse), GFP_KERNEL);
-       if (! parse) {
-               snd_printk(KERN_ERR "vx: cannot allocate\n");
-               return -ENOMEM;
-       }
-       tuple.Attributes = 0;
-       tuple.TupleData = (cisdata_t *)buf;
-       tuple.TupleDataMax = sizeof(buf);
-       tuple.TupleOffset = 0;
-       tuple.DesiredTuple = CISTPL_CONFIG;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-       link->conf.ConfigBase = parse->config.base;
-       link->conf.Present = parse->config.rmask[0];
 
        /* redefine hardware record according to the VERSION1 string */
-       tuple.DesiredTuple = CISTPL_VERS_1;
-       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
-       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
-       CS_CHECK(ParseTuple, pcmcia_parse_tuple(link, &tuple, parse));
-       if (! strcmp(parse->version_1.str + parse->version_1.ofs[1], "VX-POCKET")) {
+       if (!strcmp(link->prod_id[1], "VX-POCKET")) {
                snd_printdd("VX-pocket is detected\n");
        } else {
                snd_printdd("VX-pocket 440 is detected\n");
@@ -265,14 +243,12 @@ static int vxpocket_config(struct pcmcia_device *link)
                goto failed;
 
        link->dev_node = &vxp->node;
-       kfree(parse);
        return 0;
 
 cs_failed:
        cs_error(link, last_fn, last_ret);
 failed:
        pcmcia_disable_device(link);
-       kfree(parse);
        return -ENODEV;
 }
 
index 2fbe1d183fcefdb08d15986836ebc47e679b79b3..8f074c7936e67870329a12b7c9069a346d99fe1f 100644 (file)
@@ -942,10 +942,11 @@ static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int
 }
 
 static struct work_struct device_change;
+static struct snd_pmac *device_change_chip;
 
-static void device_change_handler(void *self)
+static void device_change_handler(struct work_struct *work)
 {
-       struct snd_pmac *chip = self;
+       struct snd_pmac *chip = device_change_chip;
        struct pmac_tumbler *mix;
        int headphone, lineout;
 
@@ -1417,7 +1418,8 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip)
        chip->resume = tumbler_resume;
 #endif
 
-       INIT_WORK(&device_change, device_change_handler, (void *)chip);
+       INIT_WORK(&device_change, device_change_handler);
+       device_change_chip = chip;
 
 #ifdef PMAC_SUPPORT_AUTOMUTE
        if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0)
index 4b52d18dcd539199050bb033e4d4e7ba2e40c736..b76b3dd9df25d48d878548238f040018c443b075 100644 (file)
@@ -48,7 +48,7 @@ static struct page * snd_us428ctls_vm_nopage(struct vm_area_struct *area, unsign
        
        offset = area->vm_pgoff << PAGE_SHIFT;
        offset += address - area->vm_start;
-       snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_OOM);
+       snd_assert((offset % PAGE_SIZE) == 0, return NOPAGE_SIGBUS);
        vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset;
        page = virt_to_page(vaddr);
        get_page(page);